#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This is a very crude version of "in-memory HBase", which implements just
enough functionality of HappyBase API to support testing of our driver.
"""
import copy
import re
from oslo_log import log
import six
import report
from report.i18n import _
LOG = log.getLogger(__name__)
class MTable(object):
"""HappyBase.Table mock."""
def __init__(self, name, families):
self.name = name
self.families = families
self._rows_with_ts = {}
def row(self, key, columns=None):
if key not in self._rows_with_ts:
return {}
res = copy.copy(sorted(six.iteritems(
self._rows_with_ts.get(key)))[-1][1])
if columns:
            keys = list(res.keys())
            for res_key in keys:
                if res_key not in columns:
                    res.pop(res_key)
return res
def rows(self, keys):
return ((k, self.row(k)) for k in keys)
def put(self, key, data, ts=None):
        # Note: we currently use timestamped puts only for the Resource table,
        # which is why ts is set to "0" when it is None. If both timestamped
        # and non-timestamped puts are ever needed in the same table, ts="0"
        # can no longer be used as the default.
if ts is None:
ts = "0"
if key not in self._rows_with_ts:
self._rows_with_ts[key] = {ts: data}
else:
if ts in self._rows_with_ts[key]:
self._rows_with_ts[key][ts].update(data)
else:
self._rows_with_ts[key].update({ts: data})
def delete(self, key):
del self._rows_with_ts[key]
def _get_latest_dict(self, row):
        # The idea here is to return the latest versions of the columns.
        # _rows_with_ts stores {row: {ts_1: {data}, ts_2: {data}}}. Sorting the
        # items by ts and applying dict.update() in ascending order means that
        # values from newer timestamps override the older ones, e.g.
        # {'1': {'f:a': 'x'}, '2': {'f:a': 'y'}} -> {'f:a': 'y'}, which matches
        # what HBase returns for the latest cell versions.
data = {}
for i in sorted(six.iteritems(self._rows_with_ts[row])):
data.update(i[1])
return data
def scan(self, filter=None, columns=None, row_start=None, row_stop=None,
limit=None):
columns = columns or []
sorted_keys = sorted(self._rows_with_ts)
# copy data between row_start and row_stop into a dict
rows = {}
for row in sorted_keys:
if row_start and row < row_start:
continue
if row_stop and row > row_stop:
break
rows[row] = self._get_latest_dict(row)
if columns:
ret = {}
for row, data in six.iteritems(rows):
for key in data:
if key in columns:
ret[row] = data
rows = ret
if filter:
# TODO(jdanjou): we should really parse this properly,
# but at the moment we are only going to support AND here
filters = filter.split('AND')
for f in filters:
# Extract filter name and its arguments
                g = re.search(r"(.*)\((.*),?\)", f)
fname = g.group(1).strip()
fargs = [s.strip().replace('\'', '')
for s in g.group(2).split(',')]
m = getattr(self, fname)
if callable(m):
# overwrite rows for filtering to take effect
# in case of multiple filters
rows = m(fargs, rows)
else:
                    raise report.NotImplementedError(
                        "%s filter is not implemented, "
                        "you may want to add it!" % fname)
for k in sorted(rows)[:limit]:
yield k, rows[k]
@staticmethod
def SingleColumnValueFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'SingleColumnValueFilter'
is found in the 'filter' argument.
"""
op = args[2]
column = "%s:%s" % (args[0], args[1])
value = args[3]
if value.startswith('binary:'):
value = value[7:]
r = {}
for row in rows:
data = rows[row]
if op == '=':
if column in data and data[column] == value:
r[row] = data
elif op == '<':
if column in data and data[column] < value:
r[row] = data
elif op == '<=':
if column in data and data[column] <= value:
r[row] = data
elif op == '>':
if column in data and data[column] > value:
r[row] = data
elif op == '>=':
if column in data and data[column] >= value:
r[row] = data
elif op == '!=':
if column in data and data[column] != value:
r[row] = data
return r
@staticmethod
def ColumnPrefixFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'ColumnPrefixFilter' is found
in the 'filter' argument.
:param args: a list of filter arguments, contain prefix of column
:param rows: a dict of row prefixes for filtering
"""
value = args[0]
column = 'f:' + value
r = {}
for row, data in rows.items():
column_dict = {}
for key in data:
if key.startswith(column):
column_dict[key] = data[key]
r[row] = column_dict
return r
@staticmethod
def RowFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'RowFilter' is found in the
'filter' argument.
        :param args: a list of filter arguments; it contains the operator and
                     the sought string
        :param rows: a dict of rows to filter
"""
op = args[0]
value = args[1]
if value.startswith('regexstring:'):
value = value[len('regexstring:'):]
r = {}
for row, data in rows.items():
try:
g = re.search(value, row).group()
if op == '=':
if g == row:
r[row] = data
else:
raise report.NotImplementedError(
"In-memory "
"RowFilter doesn't support "
"the %s operation yet" % op)
except AttributeError:
pass
return r
@staticmethod
def QualifierFilter(args, rows):
"""This is filter for testing "in-memory HBase".
This method is called from scan() when 'QualifierFilter' is found in
the 'filter' argument
"""
op = args[0]
value = args[1]
is_regex = False
if value.startswith('binaryprefix:'):
value = value[len('binaryprefix:'):]
if value.startswith('regexstring:'):
value = value[len('regexstring:'):]
is_regex = True
column = 'f:' + value
r = {}
for row in rows:
data = rows[row]
r_data = {}
for key in data:
if ((op == '=' and key.startswith(column)) or
(op == '>=' and key >= column) or
(op == '<=' and key <= column) or
(op == '>' and key > column) or
(op == '<' and key < column) or
(is_regex and re.search(value, key))):
r_data[key] = data[key]
else:
raise report.NotImplementedError(
"In-memory QualifierFilter "
"doesn't support the %s "
"operation yet" % op)
if r_data:
r[row] = r_data
return r
class MConnectionPool(object):
def __init__(self):
self.conn = MConnection()
def connection(self):
return self.conn
class MConnection(object):
"""HappyBase.Connection mock."""
def __init__(self):
self.tables = {}
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@staticmethod
def open():
LOG.debug(_("Opening in-memory HBase connection"))
def create_table(self, n, families=None):
families = families or {}
if n in self.tables:
return self.tables[n]
t = MTable(n, families)
self.tables[n] = t
return t
def delete_table(self, name, use_prefix=True):
del self.tables[name]
def table(self, name):
return self.create_table(name)
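# A minimal usage sketch of the mocks above (not part of the original module).
# The row keys and column values are made up for illustration; the block is
# guarded so it only runs when this file is executed directly, never when it
# is imported by tests.
if __name__ == "__main__":
    conn = MConnection()
    table = conn.table('resource')
    table.put('res-1', {'f:state': 'active', 'f:project_id': 'p-1'})
    table.put('res-2', {'f:state': 'deleted', 'f:project_id': 'p-2'})
    # scan() understands the crude AND-only filter strings parsed above.
    flt = "SingleColumnValueFilter('f', 'state', =, 'binary:active')"
    for row_key, row_data in table.scan(filter=flt):
        print("%s %s" % (row_key, row_data))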
|
|
#!/usr/bin/env python
"""
Commands related to syncing assets.
"""
from glob import glob
import logging
import os
from fabric.api import prompt, task
import app_config
from fnmatch import fnmatch
import utils
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
ASSETS_ROOT = 'www/assets'
@task
def sync():
"""
Intelligently synchronize assets between S3 and local folder.
"""
    ignore_globs = []
    try:
        with open('%s/assetsignore' % ASSETS_ROOT, 'r') as f:
            ignore_globs = [l.strip() for l in f]
    except IOError:
        # No assetsignore file; nothing is excluded from the sync.
        pass
local_paths = []
not_lowercase = []
for local_path, subdirs, filenames in os.walk(ASSETS_ROOT):
for name in filenames:
full_path = os.path.join(local_path, name)
glob_path = full_path.split(ASSETS_ROOT)[1].strip('/')
ignore = False
for ignore_glob in ignore_globs:
if fnmatch(glob_path, ignore_glob):
ignore = True
break
if ignore:
logger.info('Ignoring: %s' % full_path)
continue
if name.lower() != name:
not_lowercase.append(full_path)
local_paths.append(full_path)
# Prevent case sensitivity differences between OSX and S3 from screwing us up
if not_lowercase:
logger.error('The following filenames are not lowercase, please change them before running `assets.sync`:')
for name in not_lowercase:
logger.error(' %s' % name)
return
bucket = utils.get_bucket(app_config.ASSETS_S3_BUCKET)
keys = bucket.list(app_config.ASSETS_SLUG)
which = None
always = False
for key in keys:
download = False
upload = False
local_path = key.name.replace(app_config.ASSETS_SLUG, ASSETS_ROOT, 1)
# Skip root key
if local_path == '%s/' % ASSETS_ROOT:
continue
logger.info(local_path)
if local_path in local_paths:
            # A file can only exist once; removing it here speeds up future
            # checks and leaves a list of files missing from S3 when the loop
            # completes
local_paths.remove(local_path)
# We need an actual key, not a "list key"
# http://stackoverflow.com/a/18981298/24608
key = bucket.get_key(key.name)
with open(local_path, 'rb') as f:
local_md5 = key.compute_md5(f)[0]
# Hashes are different
if key.get_metadata('md5') != local_md5:
if not always:
# Ask user which file to take
which, always = _assets_confirm(local_path)
if not which:
logger.info('Cancelling!')
return
if which == 'remote':
download = True
elif which == 'local':
upload = True
else:
download = True
if download:
_assets_download(key, local_path)
if upload:
_assets_upload(local_path, key)
action = None
always = False
# Iterate over files that didn't exist on S3
for local_path in local_paths:
key_name = local_path.replace(ASSETS_ROOT, app_config.ASSETS_SLUG, 1)
key = bucket.get_key(key_name, validate=False)
logger.info(local_path)
if not always:
action, always = _assets_upload_confirm()
if not action:
logger.info('Cancelling!')
return
if action == 'upload':
_assets_upload(local_path, key)
elif action == 'delete':
_assets_delete(local_path, key)
@task
def rm(path):
"""
Remove an asset from s3 and locally
"""
    bucket = utils.get_bucket(app_config.ASSETS_S3_BUCKET)
file_list = glob(path)
found_folder = True
# Add files in folders, instead of folders themselves (S3 doesn't have folders)
while found_folder:
found_folder = False
for local_path in file_list:
if os.path.isdir(local_path):
found_folder = True
file_list.remove(local_path)
for path in os.listdir(local_path):
file_list.append(os.path.join(local_path, path))
if len(file_list) > 0:
utils.confirm("You are about to destroy %i files. Are you sure?" % len(file_list))
for local_path in file_list:
logger.info(local_path)
if os.path.isdir(local_path):
file_list.extend(os.listdir(local_path))
continue
key_name = local_path.replace(ASSETS_ROOT, app_config.ASSETS_SLUG, 1)
key = bucket.get_key(key_name)
_assets_delete(local_path, key)
def _assets_confirm(local_path):
"""
Check with user about whether to keep local or remote file.
"""
print('--> This file has been changed locally and on S3.')
answer = prompt('Take remote [r] Take local [l] Take all remote [ra] Take all local [la] cancel', default='c')
if answer == 'r':
return ('remote', False)
elif answer == 'l':
return ('local', False)
elif answer == 'ra':
return ('remote', True)
elif answer == 'la':
return ('local', True)
return (None, False)
def _assets_upload_confirm():
print('--> This file does not exist on S3.')
answer = prompt('Upload local copy [u] Delete local copy [d] Upload all [ua] Delete all [da] cancel', default='c')
if answer == 'u':
return ('upload', False)
elif answer == 'd':
return ('delete', False)
elif answer == 'ua':
return ('upload', True)
elif answer == 'da':
return ('delete', True)
return (None, False)
def _assets_download(s3_key, local_path):
"""
Utility method to download a single asset from S3.
"""
print('--> Downloading!')
dirname = os.path.dirname(local_path)
if not (os.path.exists(dirname)):
os.makedirs(dirname)
s3_key.get_contents_to_filename(local_path)
def _assets_upload(local_path, s3_key):
"""
Utility method to upload a single asset to S3.
"""
print('--> Uploading!')
with open(local_path, 'rb') as f:
local_md5 = s3_key.compute_md5(f)[0]
s3_key.set_metadata('md5', local_md5)
s3_key.set_contents_from_filename(local_path)
def _assets_delete(local_path, s3_key):
"""
Utility method to delete assets both locally and remotely.
"""
print('--> Deleting!')
s3_key.delete()
os.remove(local_path)
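# Usage sketch (hypothetical invocations, assuming this module is exposed as
# the `assets` task namespace of the project's fabfile, e.g. fabfile/assets.py):
#
#   fab assets.sync                          # reconcile www/assets with S3
#   fab assets.rm:'www/assets/img/old-*.jpg' # delete matching assets locally and on S3
#
# The glob above is an example only.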
|
|
# coding=utf-8
import datetime
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from tracker.models import DischargePlan, DischargePlanForm
from tracker.models import SignatureForm
from tracker.models import Child
"""The new() function creates and processes a new DischargePlan form. It
then creates a new DischargePlan object from the DischargePlan model,
populates it with the form info, and saves it to the database. It does
the same with a Signature form, which is indexed in the DischargePlan
object, along with the Child object. It also generates a list of past
discharge plan forms filled out for this child, which can be viewed from
this template. It is protected with the login_required decorator, so
that no one who isn't logged in can add a form. The new() function
renders the add_discharge_plan template.
"""
@login_required
def new(request, child_id):
child = get_object_or_404(Child, pk=child_id)
# If POST request, get posted exam and signature form.
if (request.POST):
signature_form = SignatureForm(request.POST, request.FILES,
request=request)
exam_form = DischargePlanForm(request.POST, request.FILES,
request=request)
# If user clicked discard button, discard posted form and
# render the child_information template.
if ('discard' in request.POST):
return HttpResponseRedirect(
reverse('tracker:child',
kwargs={'child_id': child_id}
))
        # If user clicked 'save' or 'submit', process and save the form
        # (the form validates unconditionally on 'save'; on 'submit' it is
        # validated by the custom clean()), then create DischargePlan and
        # Signature objects, populate them with the form data, and save them.
else:
if (signature_form.is_valid() and exam_form.is_valid()):
saved_signature = signature_form.cleaned_data
# Check that signature object is saved
if (saved_signature):
saved_exam = exam_form.save(commit=False)
saved_exam.signature_name = saved_signature[
'signature_name']
saved_exam.signature_surname = saved_signature[
'signature_surname']
saved_exam.signature_emp = saved_signature[
'signature_emp']
saved_exam.signature_direction = saved_signature[
'signature_direction']
saved_exam.signature_cell = saved_signature[
'signature_cell']
saved_exam.child = child
saved_exam.last_saved = datetime.datetime.utcnow()
saved_exam.save()
exam_form.save_m2m()
if (saved_exam.date):
child.is_active = False
child.discharge_date = saved_exam.date
child.save()
# Check that exam object saved
if (saved_exam):
# If user clicked 'save', render
# edit_discharge_plan template.
if ('save' in request.POST):
return HttpResponseRedirect(
reverse('tracker:edit_discharge_plan',
kwargs={
'child_id': child_id,
'exam_id': saved_exam.id
}))
# If user clicked 'submit', render
# add_discharge_plan template.
else:
return HttpResponseRedirect(
reverse('tracker:new_discharge_plan',
kwargs={'child_id': child_id}))
# If validation passed but exam still didn't save,
# return to add_discharge_plan template with "Sorry,
# please try again" error message
else:
return render(request,
'tracker/add_exam.html',
{'error_message': 'Lo sentimos, el '
'formulario no se puede guardar en '
'este momento. Por favor, vuelva a '
'intentarlo.', })
                # If validation passed but the signature still didn't save,
                # return to the add_discharge_plan template with a "Sorry,
                # please try again" error message.
else:
return render(request, 'tracker/add_exam.html',
{'error_message': 'Lo sentimos, el '
'formulario no se puede guardar en este '
'momento. Por favor, vuelva a '
'intentarlo.', })
# If not POST request, create new DischargePlan form and Signature
# form.
else:
exam_form = DischargePlanForm(
initial={
'child': child,
'child_id': child_id,
'date': datetime.date.today(),
})
signature_form = SignatureForm()
# Render add_discharge_plan template
discharge_plan_list = DischargePlan.objects.filter(child_id=child_id)
context = {
'child': child,
'child_id': child_id,
'residence_id': child.residence_id,
'exam_form': exam_form.as_ul,
'signature_form': signature_form.as_ul,
'ExamList': discharge_plan_list,
'page': 'discharge_plan',
'name': 'Plan de Alta',
# 'exam': True,
}
return render(request, 'tracker/add_exam.html', context)
"""The view() function renders the discharge_plan template, populated
with information from the DischargePlan model. It is protected with the
login_required decorator, so that no one who isn't logged in can view a
form.
"""
@login_required
def view(request, child_id, exam_id):
exam = get_object_or_404(DischargePlan, pk=exam_id)
child = get_object_or_404(Child, pk=child_id)
if (request.POST):
        # After confirmation, delete the discharge plan and redirect to the
        # new_discharge_plan view.
if ('discard' in request.POST):
exam.delete()
return HttpResponseRedirect(reverse('tracker:new_discharge_plan',
kwargs={'child_id': child_id}))
context = {
'exam': exam,
'child': child,
'child_id': child.id,
'residence_id': child.residence_id,
'page': 'discharge_plan',
# 'exam': True,
}
return render(request, 'tracker/discharge_plan.html', context)
"""The edit() function creates and processes a DischargePlan form
populated with an existing DischargePlan object information. It then adds
the edits to the DischargePlan object and saves it to the database. It
does the same with a Signature form, which is indexed in the
DischargePlan object, along with the Child object. It is protected with
the login_required decorator, so that no one who isn't logged in can
add a form. The edit() function renders the edit_discharge_plan template.
"""
@login_required
def edit(request, child_id, exam_id):
child = get_object_or_404(Child, pk=child_id)
exam = get_object_or_404(DischargePlan, pk=exam_id)
# If POST request, get posted exam and signature form.
if (request.POST):
signature_form = SignatureForm(request.POST, request.FILES,
request=request)
exam_form = DischargePlanForm(request.POST, request.FILES,
instance=exam, request=request)
# If user clicked discard button, discard posted form and
# render the child_information template.
if ('discard' in request.POST):
return HttpResponseRedirect(
reverse('tracker:child', kwargs={'child_id': child_id}))
        # If user clicked 'save' or 'submit', process and save the forms
        # (the form validates unconditionally on 'save'; on 'submit' it is
        # validated by the custom clean()), then edit and save the
        # DischargePlan and Signature objects.
else:
if (signature_form.is_valid() and exam_form.is_valid()):
saved_signature = signature_form.cleaned_data
# Check that signature object saved
if (saved_signature):
saved_exam = exam_form.save(commit=False)
saved_exam.signature_name = saved_signature[
'signature_name']
saved_exam.signature_surname = saved_signature[
'signature_surname']
saved_exam.signature_emp = saved_signature[
'signature_emp']
saved_exam.signature_direction = saved_signature[
'signature_direction']
saved_exam.signature_cell = saved_signature[
'signature_cell']
saved_exam.child = child
saved_exam.last_saved = datetime.datetime.utcnow()
saved_exam.save()
exam_form.save_m2m()
                    if (saved_exam.date):
child.is_active = False
child.discharge_date = saved_exam.date
child.save()
# Check that exam object saved
if (saved_exam):
# If user clicked 'save', render
# edit_discharge_plan template.
if ('save' in request.POST):
return HttpResponseRedirect(
reverse('tracker:edit_discharge_plan',
kwargs={
'child_id': child_id,
'exam_id': saved_exam.id
}))
# If user clicked 'submit', render
# add_discharge_plan template.
else:
return HttpResponseRedirect(
reverse('tracker:new_discharge_plan',
kwargs={'child_id': child_id}))
# if validation passed but exam still didn't save,
# return to edit_discharge_plan template with "Sorry,
# please try again" error message
else:
return render(request,
'tracker/edit_exam.html',
{'error_message': 'Lo sentimos, el '
'formulario no se puede guardar en '
'este momento. Por favor, vuelva a '
'intentarlo.', })
# if validation passed but signature still didn't
# save, return to edit_discharge_plan template with
# "Sorry, please try again" error message
else:
return render(request, 'tracker/edit_exam.html',
{'error_message': 'Lo sentimos, el '
'formulario no se puede guardar en este '
'momento. Por favor, vuelva a '
'intentarlo.', })
# If not POST request, create new DischargePlan form and Signature
# form, populated with the DischargePlan and Signature objects.
else:
exam_form = DischargePlanForm(instance=exam)
signature_form = SignatureForm(initial={
'signature_name': exam.signature_name,
'signature_surname': exam.signature_surname,
'signature_emp': exam.signature_emp,
'signature_direction': exam.signature_direction,
'signature_cell': exam.signature_cell,
})
# Render edit_discharge_plan template
exam_list = DischargePlan.objects.filter(child_id=child_id)
context = {
'child': child,
'child_id': child_id,
'exam_id': exam.id,
'residence_id': child.residence_id,
'exam_form': exam_form.as_ul,
'signature_form': signature_form.as_ul,
'ExamList': exam_list,
'page': 'discharge_plan',
'name': 'Plan de Alta',
# 'exam': True,
}
return render(request, 'tracker/edit_exam.html', context)
"""The delete() function confirms with the user that a photograph
should be deleted, and then deletes the objects from the database.
This function is unused as long as javascript is enabled, as the
deletion process is done in the view() function, and the form is
rendered in a jQueryUI dialog box. This function is kept merely as a
precaution/so that it can be rebuilt for other objects without needing
to parse the view() object too carefully.
"""
def delete(request, child_id, exam_id):
    # If POST request, get the DischargePlan object, confirm deletion with
    # the user, and delete the object
if (request.POST):
exam = get_object_or_404(DischargePlan, pk=exam_id)
child = get_object_or_404(Child, pk=child_id)
        # On confirmation, delete the object and redirect to the
        # new_discharge_plan view
if ('discard' in request.POST):
exam.delete()
return HttpResponseRedirect(reverse('tracker:new_discharge_plan',
kwargs={'child_id': child_id}))
        # If no confirmation, return to the discharge_plan template
elif ('no' in request.POST):
context = {
'exam': exam,
'child': child,
'child_id': child.id,
'residence_id': child.residence_id,
'page': 'discharge_plan',
}
return render(request, 'tracker/discharge_plan.html', context)
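# A sketch of the URL configuration these views assume. The names
# 'new_discharge_plan', 'edit_discharge_plan' and 'child' come from the
# reverse() calls above; the regex patterns, module alias and the
# 'discharge_plan' name are illustrative assumptions only:
#
#   url(r'^child/(?P<child_id>\d+)/discharge_plan/new/$',
#       discharge_plan.new, name='new_discharge_plan'),
#   url(r'^child/(?P<child_id>\d+)/discharge_plan/(?P<exam_id>\d+)/$',
#       discharge_plan.view, name='discharge_plan'),
#   url(r'^child/(?P<child_id>\d+)/discharge_plan/(?P<exam_id>\d+)/edit/$',
#       discharge_plan.edit, name='edit_discharge_plan'),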
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import datetime
import logging
import six
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow import configuration
from airflow.utils.state import State
from qds_sdk.qubole import Qubole
from qds_sdk.commands import Command, HiveCommand, PrestoCommand, HadoopCommand, \
PigCommand, ShellCommand, SparkCommand, DbTapQueryCommand, DbExportCommand, \
DbImportCommand
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand
}
HYPHEN_ARGS = ['cluster_label', 'app_id', 'note_id']
POSITIONAL_ARGS = ['sub_command', 'parameters']
COMMAND_ARGS = {
"hivecmd": ['query', 'script_location', 'macros', 'tags', 'sample_size',
'cluster_label', 'name'],
'prestocmd': ['query', 'script_location', 'macros', 'tags', 'cluster_label', 'name'],
'hadoopcmd': ['sub_command', 'tags', 'cluster_label', 'name'],
'shellcmd': ['script', 'script_location', 'files', 'archives', 'parameters', 'tags',
'cluster_label', 'name'],
'pigcmd': ['script', 'script_location', 'parameters', 'tags', 'cluster_label',
'name'],
'dbtapquerycmd': ['db_tap_id', 'query', 'macros', 'tags', 'name'],
'sparkcmd': ['program', 'cmdline', 'sql', 'script_location', 'macros', 'tags',
'cluster_label', 'language', 'app_id', 'name', 'arguments', 'note_id',
'user_program_arguments'],
'dbexportcmd': ['mode', 'hive_table', 'partition_spec', 'dbtap_id', 'db_table',
'db_update_mode', 'db_update_keys', 'export_dir',
'fields_terminated_by', 'tags', 'name'],
'dbimportcmd': ['mode', 'hive_table', 'dbtap_id', 'db_table', 'where_clause',
'parallelism', 'extract_query', 'boundary_query', 'split_column',
'tags', 'name']
}
class QuboleHook(BaseHook):
def __init__(self, *args, **kwargs):
conn = self.get_connection(kwargs['qubole_conn_id'])
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs['task_id']
self.dag_id = kwargs['dag'].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs['command_type']]
self.cmd = None
@staticmethod
def handle_failure_retry(context):
ti = context['ti']
cmd_id = ti.xcom_pull(key='qbol_cmd_id', task_ids=ti.task_id)
if cmd_id is not None:
logger = logging.getLogger("QuboleHook")
cmd = Command.find(cmd_id)
if cmd is not None:
if cmd.status == 'done':
                    logger.info('Command ID: %s succeeded, hence marking this '
                                'TI as Success.', cmd_id)
ti.state = State.SUCCESS
elif cmd.status == 'running':
logger.info('Cancelling the Qubole Command Id: %s', cmd_id)
cmd.cancel()
def execute(self, context):
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
context['task_instance'].xcom_push(key='qbol_cmd_id', value=self.cmd.id)
logging.info("Qubole command created with Id: %s and Status: %s",
self.cmd.id, self.cmd.status)
while not Command.is_done(self.cmd.status):
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id)
logging.info("Command Id: %s and Status: %s", self.cmd.id, self.cmd.status)
if 'fetch_logs' in self.kwargs and self.kwargs['fetch_logs'] is True:
logging.info("Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log())
if self.cmd.status != 'done':
raise AirflowException('Command Id: {0} failed with Status: {1}'.format(
self.cmd.id, self.cmd.status))
def kill(self, ti):
"""
        Kill (cancel) a Qubole command
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
logging.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
def get_log(self, ti):
"""
Get Logs of a command from Qubole
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(self.cls, cmd_id)
def get_jobs_id(self, ti):
"""
        Get jobs associated with a Qubole command
        :param ti: Task Instance of the dag, used to determine the Qubole command id
        :return: Job information associated with the command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(self.cls, cmd_id)
def create_cmd_args(self, context):
args = []
cmd_type = self.kwargs['command_type']
inplace_args = None
tags = set([self.dag_id, self.task_id, context['run_id']])
for k,v in self.kwargs.items():
if k in COMMAND_ARGS[cmd_type]:
if k in HYPHEN_ARGS:
args.append("--{0}={1}".format(k.replace('_', '-'),v))
elif k in POSITIONAL_ARGS:
inplace_args = v
elif k == 'tags':
if isinstance(v, six.string_types):
tags.add(v)
elif isinstance(v, (list, tuple)):
for val in v:
tags.add(val)
else:
args.append("--{0}={1}".format(k,v))
if k == 'notify' and v is True:
args.append("--notify")
args.append("--tags={0}".format(','.join(filter(None,tags))))
if inplace_args is not None:
if cmd_type == 'hadoopcmd':
args += inplace_args.split(' ', 1)
else:
args += inplace_args.split(' ')
return args
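# Illustrative sketch (not used by the hook itself): how create_cmd_args maps
# task kwargs onto qds-sdk style arguments for a hypothetical 'hivecmd' task.
# The kwargs values below are placeholders.
def _example_hivecmd_args():
    kwargs = {'command_type': 'hivecmd', 'query': 'SHOW TABLES',
              'cluster_label': 'default', 'name': 'example_task'}
    args = []
    for k, v in kwargs.items():
        if k in COMMAND_ARGS[kwargs['command_type']]:
            if k in HYPHEN_ARGS:
                # e.g. cluster_label becomes --cluster-label=default
                args.append("--{0}={1}".format(k.replace('_', '-'), v))
            else:
                args.append("--{0}={1}".format(k, v))
    # e.g. ['--query=SHOW TABLES', '--cluster-label=default', '--name=example_task']
    return args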
|
|
# -*- coding: utf-8 -*-
import mock
from nose.tools import * # noqa (PEP8 asserts)
from box import BoxClientException
from framework.auth import Auth
from framework.exceptions import HTTPError
from website.addons.box.model import (
BoxUserSettings, BoxNodeSettings
)
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
from website.addons.box.tests.factories import (
BoxAccountFactory,
BoxUserSettingsFactory,
BoxNodeSettingsFactory,
)
from website.addons.base import exceptions
class TestUserSettingsModel(OsfTestCase):
def _prep_oauth_case(self):
self.node = ProjectFactory()
self.user = self.node.creator
self.external_account = BoxAccountFactory()
self.user.external_accounts.append(self.external_account)
self.user.save()
self.user_settings = self.user.get_or_add_addon('box')
def test_grant_oauth_access_no_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
)
self.user_settings.save()
assert_equal(
self.user_settings.oauth_grants,
{self.node._id: {self.external_account._id: {}}},
)
def test_grant_oauth_access_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
assert_equal(
self.user_settings.oauth_grants,
{
self.node._id: {
self.external_account._id: {'folder': 'fake_folder_id'}
},
}
)
def test_verify_oauth_access_no_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
)
self.user_settings.save()
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account
)
)
assert_false(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=BoxAccountFactory()
)
)
def test_verify_oauth_access_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
)
assert_false(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'another_folder_id'}
)
)
class TestBoxNodeSettingsModel(OsfTestCase):
def setUp(self):
super(TestBoxNodeSettingsModel, self).setUp()
self.node = ProjectFactory()
self.user = self.node.creator
self.external_account = BoxAccountFactory()
self.user.add_addon('box')
self.user.external_accounts.append(self.external_account)
self.user.save()
self.user_settings = self.user.get_addon('box')
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
self.node_settings = BoxNodeSettingsFactory(
user_settings=self.user_settings,
folder_id='1234567890',
owner=self.node
)
self.node_settings.external_account = self.external_account
self.node_settings.save()
def tearDown(self):
super(TestBoxNodeSettingsModel, self).tearDown()
self.user_settings.remove()
self.node_settings.remove()
self.external_account.remove()
self.node.remove()
self.user.remove()
def test_complete_true(self):
assert_true(self.node_settings.has_auth)
assert_true(self.node_settings.complete)
def test_complete_false(self):
self.user_settings.oauth_grants[self.node._id].pop(self.external_account._id)
assert_true(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
def test_complete_auth_false(self):
self.node_settings.user_settings = None
assert_false(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
def test_fields(self):
node_settings = BoxNodeSettings(user_settings=self.user_settings)
node_settings.save()
assert_true(node_settings.user_settings)
assert_equal(node_settings.user_settings.owner, self.user)
assert_true(hasattr(node_settings, 'folder_id'))
assert_true(hasattr(node_settings, 'user_settings'))
def test_folder_defaults_to_none(self):
node_settings = BoxNodeSettings(user_settings=self.user_settings)
node_settings.save()
assert_is_none(node_settings.folder_id)
def test_has_auth(self):
self.user.external_accounts = []
self.user_settings.reload()
settings = BoxNodeSettings(user_settings=self.user_settings)
settings.save()
assert_false(settings.has_auth)
self.user.external_accounts.append(self.external_account)
settings.reload()
assert_true(settings.has_auth)
def test_clear_auth(self):
node_settings = BoxNodeSettingsFactory()
node_settings.external_account = BoxAccountFactory()
node_settings.user_settings = BoxUserSettingsFactory()
node_settings.save()
node_settings.clear_auth()
assert_is_none(node_settings.external_account)
assert_is_none(node_settings.folder_id)
assert_is_none(node_settings.user_settings)
def test_to_json(self):
settings = self.node_settings
user = UserFactory()
result = settings.to_json(user)
assert_equal(result['addon_short_name'], 'box')
def test_delete(self):
assert_true(self.node_settings.user_settings)
assert_true(self.node_settings.folder_id)
old_logs = self.node.logs
self.node_settings.delete()
self.node_settings.save()
assert_is(self.node_settings.user_settings, None)
assert_is(self.node_settings.folder_id, None)
assert_true(self.node_settings.deleted)
assert_equal(self.node.logs, old_logs)
def test_deauthorize(self):
assert_true(self.node_settings.user_settings)
assert_true(self.node_settings.folder_id)
self.node_settings.deauthorize(auth=Auth(self.user))
self.node_settings.save()
assert_is(self.node_settings.user_settings, None)
assert_is(self.node_settings.folder_id, None)
last_log = self.node.logs[-1]
assert_equal(last_log.action, 'box_node_deauthorized')
params = last_log.params
assert_in('node', params)
assert_in('project', params)
assert_in('folder_id', params)
@mock.patch("website.addons.box.model.BoxNodeSettings._update_folder_data")
def test_set_folder(self, mock_update_folder):
folder_id = '1234567890'
self.node_settings.set_folder(folder_id, auth=Auth(self.user))
self.node_settings.save()
# Folder was set
assert_equal(self.node_settings.folder_id, folder_id)
# Log was saved
last_log = self.node.logs[-1]
assert_equal(last_log.action, 'box_folder_selected')
def test_set_user_auth(self):
node_settings = BoxNodeSettingsFactory()
user_settings = BoxUserSettingsFactory()
external_account = BoxAccountFactory()
user_settings.owner.external_accounts.append(external_account)
user_settings.save()
node_settings.external_account = external_account
node_settings.set_user_auth(user_settings)
node_settings.save()
assert_true(node_settings.has_auth)
assert_equal(node_settings.user_settings, user_settings)
# A log was saved
last_log = node_settings.owner.logs[-1]
assert_equal(last_log.action, 'box_node_authorized')
log_params = last_log.params
assert_equal(log_params['folder_id'], node_settings.folder_id)
assert_equal(log_params['node'], node_settings.owner._primary_key)
assert_equal(last_log.user, user_settings.owner)
@mock.patch("website.addons.box.model.refresh_oauth_key")
def test_serialize_credentials(self, mock_refresh):
mock_refresh.return_value = True
self.user_settings.access_token = 'key-11'
self.user_settings.save()
credentials = self.node_settings.serialize_waterbutler_credentials()
expected = {'token': self.node_settings.user_settings.access_token}
assert_equal(credentials, expected)
def test_serialize_credentials_not_authorized(self):
self.node_settings.user_settings = None
self.node_settings.save()
with assert_raises(exceptions.AddonError):
self.node_settings.serialize_waterbutler_credentials()
def test_serialize_settings(self):
settings = self.node_settings.serialize_waterbutler_settings()
expected = {'folder': self.node_settings.folder_id}
assert_equal(settings, expected)
def test_serialize_settings_not_configured(self):
self.node_settings.folder_id = None
self.node_settings.save()
with assert_raises(exceptions.AddonError):
self.node_settings.serialize_waterbutler_settings()
def test_create_log(self):
action = 'file_added'
path = 'pizza.nii'
nlog = len(self.node.logs)
self.node_settings.create_waterbutler_log(
auth=Auth(user=self.user),
action=action,
metadata={'path': path, 'materialized': path},
)
self.node.reload()
assert_equal(len(self.node.logs), nlog + 1)
assert_equal(
self.node.logs[-1].action,
'box_{0}'.format(action),
)
assert_equal(
self.node.logs[-1].params['path'],
path
)
class TestNodeSettingsCallbacks(OsfTestCase):
def setUp(self):
super(TestNodeSettingsCallbacks, self).setUp()
# Create node settings with auth
self.user_settings = BoxUserSettingsFactory(access_token='123abc')
self.node_settings = BoxNodeSettingsFactory(
user_settings=self.user_settings,
)
self.external_account = BoxAccountFactory()
self.user_settings.owner.external_accounts.append(self.external_account)
self.node_settings.external_account = self.external_account
self.project = self.node_settings.owner
self.user = self.user_settings.owner
self.user_settings.grant_oauth_access(
node=self.project,
external_account=self.external_account,
)
def test_after_fork_by_authorized_box_user(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
node=self.project, fork=fork, user=self.user_settings.owner
)
print(message)
assert_equal(clone.user_settings, self.user_settings)
def test_after_fork_by_unauthorized_box_user(self):
fork = ProjectFactory()
user = UserFactory()
clone, message = self.node_settings.after_fork(
node=self.project, fork=fork, user=user,
save=True
)
# need request context for url_for
assert_is(clone.user_settings, None)
def test_before_fork(self):
node = ProjectFactory()
message = self.node_settings.before_fork(node, self.user)
assert_true(message)
def test_before_remove_contributor_message(self):
message = self.node_settings.before_remove_contributor(
self.project, self.user)
assert_true(message)
assert_in(self.user.fullname, message)
assert_in(self.project.project_or_component, message)
def test_after_remove_authorized_box_user_not_self(self):
message = self.node_settings.after_remove_contributor(
self.project, self.user_settings.owner)
self.node_settings.save()
assert_is_none(self.node_settings.user_settings)
assert_true(message)
assert_in("You can re-authenticate", message)
def test_after_remove_authorized_box_user_self(self):
auth = Auth(user=self.user_settings.owner)
message = self.node_settings.after_remove_contributor(
self.project, self.user_settings.owner, auth)
self.node_settings.save()
assert_is_none(self.node_settings.user_settings)
assert_true(message)
assert_not_in("You can re-authenticate", message)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_is_none(self.node_settings.user_settings)
assert_is_none(self.node_settings.folder_id)
|
|
# -*- coding: utf-8 -*-
import os
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
import xgboost as xgb
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from data_load import load_data, load_data_tgt
# Load data
data_dir = '/home/ilya/code/ml4vs/data/LMC_SC20__corrected_list_of_variables/raw_index_values'
file_1 = 'vast_lightcurve_statistics_variables_only.log'
file_0 = 'vast_lightcurve_statistics_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
# names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',
# 'Npts', 'CSSD']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X, y, df, features_names, delta = load_data([file_0, file_1], names, names_to_delete)
target = 'variable'
predictors = list(df)
predictors.remove(target)
dtrain = df
kfold = StratifiedKFold(n_splits=4, shuffle=True, random_state=123)
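# Custom xgboost evaluation metric: returning 1 - F1 turns F1 maximization
# into a minimization target, presumably because early stopping with a custom
# feval is treated here as lower-is-better. (Below it is only wired in via the
# commented-out clf__eval_metric=xg_f1 line; "map" is used instead.)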
def xg_f1(y, t):
t = t.get_label()
    # Binarize the predicted probabilities at the 0.5 threshold
y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y]
return 'f1', 1-f1_score(t, y_bin)
def objective(space):
clf = xgb.XGBClassifier(n_estimators=10000, learning_rate=0.1,
max_depth=space['max_depth'],
min_child_weight=space['min_child_weight'],
subsample=space['subsample'],
colsample_bytree=space['colsample_bytree'],
colsample_bylevel=space['colsample_bylevel'],
gamma=space['gamma'],
scale_pos_weight=space['scale_pos_weight'])
# scale_pos_weight=space['scale_pos_weight'])
# Try using pipeline
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
# estimators.append(('scaler', StandardScaler()))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
best_n = ""
# CMs = list()
aprs = list()
for train_indx, test_indx in kfold.split(dtrain[predictors].index,
dtrain['variable']):
train = dtrain.iloc[train_indx]
valid = dtrain.iloc[test_indx]
# X_test
valid_ = valid[predictors]
# X_train
train_ = train[predictors]
for name, transform in pipeline.steps[:-1]:
transform.fit(train_)
# X_test
valid_ = transform.transform(valid_)
# X_train
train_ = transform.transform(train_)
eval_set = [(train_, train['variable']),
(valid_, valid['variable'])]
# TODO: Try ES on default eval. metric or AUC!!!
pipeline.fit(train[predictors], train['variable'],
clf__eval_set=eval_set, clf__eval_metric="map",
# clf__eval_set=eval_set, clf__eval_metric=xg_f1,
clf__early_stopping_rounds=50)
pred = pipeline.predict_proba(valid[predictors])[:, 1]
aps = average_precision_score(valid['variable'], pred)
aprs.append(aps)
# CMs.append(confusion_matrix(y[test_indx], pred))
best_n = best_n + " " + str(clf.best_ntree_limit)
# CM = np.sum(CMs, axis=0)
# FN = CM[1][0]
# TP = CM[1][1]
# FP = CM[0][1]
# print "TP = {}".format(TP)
# print "FP = {}".format(FP)
# print "FN = {}".format(FN)
# f1 = 2. * TP / (2. * TP + FP + FN)
APR = np.mean(aprs)
print "=== APR : {} ===".format(APR)
    return {'loss': 1 - APR, 'status': STATUS_OK,
            'attachments': {'best_n': best_n}}
space = {
'max_depth': hp.choice("x_max_depth", np.arange(5, 12, 1, dtype=int)),
'min_child_weight': hp.quniform('x_min_child', 1, 20, 1),
'subsample': hp.quniform('x_subsample', 0.5, 1, 0.025),
'colsample_bytree': hp.quniform('x_csbtree', 0.25, 1, 0.025),
'colsample_bylevel': hp.quniform('x_csblevel', 0.25, 1, 0.025),
'gamma': hp.quniform('x_gamma', 0.0, 1, 0.025),
'scale_pos_weight': hp.qloguniform('x_spweight', 0, 6, 1),
# 'lr': hp.quniform('lr', 0.001, 0.5, 0.025)
# 'lr': hp.loguniform('lr', -7, -1)
}
trials = Trials()
best = fmin(fn=objective,
space=space,
algo=tpe.suggest,
max_evals=500,
trials=trials)
import hyperopt
print hyperopt.space_eval(space, best)
best_pars = hyperopt.space_eval(space, best)
best_n = trials.attachments['ATTACH::{}::best_n'.format(trials.best_trial['tid'])]
best_n = max([int(n) for n in best_n.strip().split(' ')])
clf = xgb.XGBClassifier(n_estimators=int(1.25 * best_n),
learning_rate=0.1,
max_depth=best_pars['max_depth'],
min_child_weight=best_pars['min_child_weight'],
subsample=best_pars['subsample'],
colsample_bytree=best_pars['colsample_bytree'],
colsample_bylevel=best_pars['colsample_bylevel'],
gamma=best_pars['gamma'],
scale_pos_weight=best_pars['scale_pos_weight'])
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
# Fit classifier with best hyperparameters on whole data set
pipeline.fit(dtrain[predictors], dtrain['variable'])
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics.log'
file_tgt = os.path.join(data_dir, file_tgt)
X_tgt, feature_names, df, df_orig = load_data_tgt(file_tgt, names, names_to_delete,
delta)
y_probs = pipeline.predict_proba(df[predictors])[:, 1]
idx = y_probs > 0.5
idx_ = y_probs < 0.5
gb_no = list(df_orig['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
with open('gb_results.txt', 'w') as fo:
for line in list(df_orig['star_ID'][idx]):
fo.write(line + '\n')
# Check F1
with open('clean_list_of_new_variables.txt', 'r') as fo:
news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]
news = set(news)
with open('gb_results.txt', 'r') as fo:
gb = fo.readlines()
gb = [line.strip().split('_')[4].split('.')[0] for line in gb]
gb = set(gb)
print "Among new vars found {}".format(len(news.intersection(gb)))
with open('candidates_50perc_threshold.txt', 'r') as fo:
c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
with open('variables_not_in_catalogs.txt', 'r') as fo:
not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]
# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST' not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
gb_no = set([line.strip().split('_')[4].split('.')[0] for line in gb_no])
found_bad = '181193' in gb
print "Found known variable : ", found_bad
FN = len(gb_no.intersection(all_vars))
TP = len(all_vars.intersection(gb))
TN = len(gb_no) - FN
FP = len(gb) - TP
recall = float(TP) / (TP + FN)
precision = float(TP) / (TP + FP)
F1 = 2 * precision * recall / (precision + recall)
print "precision: {}".format(precision)
print "recall: {}".format(recall)
print "F1: {}".format(F1)
print "TN={}, FP={}".format(TN, FP)
print "FN={}, TP={}".format(FN, TP)
|
|
"""Support for non-delivered packages recorded in AfterShip."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Information provided by AfterShip"
ATTR_TRACKINGS = "trackings"
BASE = "https://track.aftership.com/"
CONF_SLUG = "slug"
CONF_TITLE = "title"
CONF_TRACKING_NUMBER = "tracking_number"
DEFAULT_NAME = "aftership"
UPDATE_TOPIC = DOMAIN + "_update"
ICON = "mdi:package-variant-closed"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
SERVICE_ADD_TRACKING = "add_tracking"
SERVICE_REMOVE_TRACKING = "remove_tracking"
ADD_TRACKING_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_TRACKING_NUMBER): cv.string,
vol.Optional(CONF_TITLE): cv.string,
vol.Optional(CONF_SLUG): cv.string,
}
)
REMOVE_TRACKING_SERVICE_SCHEMA = vol.Schema(
{vol.Required(CONF_SLUG): cv.string, vol.Required(CONF_TRACKING_NUMBER): cv.string}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
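# Example configuration.yaml entry consistent with the schema above (a sketch;
# the API key is a placeholder and the platform key is assumed to be
# "aftership"):
#
#   sensor:
#     - platform: aftership
#       api_key: YOUR_AFTERSHIP_API_KEY
#       name: aftership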
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the AfterShip sensor platform."""
from pyaftership.tracker import Tracking
apikey = config[CONF_API_KEY]
name = config[CONF_NAME]
session = async_get_clientsession(hass)
aftership = Tracking(hass.loop, session, apikey)
await aftership.get_trackings()
if not aftership.meta or aftership.meta["code"] != 200:
_LOGGER.error(
"No tracking data found. Check API key is correct: %s", aftership.meta
)
return
instance = AfterShipSensor(aftership, name)
async_add_entities([instance], True)
async def handle_add_tracking(call):
"""Call when a user adds a new Aftership tracking from HASS."""
title = call.data.get(CONF_TITLE)
slug = call.data.get(CONF_SLUG)
tracking_number = call.data[CONF_TRACKING_NUMBER]
await aftership.add_package_tracking(tracking_number, title, slug)
async_dispatcher_send(hass, UPDATE_TOPIC)
hass.services.async_register(
DOMAIN,
SERVICE_ADD_TRACKING,
handle_add_tracking,
schema=ADD_TRACKING_SERVICE_SCHEMA,
)
async def handle_remove_tracking(call):
"""Call when a user removes an Aftership tracking from HASS."""
slug = call.data[CONF_SLUG]
tracking_number = call.data[CONF_TRACKING_NUMBER]
await aftership.remove_package_tracking(slug, tracking_number)
async_dispatcher_send(hass, UPDATE_TOPIC)
hass.services.async_register(
DOMAIN,
SERVICE_REMOVE_TRACKING,
handle_remove_tracking,
schema=REMOVE_TRACKING_SERVICE_SCHEMA,
)
class AfterShipSensor(Entity):
"""Representation of a AfterShip sensor."""
def __init__(self, aftership, name):
"""Initialize the sensor."""
self._attributes = {}
self._name = name
self._state = None
self.aftership = aftership
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return "packages"
@property
def device_state_attributes(self):
"""Return attributes for the sensor."""
return self._attributes
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
async def async_added_to_hass(self):
"""Register callbacks."""
self.hass.helpers.dispatcher.async_dispatcher_connect(
UPDATE_TOPIC, self.force_update
)
async def force_update(self):
"""Force update of data."""
await self.async_update(no_throttle=True)
await self.async_update_ha_state()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Get the latest data from the AfterShip API."""
await self.aftership.get_trackings()
if not self.aftership.meta:
_LOGGER.error("Unknown errors when querying")
return
if self.aftership.meta["code"] != 200:
_LOGGER.error(
"Errors when querying AfterShip. %s", str(self.aftership.meta)
)
return
status_to_ignore = {"delivered"}
status_counts = {}
trackings = []
not_delivered_count = 0
for track in self.aftership.trackings["trackings"]:
status = track["tag"].lower()
name = (
track["tracking_number"] if track["title"] is None else track["title"]
)
last_checkpoint = (
"Shipment pending"
if track["tag"] == "Pending"
else track["checkpoints"][-1]
)
status_counts[status] = status_counts.get(status, 0) + 1
trackings.append(
{
"name": name,
"tracking_number": track["tracking_number"],
"slug": track["slug"],
"link": "%s%s/%s" % (BASE, track["slug"], track["tracking_number"]),
"last_update": track["updated_at"],
"expected_delivery": track["expected_delivery"],
"status": track["tag"],
"last_checkpoint": last_checkpoint,
}
)
if status not in status_to_ignore:
not_delivered_count += 1
else:
_LOGGER.debug("Ignoring %s as it has status: %s", name, status)
self._attributes = {
ATTR_ATTRIBUTION: ATTRIBUTION,
**status_counts,
ATTR_TRACKINGS: trackings,
}
self._state = not_delivered_count
|
|
#!/usr/bin/env python
#--------------------------------------------------------------
# The functions for the lattice modifications that replace the base
# RF gap nodes with the axis field nodes that can overlap with
# quadrupoles.
# The initial base RF gap nodes have zero length, and the length
# of the new nodes is defined by the RF fields on the RF gap axis.
# We also have to adjust these lengths to avoid overlapping RF fields.
# This correction will be on the level of microns, and it will
# not change the strength of the fields.
# The RF fields can overlap zero-length elements like markers or
# dipole correctors. The whole RF gap field will be chopped into pieces.
# The RF fields can also overlap the fields of one or several quads, and
# these quads' fields will be included in the RF gap node and used
# during the bunch tracking.
#--------------------------------------------------------------
import math
import sys
import os
import time
# import from orbit Python utilities
from orbit.utils import orbitFinalize
from orbit.py_linac.lattice import LinacApertureNode
from orbit.py_linac.lattice import Quad, Drift
from orbit.py_linac.lattice import BaseRF_Gap, AxisFieldRF_Gap
from orbit.py_linac.lattice import OverlappingQuadsNode
from orbit.py_linac.lattice import AxisField_and_Quad_RF_Gap
from orbit.py_linac.overlapping_fields import EngeFunction
from orbit_utils import Function
from rf_models_modifications_lib import Make_AxisFieldRF_Gaps_and_Find_Neihbor_Nodes
def GetEngeFunction(quad):
"""
    This is an example of an EngeFunctionFactory function.
    It returns the EngeFunction instance for a given Quad node.
    The quad must have the aperture parameter.
    It is used in the quad+rf lattice transformation by default.
    Users can supply their own EngeFunctionFactory function.
"""
length_param = quad.getLength()
if(quad.hasParam("aperture")):
acceptance_diameter_param = quad.getParam("aperture")
cutoff_level = 0.001
func = EngeFunction(length_param,acceptance_diameter_param,cutoff_level)
return func
else:
msg = "Inside the Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes Python function. "
msg += os.linesep
msg += "Cannot create the EngeFunction for the quad!"
msg += os.linesep
msg = msg + "quad name = " + quad.getName()
msg = msg + os.linesep
msg = msg + "It does not have the aperture parameter!"
msg = msg + os.linesep
orbitFinalize(msg)
return None
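# Typical use of the replacement function below (a sketch; the step size,
# directory and sequence names are placeholders, not taken from a real lattice):
#
#   Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes(
#       accLattice, z_step=0.005,
#       dir_location="./axis_field_files/",
#       accSeq_Names=["MEBT", "DTL1"])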
def Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes(\
accLattice,\
z_step,\
dir_location="",\
accSeq_Names = [],\
cavs_Names = [], \
EngeFunctionFactory = GetEngeFunction):
"""
Function will replace BaseRF_Gap nodes by AxisField_and_Quad_RF_Gap.
    It is assumed that AxisField_and_Quad_RF_Gap nodes do not overlap any
    other nodes (except Drifts). The location of the axis field
    data files is specified by the dir_location input variable.
The replacement will be performed only for specified sequences.
If the cavities list is empty, all of them will be replaced!
If you want to replace the nodes in a particular cavity please specify it!
The dir_location is the location of the directory with the axis field
files.
We assume that the RF gap field overlaps only with the fields of
two neighboring quads.
z_step defines the longitudinal step during the tracking through the
elements with overlapping fields.
    The longitudinal dependence of the magnetic field in the quads is
    described by Enge functions produced by the GetEngeFunction factory
    by default. The user can supply his/her own factory for these functions.
"""
#-----------------------------------------------------------------------------
"""
node_pos_dict = accLattice.getNodePositionsDict()
#for node_ind in range(len(accLattice.getNodes())):
for node_ind in range(199,298):
node = accLattice.getNodes()[node_ind]
#if(not isinstance(node,Quad)): continue
(pos_start,pos_end) = node_pos_dict[node]
print "debug ind=",node_ind," node=",node.getName()," (pos_start,pos_end)=",(pos_start,pos_end)
"""
#-----------------------------------------------------------------------------
	#---- rf_length_tolerance: RF fields should not overlap by more than this value
rf_length_tolerance = 0.0001
drift_length_tolerance = 0.00000001
node_pos_dict = accLattice.getNodePositionsDict()
for accSeq_Name in accSeq_Names:
accSeq = accLattice.getSequence(accSeq_Name)
#print "debug ================== START seq=",accSeq.getName()
if(accSeq == None):
msg = "The Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes Python function. "
msg += os.linesep
msg += "Cannot find the acc. sequence with this name in the lattice!"
msg += os.linesep
msg = msg + "accSeq name = " + accSeq_Name
msg = msg + os.linesep
msg = msg + "lattice name = " + accLattice.getName()
msg = msg + os.linesep
orbitFinalize(msg)
#--------------------------------------------------------------
nodes = accSeq.getNodes()
		#--- just in case: sort the nodes if they are not in the right order
nodes = sorted(nodes, key = lambda x: x.getPosition(), reverse = False)
#---- create Enge Functions' dictionary by using the usual quad nodes as keys
enge_func_quad_dict = {}
quads = accLattice.getQuads(accSeq)
for quad in quads:
enge_func_quad_dict[quad] = EngeFunctionFactory(quad)
#--------------------------------------------
cavs = accSeq.getRF_Cavities()
if(len(cavs_Names) > 0):
cavs_tmp = []
for cav in cavs:
if(cav.getName() in cavs_Names):
cavs_tmp.append(cav)
cavs = cavs_tmp
#---- let's check that all rf gaps are BaseRF_Gap instances
#---- and create the dictionaries to account for new rf gaps later
#---- rf_gap_to_cavs_dict[rf_gap] = cav
#---- new_rf_gaps_arr_in_cav_dict[cav] = [new_AxisField_and_Quad_RF_Gap_0,..]
rf_gap_to_cavs_dict = {}
new_rf_gaps_arr_in_cav_dict = {}
for cav in cavs:
rf_gaps = cav.getRF_GapNodes()
new_rf_gaps_arr_in_cav_dict[cav] = []
for rf_gap in rf_gaps:
rf_gap_to_cavs_dict[rf_gap] = cav
if(not isinstance(rf_gap,BaseRF_Gap)):
msg = "The Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes function. "
msg += "You are trying to replace the RF gap which is not BaseRF_Gap instance!"
msg += os.linesep
msg = msg + "RF Gap =" + rf_gap.getName()
msg = msg + os.linesep
msg = msg + "Type of gap node=" + rf_gap.getType()
msg = msg + os.linesep
orbitFinalize(msg)
#---- af_rf_gap_dict[rf_gap] = AxisFieldRF_Gap(rf_gap)
#---- rf_gap_ind_up_down_arr[[rf_gap,gap_ind,drift_down_ind,drift_up_ind],...]
(af_rf_gap_dict,rf_gap_ind_up_down_arr) = Make_AxisFieldRF_Gaps_and_Find_Neihbor_Nodes(rf_length_tolerance,accLattice,accSeq,dir_location,cavs)
if(len(rf_gap_ind_up_down_arr) == 0):
msg = "The Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes function. "
msg += "This Acc. Sequence does not have BaseRF_Gaps!"
msg += os.linesep
msg += "It is better to use another method of the lattice modification!"
msg += os.linesep
msg = msg + "Acc Sequence =" + accSeq.getName()
msg = msg + os.linesep
orbitFinalize(msg)
#-----------------------------------------------------
#---- Now we are going to build a new lattice with AxisField_and_Quad_RF_Gap
#---- and OverlappingQuadsNode classes. Even drifts will be replaced with
#---- these nodes to simplify the lattice structure. The original lattice
#---- should have only drifts, quads, and BaseRF_Gaps as elements with
#---- non-zero length. If the user wants to include other sources of the EM
#---- fields, he/she has to create or modify the classes responsible for
#---- handling the overlapping EM fields.
#---- We will generate new nodes in an arbitrary order, but at the end
#---- we will sort them according to their position.
#-----------------------------------------------------
new_nodes = []
#-------------------------------------------------------------------------
#---- 1st STEP - nodes from the beginning to the first RF gap
[rf_gap,gap_ind,drift_down_ind,drift_up_ind] = rf_gap_ind_up_down_arr[0]
node_ind_start = 0
node_ind_end = drift_down_ind
(node_pos_start,pos_tmp) = node_pos_dict[nodes[node_ind_start]]
accSeq_z_start = node_pos_start
(rf_gap_pos_start, rf_gap_pos_end) = node_pos_dict[rf_gap]
(z_gap_min,z_gap_max) = af_rf_gap_dict[rf_gap].getZ_Min_Max()
(rf_gap_pos_start, rf_gap_pos_end) = (rf_gap_pos_start+z_gap_min, rf_gap_pos_end+z_gap_max)
#print "debug rf gap=",rf_gap.getName()," (rf_gap_pos_start, rf_gap_pos_end)=",(rf_gap_pos_start, rf_gap_pos_end)
#print "debug (node_ind_start,node_ind_end) = ",(node_ind_start,node_ind_end)
#for ind in range(node_ind_start,rf_gap_ind_up_down_arr[0][1]+1):
# print "debug ind =",ind," node=",nodes[ind].getName()," (node_pos_start,node_pos_end)=",node_pos_dict[nodes[ind]]
node_pos_start = node_pos_start - accSeq_z_start
node_pos_end = rf_gap_pos_start - accSeq_z_start
		#---- if the length of the region is equal to 0, we will add only the zero-length elements
(local_quads,zero_length_nodes) = Get_quads_zeroLengthNodes_in_range(accSeq,node_ind_start,node_ind_end)
if(abs(node_pos_start - node_pos_end) > drift_length_tolerance):
#print "debug before n_quads=",len(local_quads)," n_markers=",len(zero_length_nodes)
pos_start = node_pos_start
pos_end = node_pos_end
ovrlp_count = 0
for ind in range(len(zero_length_nodes)+1):
node = OverlappingQuadsNode()
name = rf_gap.getName()+":Before:"+str(ovrlp_count+1)+":"+node.getType()
node.setName(name)
if(ind == len(zero_length_nodes)):
pos_end = node_pos_end
else:
pos_end = zero_length_nodes[ind].getPosition()
length = pos_end - pos_start
if(abs(length) < drift_length_tolerance):
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
continue
pos = (pos_end + pos_start)/2
node.setLength(length)
node.setPosition(pos)
for quad in local_quads:
node.addQuad(quad,enge_func_quad_dict[quad],quad.getPosition()-pos_start)
node.setZ_Step(z_step)
nParts = int(length/z_step)+1
node.setnParts(nParts)
new_nodes.append(node)
ovrlp_count += 1
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
else:
new_nodes += zero_length_nodes
#--------------------------------------------------------------------------
		#---- 2nd STEP - nodes from the last RF gap to the end of the Acc. Sequence
[rf_gap,gap_ind,drift_down_ind,drift_up_ind] = rf_gap_ind_up_down_arr[len(rf_gap_ind_up_down_arr)-1]
node_ind_start = drift_up_ind
node_ind_end = len(nodes) - 1
(rf_gap_pos_start, rf_gap_pos_end) = node_pos_dict[rf_gap]
(z_gap_min,z_gap_max) = af_rf_gap_dict[rf_gap].getZ_Min_Max()
(rf_gap_pos_start, rf_gap_pos_end) = (rf_gap_pos_start+z_gap_min, rf_gap_pos_end+z_gap_max)
node_pos_start = rf_gap_pos_end
(node_pos_tmp,node_pos_end) = node_pos_dict[nodes[node_ind_end]]
(local_quads,zero_length_nodes) = Get_quads_zeroLengthNodes_in_range(accSeq,node_ind_start,node_ind_end)
if(abs(node_pos_start - node_pos_end) > drift_length_tolerance):
#print "debug after n_quads=",len(local_quads)," n_markers=",len(zero_length_nodes)
pos_start = node_pos_start - accSeq_z_start
pos_end = node_pos_end - accSeq_z_start
ovrlp_count = 0
for ind in range(len(zero_length_nodes)+1):
node = OverlappingQuadsNode()
name = rf_gap.getName()+":After:"+str(ovrlp_count+1)+":"+node.getType()
node.setName(name)
if(ind == len(zero_length_nodes)):
pos_end = node_pos_end - accSeq_z_start
else:
pos_end = zero_length_nodes[ind].getPosition()
length = pos_end - pos_start
if(abs(length) < drift_length_tolerance):
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
continue
pos = (pos_end + pos_start)/2
node.setLength(length)
node.setPosition(pos)
for quad in local_quads:
node.addQuad(quad,enge_func_quad_dict[quad],quad.getPosition()-pos_start)
node.setZ_Step(z_step)
nParts = int(length/z_step)+1
node.setnParts(nParts)
new_nodes.append(node)
ovrlp_count += 1
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
else:
new_nodes += zero_length_nodes
#--------------------------------------------------------------------------
		#---- 3rd STEP - nodes of AxisField_and_Quad_RF_Gap type - new RF gaps
#---- rf_gap_to_cavs_dict[rf_gap] = cav
#---- new_rf_gaps_arr_in_cav_dict[cav] = [new_AxisField_and_Quad_RF_Gap_0,..]
n_rf_gaps = len(rf_gap_ind_up_down_arr)
for local_gap_ind in range(n_rf_gaps):
[rf_gap,gap_ind,drift_down_ind,drift_up_ind] = rf_gap_ind_up_down_arr[local_gap_ind]
cav = rf_gap_to_cavs_dict[rf_gap]
new_rf_gaps_arr = new_rf_gaps_arr_in_cav_dict[cav]
axisFieldRF_Gap = af_rf_gap_dict[rf_gap]
(local_quads,zero_length_nodes) = Get_quads_zeroLengthNodes_in_range(accSeq,drift_down_ind,drift_up_ind)
local_quads = Find_Neihbor_Quads(accLattice,accSeq,rf_gap,gap_ind,af_rf_gap_dict,enge_func_quad_dict)
(rf_gap_pos_start, rf_gap_pos_end) = node_pos_dict[rf_gap]
rf_gap_pos_zero = rf_gap_pos_start - accSeq_z_start
(z_gap_min,z_gap_max) = axisFieldRF_Gap.getZ_Min_Max()
(rf_gap_pos_start, rf_gap_pos_end) = (rf_gap_pos_start+z_gap_min, rf_gap_pos_end+z_gap_max)
pos_start = rf_gap_pos_start - accSeq_z_start
pos_end = rf_gap_pos_end - accSeq_z_start
#--------------------------------------------------------
ovrlp_count = 0
for ind in range(len(zero_length_nodes)+1):
axisField_and_Quad_RF_Gap = AxisField_and_Quad_RF_Gap(axisFieldRF_Gap)
name = rf_gap.getName()+":"+axisField_and_Quad_RF_Gap.getType()+":"+str(ovrlp_count+1)
new_rf_gaps_arr.append(axisField_and_Quad_RF_Gap)
axisField_and_Quad_RF_Gap.setName(name)
if(ind == len(zero_length_nodes)):
pos_end = rf_gap_pos_end - accSeq_z_start
else:
pos_end = zero_length_nodes[ind].getPosition()
length = pos_end - pos_start
if(abs(length) < drift_length_tolerance):
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
continue
pos = (pos_end + pos_start)/2
z_min = pos_start - rf_gap_pos_zero
z_max = pos_end - rf_gap_pos_zero
axisField_and_Quad_RF_Gap.setZ_Min_Max(z_min,z_max)
axisField_and_Quad_RF_Gap.setPosition(pos)
for quad in local_quads:
axisField_and_Quad_RF_Gap.addQuad(quad,enge_func_quad_dict[quad],quad.getPosition() - rf_gap_pos_zero)
axisField_and_Quad_RF_Gap.setZ_Step(z_step)
new_nodes.append(axisField_and_Quad_RF_Gap)
ovrlp_count += 1
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
#--------------------------------------------------------------------------
		#---- 4th STEP - nodes between two RF gaps
for local_gap_ind in range(n_rf_gaps-1):
[rf_gap0,gap0_ind,drift_down0_ind,drift_up0_ind] = rf_gap_ind_up_down_arr[local_gap_ind]
[rf_gap1,gap1_ind,drift_down1_ind,drift_up1_ind] = rf_gap_ind_up_down_arr[local_gap_ind+1]
(rf_gap0_pos_start, rf_gap0_pos_end) = node_pos_dict[rf_gap0]
(z_gap0_min,z_gap0_max) = af_rf_gap_dict[rf_gap0].getZ_Min_Max()
(rf_gap0_pos_start, rf_gap0_pos_end) = (rf_gap0_pos_start+z_gap0_min, rf_gap0_pos_end+z_gap0_max)
(rf_gap1_pos_start, rf_gap1_pos_end) = node_pos_dict[rf_gap1]
(z_gap1_min,z_gap1_max) = af_rf_gap_dict[rf_gap1].getZ_Min_Max()
(rf_gap1_pos_start, rf_gap1_pos_end) = (rf_gap1_pos_start+z_gap1_min, rf_gap1_pos_end+z_gap1_max)
(local_quads,zero_length_nodes) = Get_quads_zeroLengthNodes_in_range(accSeq,drift_up0_ind,drift_down1_ind)
node_pos_start = rf_gap0_pos_end - accSeq_z_start
node_pos_end = rf_gap1_pos_start - accSeq_z_start
if(abs(node_pos_start - node_pos_end) > drift_length_tolerance):
pos_start = node_pos_start
pos_end = node_pos_end
ovrlp_count = 0
for ind in range(len(zero_length_nodes)+1):
node = OverlappingQuadsNode()
name = rf_gap0.getName()+":After:"+str(ovrlp_count+1)+":"+node.getType()
node.setName(name)
if(ind == len(zero_length_nodes)):
pos_end = node_pos_end
else:
pos_end = zero_length_nodes[ind].getPosition()
length = pos_end - pos_start
if(abs(length) < drift_length_tolerance):
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
continue
pos = (pos_end + pos_start)/2
node.setLength(length)
node.setPosition(pos)
for quad in local_quads:
node.addQuad(quad,enge_func_quad_dict[quad],quad.getPosition()-pos_start)
node.setZ_Step(z_step)
nParts = int(length/z_step)+1
node.setnParts(nParts)
new_nodes.append(node)
ovrlp_count += 1
if(ind < len(zero_length_nodes)):
new_nodes.append(zero_length_nodes[ind])
pos_start = pos_end
else:
new_nodes += zero_length_nodes
#--------------------------------------------------------------------------
new_nodes = sorted(new_nodes, key = lambda x: x.getPosition(), reverse = False)
#--------------------------------------------------------------------------
#---- rf_gap_to_cavs_dict[rf_gap] = cav
#---- new_rf_gaps_arr_in_cav_dict[cav] = [new_AxisField_and_Quad_RF_Gap_0,..]
		#---- the new instances of AxisField_and_Quad_RF_Gap have setAsFirstRFGap(False) by default
for cav in cavs:
new_rf_gaps_arr = new_rf_gaps_arr_in_cav_dict[cav]
cav.removeAllGapNodes()
for axisField_and_Quad_RF_Gap in new_rf_gaps_arr:
cav.addRF_GapNode(axisField_and_Quad_RF_Gap)
#------------------------------------------
#---- let's replace all nodes in the AccSeq by the new set
accSeq.removeAllNodes()
for node in new_nodes:
accSeq.addNode(node)
#---- new set of nodes for the lattice
new_latt_nodes = []
for accSeq in accLattice.getSequences():
new_latt_nodes += accSeq.getNodes()
accLattice.setNodes(new_latt_nodes)
accLattice.initialize()
#------- debug START printing of new nodes and their positions in the lattice
"""
node_pos_dict = accLattice.getNodePositionsDict()
for accSeq_Name in accSeq_Names:
accSeq = accLattice.getSequence(accSeq_Name)
nodes = accSeq.getNodes()
accSeq_z_start = node_pos_dict[nodes[0]][0]
#--------------------------------------------------------------------------
for node in nodes:
pos = node.getPosition()
(pos_start,pos_end) = node_pos_dict[node]
delta = pos - ((pos_start+pos_end)/2 - accSeq_z_start)
if(abs(delta) > drift_length_tolerance):
print "debug new node=",node.getName()," pos=",node.getPosition()," (pos_start,pos_end)=",node_pos_dict[node]," delta=",delta
"""
#------- debug STOP printing of new nodes and their positions in the lattice
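#---------------------------------------------------------------------------
#---- Illustrative usage sketch (not part of the original module): how
#---- Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes might be called for
#---- an already constructed linac lattice. The sequence name, the z_step
#---- value and the axis-field directory are hypothetical.
#---------------------------------------------------------------------------
def Example_Replace_Gaps_Usage(accLattice):
	"""
	Hedged example: replaces BaseRF_Gap nodes in a (hypothetical) "MEBT"
	sequence by AxisField_and_Quad_RF_Gap nodes with a 1 mm tracking step.
	"""
	z_step = 0.001
	dir_location = "./axis_fields/"
	accSeq_Names = ["MEBT",]
	cavs_Names = []     # empty list => replace the gaps in all cavities
	Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes(
		accLattice, z_step, dir_location, accSeq_Names, cavs_Names)
	return accLattice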
def Get_quads_zeroLengthNodes_in_range(accSeq,node_ind_start,node_ind_end):
"""
	Returns all quads and zero-length nodes in this index range.
	It also checks that every element inside this range either has zero
	length or is a drift or a quad.
"""
nodes = accSeq.getNodes()
zero_length_nodes = []
child_nodes = []
quads = []
for node_ind in range(node_ind_start,node_ind_end+1):
node = nodes[node_ind]
children_arr = node.getBodyChildren()
if(len(children_arr) > 0):
#print "debug ========= parent node=",node.getName()," pos = ",node.getPosition()
for child in children_arr:
if(child.getLength() == 0.):
child_nodes.append(child)
#print " debug child=",child.getName()," pos=",child.getPosition()
if(not isinstance(node,BaseRF_Gap)):
length = node.getLength()
if(length == 0.):
zero_length_nodes.append(node)
else:
if(isinstance(node,Quad)):
quads.append(node)
else:
if(not isinstance(node,Drift)):
msg = "The Replace_BaseRF_Gap_and_Quads_to_Overlapping_Nodes function. "
msg += "This Acc. Sequence has an element that "
msg += os.linesep
msg += "1. has non-zero length"
msg += os.linesep
msg += "2. not a quad"
msg += os.linesep
msg += "3. not a drift"
msg += os.linesep
msg += "This function does not know how to handle this elelement!"
msg += os.linesep
msg = msg + "Acc Sequence =" + accSeq.getName()
msg = msg + os.linesep
msg = msg + "Acc element =" + node.getName()
msg = msg + os.linesep
msg = msg + "Acc element type =" + node.getType()
msg = msg + os.linesep
orbitFinalize(msg)
zero_length_nodes += child_nodes
zero_length_nodes = sorted(zero_length_nodes, key = lambda x: x.getPosition(), reverse = False)
return (quads,zero_length_nodes)
def Find_Neihbor_Quads(accLattice,accSeq,rf_gap,gap_ind,af_rf_gap_dict,enge_func_quad_dict):
"""
This function will find the quads with the fields overlapping RF gap fields.
It returns the quads that should be taken into account.
gap_ind is an index of rf_gap in the original accLattice.getNodes().
"""
nodes = accSeq.getNodes()
node_pos_dict = accLattice.getNodePositionsDict()
(z_min,z_max) = af_rf_gap_dict[rf_gap].getZ_Min_Max()
(gap_pos_start,gap_pos_end) = node_pos_dict[rf_gap]
(gap_pos_start,gap_pos_end) = (gap_pos_start+z_min,gap_pos_end+z_max)
quads = []
#---- let's go down
node_ind = gap_ind
while(node_ind >= 0):
node = nodes[node_ind]
if(isinstance(node,Quad)):
(node_pos_start,node_pos_end) = node_pos_dict[node]
(z_min_node,z_max_node) = enge_func_quad_dict[node].getLimitsZ()
(node_pos_start,node_pos_end) = ((node_pos_start+node_pos_end)/2 + z_min_node,(node_pos_start+node_pos_end)/2 + z_max_node)
delta = node_pos_end - gap_pos_start
if(delta > 0.):
quads.append(node)
else:
break
node_ind -= 1
#---- let's go up
node_ind = gap_ind
while(node_ind <= len(nodes) -1):
node = nodes[node_ind]
if(isinstance(node,Quad)):
(node_pos_start,node_pos_end) = node_pos_dict[node]
(z_min_node,z_max_node) = enge_func_quad_dict[node].getLimitsZ()
(node_pos_start,node_pos_end) = ((node_pos_start+node_pos_end)/2 + z_min_node,(node_pos_start+node_pos_end)/2 + z_max_node)
delta = node_pos_start - gap_pos_end
if(delta < 0.):
quads.append(node)
else:
break
node_ind += 1
quads = sorted(quads, key = lambda x: x.getPosition(), reverse = False)
return quads
|
|
#!/usr/bin/env python
"""
exceptions.py
"""
################################################################################
#
# exceptions.py
#
#
# Copyright (c) 10/9/2009 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
import sys
import os
from collections import defaultdict
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Exceptions
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# if __name__ != '__main__':
# import task
class error_task(Exception):
def __init__(self, *errmsg):
Exception.__init__(self, *errmsg)
# list of associated tasks
self.tasks = set()
# error message
self.main_msg = ""
def get_main_msg(self):
"""
Make main message with lists of task names
Prefix with new lines for added emphasis
"""
        # turn task names into 'def xxx(...):' format
task_names = "\n".join("task = %r" % t._name for t in self.tasks)
if len(self.main_msg):
return "\n\n" + self.main_msg + " for\n\n%s\n" % task_names
else:
return "\n\n%s\n" % task_names
def __str__(self):
# indent
msg = self.get_main_msg() + " ".join(map(str, self.args))
return " " + msg.replace("\n", "\n ")
def specify_task(self, task, main_msg):
self.tasks.add(task)
self.main_msg = main_msg
return self
class error_task_contruction(error_task):
"""
    Exceptions when constructing pipeline tasks
"""
def __init__(self, task, main_msg, *errmsg):
error_task.__init__(self, *errmsg)
self.specify_task(task, main_msg)
class RethrownJobError(error_task):
"""
Wrap up one or more exceptions rethrown across process boundaries
See multiprocessor.Server.handle_request/serve_client for an analogous function
"""
def __init__(self, job_exceptions=[]):
error_task.__init__(self)
self.job_exceptions = list(job_exceptions)
def __len__(self):
return len(self.job_exceptions)
def append(self, job_exception):
self.job_exceptions.append(job_exception)
def task_to_func_name(self, task_name):
if "mkdir " in task_name:
return task_name
return "def %s(...):" % task_name.replace("__main__.", "")
def get_nth_exception_str(self, nn=-1):
if nn == -1:
nn = len(self.job_exceptions) - 1
task_name, job_name, exception_name, exception_value, exception_stack = self.job_exceptions[
nn]
message = "\nException #%d\n" % (nn + 1)
message += " '%s%s' raised in ...\n" % (exception_name, exception_value)
if task_name:
message += " Task = %s\n %s\n\n" % (self.task_to_func_name(task_name),
job_name)
message += "%s\n" % (exception_stack, )
return message.replace("\n", "\n ")
def __str__(self):
message = ["\nOriginal exception%s:\n" %
("s" if len(self.job_exceptions) > 1 else "")]
for ii in range(len(self.job_exceptions)):
message += self.get_nth_exception_str(ii)
#
# For each exception:
# turn original exception stack message into an indented string
#
return (self.get_main_msg()).replace("\n", "\n ") + "".join(message)
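# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): each entry appended to
# a RethrownJobError is a 5-tuple of
# (task_name, job_name, exception_name, exception_value, exception_stack),
# matching the unpacking in get_nth_exception_str() above. The task and job
# names below are hypothetical.
# -----------------------------------------------------------------------------
def make_example_rethrown_job_error():
    """Hedged example of wrapping a single job exception."""
    err = RethrownJobError()
    err.append(("my_task",
                "[input.txt -> output.txt]",
                "exceptions.ValueError",
                "(bad value)",
                "Traceback (most recent call last):\n  ...\n"))
    return err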
class error_input_file_does_not_match(error_task):
pass
class fatal_error_input_file_does_not_match(error_task):
pass
class task_FilesArgumentsError(error_task):
pass
class task_FilesreArgumentsError(error_task):
pass
class MissingInputFileError(error_task):
pass
class JobSignalledBreak(error_task):
pass
class JobSignalledSuspend(error_task):
pass
class JobSignalledResume(error_task):
pass
class JobFailed(error_task):
pass
class PostTaskArgumentError(error_task):
pass
class JobsLimitArgumentError(error_task):
pass
class error_task_get_output(error_task_contruction):
pass
class error_task_transform_inputs_multiple_args(error_task_contruction):
pass
class error_task_transform(error_task_contruction):
pass
class error_task_product(error_task_contruction):
pass
class error_task_mkdir(error_task_contruction):
pass
class error_task_permutations(error_task_contruction):
pass
class error_task_combinations(error_task_contruction):
pass
class error_task_combinations_with_replacement(error_task_contruction):
pass
class error_task_merge(error_task_contruction):
pass
class error_task_subdivide(error_task_contruction):
pass
class error_task_originate(error_task_contruction):
pass
class error_task_collate(error_task_contruction):
pass
class error_task_collate_inputs_multiple_args(error_task_contruction):
pass
class error_task_split(error_task_contruction):
pass
class error_task_files_re(error_task_contruction):
pass
class error_task_files(error_task_contruction):
pass
class error_task_parallel(error_task_contruction):
pass
class error_making_directory(error_task):
pass
class error_duplicate_task_name(error_task):
pass
class error_decorator_args(error_task):
pass
class error_task_name_lookup_failed(error_task):
pass
class error_task_decorator_takes_no_args(error_task):
pass
class error_function_is_not_a_task(error_task):
pass
class error_ambiguous_task(error_task):
pass
class error_not_a_pipeline(error_task):
pass
class error_circular_dependencies(error_task):
pass
class error_not_a_directory(error_task):
pass
class error_missing_output(error_task):
pass
class error_job_signalled_interrupt(error_task):
pass
class error_node_not_task(error_task):
pass
class error_missing_runtime_parameter(error_task):
pass
class error_unescaped_regular_expression_forms(error_task):
pass
class error_checksum_level(error_task):
pass
class error_missing_args(error_task):
pass
class error_too_many_args(error_task):
pass
class error_inputs_multiple_args(error_task):
pass
class error_set_input(error_task):
pass
class error_set_output(error_task):
pass
class error_no_head_tasks(error_task):
pass
class error_no_tail_tasks(error_task):
pass
class error_executable_str(error_task):
pass
class error_extras_wrong_type(error_task):
pass
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Testing
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
import unittest
#
# minimal task object to test exceptions
#
class task:
class Task (object):
"""
dummy task
"""
_action_mkdir = 1
def __init__(self, _name, _action_type=0):
self._action_type = _action_type
self._name = _name
class Test_exceptions(unittest.TestCase):
# self.assertEqual(self.seq, range(10))
# self.assert_(element in self.seq)
# self.assertRaises(ValueError, random.sample, self.seq, 20)
def test_error_task(self):
"""
test
"""
fake_task1 = task.Task("task1")
fake_task2 = task.Task("task2")
fake_mkdir_task3 = task.Task("task3", task.Task._action_mkdir)
fake_mkdir_task4 = task.Task("task4", task.Task._action_mkdir)
e = error_task()
e.specify_task(fake_task1, "Some message 0")
e.specify_task(fake_task2, "Some message 1")
e.specify_task(fake_mkdir_task3, "Some message 2")
e.specify_task(fake_mkdir_task4, "Some message 3")
self.assertEqual(str(e),
"""
Some message 3 for
'def task1(...):'
'def task2(...):'
task3
task4
""")
def test_RethrownJobError(self):
"""
test
"""
            # task_name, job_name, exception_name, exception_value, exception_stack
exception_data = [
[
"task1",
"[[temp_branching_dir/a.2, a.1] -> temp_branching_dir/a.3]",
"ruffus.task.MissingInputFileError",
"(instance value)",
"Traceback (most recent call last):\n File \"what.file.py\", line 333, in some_func\n somecode(sfasf)\n"
],
[
"task1",
"[None -> [temp_branching_dir/a.1, temp_branching_dir/b.1, temp_branching_dir/c.1]]",
"exceptions.ZeroDivisionError:",
"(1)",
"Traceback (most recent call last):\n File \"anotherfile.py\", line 345, in other_func\n badcode(rotten)\n"
]
]
e = RethrownJobError(exception_data)
fake_task1 = task.Task("task1")
fake_task2 = task.Task("task2")
fake_mkdir_task3 = task.Task("task3", task.Task._action_mkdir)
fake_mkdir_task4 = task.Task("task4", task.Task._action_mkdir)
e.specify_task(fake_task1, "Exceptions running jobs")
e.specify_task(fake_task2, "Exceptions running jobs")
e.specify_task(fake_mkdir_task3, "Exceptions running jobs")
e.specify_task(fake_mkdir_task4, "Exceptions running jobs")
self.assertEqual(str(e),
"""
Exceptions running jobs for
'def task1(...):'
'def task2(...):'
task3
task4
Original exceptions:
Exception #1
ruffus.task.MissingInputFileError(instance value):
for task1.[[temp_branching_dir/a.2, a.1] -> temp_branching_dir/a.3]
Traceback (most recent call last):
File "what.file.py", line 333, in some_func
somecode(sfasf)
Exception #2
exceptions.ZeroDivisionError:(1):
for task1.[None -> [temp_branching_dir/a.1, temp_branching_dir/b.1, temp_branching_dir/c.1]]
Traceback (most recent call last):
File "anotherfile.py", line 345, in other_func
badcode(rotten)
""")
#
# debug code not run if called as a module
#
if __name__ == '__main__':
if sys.argv.count("--debug"):
sys.argv.remove("--debug")
unittest.main()
|
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.domains.std import Cmdoption
from sphinx.util.console import bold
from sphinx.util.nodes import set_source_info
try:
from sphinx.writers.html import SmartyPantsHTMLTranslator as HTMLTranslator
except ImportError: # Sphinx 1.6+
from sphinx.writers.html import HTMLTranslator
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_directive('django-admin-option', Cmdoption)
app.add_config_value('django_next_version', '0.0', True)
# app.add_directive('versionadded', VersionDirective)
# app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
# register the snippet directive
app.add_directive('snippet', SnippetWithFilename)
# register a node for snippet directive so that the xml parser
# knows how to handle the enter/exit parsing event
app.add_node(snippet_with_filename,
html=(visit_snippet, depart_snippet_literal),
latex=(visit_snippet_latex, depart_snippet_latex),
man=(visit_snippet_literal, depart_snippet_literal),
text=(visit_snippet_literal, depart_snippet_literal),
texinfo=(visit_snippet_literal, depart_snippet_literal))
app.set_translator('djangohtml', DjangoHTMLTranslator)
app.set_translator('json', DjangoHTMLTranslator)
return {'parallel_read_safe': True}
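# -----------------------------------------------------------------------------
# Illustrative note (not part of the original module): Sphinx calls the setup()
# function above when this module is listed in the project's conf.py, roughly
# like the snippet below (the module name shown is an assumption):
#
#     extensions = ['djangodocs']
#     django_next_version = '2.0'
#
# The 'django_next_version' config value registered above feeds the
# VersionDirective defined later in this module.
# -----------------------------------------------------------------------------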
class snippet_with_filename(nodes.literal_block):
"""
Subclass the literal_block to override the visit/depart event handlers
"""
pass
def visit_snippet_literal(self, node):
"""
default literal block handler
"""
self.visit_literal_block(node)
def depart_snippet_literal(self, node):
"""
default literal block handler
"""
self.depart_literal_block(node)
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s snippet' % lang)
self.body.append(starttag)
    self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode
def visit_snippet_latex(self, node):
"""
Latex document generator visit handler
"""
code = node.rawsource.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos,
**highlight_args)
self.body.append(
'\n{\\colorbox[rgb]{0.9,0.9,0.9}'
'{\\makebox[\\textwidth][l]'
'{\\small\\texttt{%s}}}}\n' % (
# Some filenames have '_', which is special in latex.
fname.replace('_', r'\_'),
)
)
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
# Prevent rawsource from appearing in output a second time.
raise nodes.SkipNode
def depart_snippet_latex(self, node):
"""
Latex document generator depart handler.
"""
pass
class SnippetWithFilename(Directive):
"""
    The 'snippet' directive, which allows adding the (optional) filename
    of a code snippet in the document. This is modeled after CodeBlock.
"""
has_content = True
optional_arguments = 1
option_spec = {'filename': directives.unchanged_required}
def run(self):
code = '\n'.join(self.content)
literal = snippet_with_filename(code, code)
if self.arguments:
literal['language'] = self.arguments[0]
literal['filename'] = self.options['filename']
set_source_info(self, literal)
return [literal]
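# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): in the docs'
# reST source, the 'snippet' directive registered in setup() would be used
# roughly as shown below. The optional argument is the highlight language and
# ':filename:' is required; the path shown is hypothetical.
#
#     .. snippet:: python
#         :filename: myapp/views.py
#
#         def my_view(request):
#             ...
# -----------------------------------------------------------------------------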
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(HTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)
def depart_desc_parameterlist(self, node):
self.body.append(')')
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
super().visit_section(node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env.ref_context['std:program'] = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return command
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super().finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatetag" and k == "ref/templates/builtins"
],
"tfilters": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatefilter" and k == "ref/templates/builtins"
],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import logging as real_logging
import re
import time
import jsonschema
import six
from tempest_lib.common import http
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions
from tempest_lib.openstack.common import log as logging
# redrive rate limited calls at most twice
MAX_RECURSION_DEPTH = 2
# All the successful HTTP status codes from RFC 7231 & 4918
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206, 207)
class RestClient(object):
TYPE = "json"
LOG = logging.getLogger(__name__)
def __init__(self, auth_provider, service, region,
endpoint_type='publicURL',
build_interval=1, build_timeout=60,
disable_ssl_certificate_validation=False, ca_certs=None,
trace_requests=''):
self.auth_provider = auth_provider
self.service = service
self.region = region
self.endpoint_type = endpoint_type
self.build_interval = build_interval
self.build_timeout = build_timeout
self.trace_requests = trace_requests
# The version of the API this client implements
self.api_version = None
self._skip_path = False
self.general_header_lc = set(('cache-control', 'connection',
'date', 'pragma', 'trailer',
'transfer-encoding', 'via',
'warning'))
self.response_header_lc = set(('accept-ranges', 'age', 'etag',
'location', 'proxy-authenticate',
'retry-after', 'server',
'vary', 'www-authenticate'))
dscv = disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv, ca_certs=ca_certs)
def _get_type(self):
return self.TYPE
def get_headers(self, accept_type=None, send_type=None):
if accept_type is None:
accept_type = self._get_type()
if send_type is None:
send_type = self._get_type()
return {'Content-Type': 'application/%s' % send_type,
'Accept': 'application/%s' % accept_type}
def __str__(self):
STRING_LIMIT = 80
str_format = ("service:%s, base_url:%s, "
"filters: %s, build_interval:%s, build_timeout:%s"
"\ntoken:%s..., \nheaders:%s...")
return str_format % (self.service, self.base_url,
self.filters, self.build_interval,
self.build_timeout,
str(self.token)[0:STRING_LIMIT],
str(self.get_headers())[0:STRING_LIMIT])
@property
def user(self):
return self.auth_provider.credentials.username
@property
def user_id(self):
return self.auth_provider.credentials.user_id
@property
def tenant_name(self):
return self.auth_provider.credentials.tenant_name
@property
def tenant_id(self):
return self.auth_provider.credentials.tenant_id
@property
def password(self):
return self.auth_provider.credentials.password
@property
def base_url(self):
return self.auth_provider.base_url(filters=self.filters)
@property
def token(self):
return self.auth_provider.get_token()
@property
def filters(self):
_filters = dict(
service=self.service,
endpoint_type=self.endpoint_type,
region=self.region
)
if self.api_version is not None:
_filters['api_version'] = self.api_version
if self._skip_path:
_filters['skip_path'] = self._skip_path
return _filters
def skip_path(self):
"""When set, ignore the path part of the base URL from the catalog"""
self._skip_path = True
def reset_path(self):
"""When reset, use the base URL from the catalog as-is"""
self._skip_path = False
@classmethod
def expected_success(cls, expected_code, read_code):
assert_msg = ("This function only allowed to use for HTTP status"
"codes which explicitly defined in the RFC 7231 & 4918."
"{0} is not a defined Success Code!"
).format(expected_code)
if isinstance(expected_code, list):
for code in expected_code:
assert code in HTTP_SUCCESS, assert_msg
else:
assert expected_code in HTTP_SUCCESS, assert_msg
# NOTE(afazekas): the http status code above 400 is processed by
# the _error_checker method
if read_code < 400:
pattern = """Unexpected http success status code {0},
The expected status code is {1}"""
if ((not isinstance(expected_code, list) and
(read_code != expected_code)) or
(isinstance(expected_code, list) and
(read_code not in expected_code))):
details = pattern.format(read_code, expected_code)
raise exceptions.InvalidHttpSuccessCode(details)
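    # -------------------------------------------------------------------
    # Illustrative note (not part of the original class): expected_success
    # accepts either a single code or a list of codes, e.g.
    #
    #     cls.expected_success(200, resp.status)
    #     cls.expected_success([200, 204], resp.status)
    #
    # Any expected code outside HTTP_SUCCESS trips the assert above.
    # -------------------------------------------------------------------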
def post(self, url, body, headers=None, extra_headers=False):
return self.request('POST', url, extra_headers, headers, body)
def get(self, url, headers=None, extra_headers=False):
return self.request('GET', url, extra_headers, headers)
def delete(self, url, headers=None, body=None, extra_headers=False):
return self.request('DELETE', url, extra_headers, headers, body)
def patch(self, url, body, headers=None, extra_headers=False):
return self.request('PATCH', url, extra_headers, headers, body)
def put(self, url, body, headers=None, extra_headers=False):
return self.request('PUT', url, extra_headers, headers, body)
def head(self, url, headers=None, extra_headers=False):
return self.request('HEAD', url, extra_headers, headers)
def copy(self, url, headers=None, extra_headers=False):
return self.request('COPY', url, extra_headers, headers)
def get_versions(self):
resp, body = self.get('')
body = self._parse_resp(body)
versions = map(lambda x: x['id'], body)
return resp, versions
def _get_request_id(self, resp):
for i in ('x-openstack-request-id', 'x-compute-request-id'):
if i in resp:
return resp[i]
return ""
def _safe_body(self, body, maxlen=4096):
# convert a structure into a string safely
try:
text = six.text_type(body)
except UnicodeDecodeError:
            # if this isn't actually text, return a marker that it was binary
return "<BinaryData: removed>"
if len(text) > maxlen:
return text[:maxlen]
else:
return text
def _log_request_start(self, method, req_url, req_headers=None,
req_body=None):
if req_headers is None:
req_headers = {}
caller_name = misc_utils.find_test_caller()
if self.trace_requests and re.search(self.trace_requests, caller_name):
self.LOG.debug('Starting Request (%s): %s %s' %
(caller_name, method, req_url))
def _log_request_full(self, method, req_url, resp,
secs="", req_headers=None,
req_body=None, resp_body=None,
caller_name=None, extra=None):
if 'X-Auth-Token' in req_headers:
req_headers['X-Auth-Token'] = '<omitted>'
log_fmt = """Request (%s): %s %s %s%s
Request - Headers: %s
Body: %s
Response - Headers: %s
Body: %s"""
self.LOG.debug(
log_fmt % (
caller_name,
resp['status'],
method,
req_url,
secs,
str(req_headers),
self._safe_body(req_body),
str(resp),
self._safe_body(resp_body)),
extra=extra)
def _log_request(self, method, req_url, resp,
secs="", req_headers=None,
req_body=None, resp_body=None):
if req_headers is None:
req_headers = {}
# if we have the request id, put it in the right part of the log
extra = dict(request_id=self._get_request_id(resp))
# NOTE(sdague): while we still have 6 callers to this function
# we're going to just provide work around on who is actually
# providing timings by gracefully adding no content if they don't.
# Once we're down to 1 caller, clean this up.
caller_name = misc_utils.find_test_caller()
if secs:
secs = " %.3fs" % secs
if not self.LOG.isEnabledFor(real_logging.DEBUG):
self.LOG.info(
'Request (%s): %s %s %s%s' % (
caller_name,
resp['status'],
method,
req_url,
secs),
extra=extra)
        # Also log everything at DEBUG; if you want to filter this
        # out, don't run at debug.
self._log_request_full(method, req_url, resp, secs, req_headers,
req_body, resp_body, caller_name, extra)
def _parse_resp(self, body):
body = json.loads(body)
        # We assume that if the first value of the deserialized body's
        # item set is a dict or a list, we just return that first value
        # of the deserialized body.
# Essentially "cutting out" the first placeholder element in a body
# that looks like this:
#
# {
# "users": [
# ...
# ]
# }
try:
            # Ensure there is no more than one top-level key
if len(body.keys()) > 1:
return body
# Just return the "wrapped" element
first_key, first_item = six.next(six.iteritems(body))
if isinstance(first_item, (dict, list)):
return first_item
except (ValueError, IndexError):
pass
return body
def response_checker(self, method, resp, resp_body):
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
# NOTE(afazekas):
# If the HTTP Status Code is 205
# 'The response MUST NOT include an entity.'
# A HTTP entity has an entity-body and an 'entity-header'.
# In the HTTP response specification (Section 6) the 'entity-header'
# 'generic-header' and 'response-header' are in OR relation.
# All headers not in the above two group are considered as entity
# header in every interpretation.
if (resp.status == 205 and
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
raise exceptions.ResponseWithEntity()
# NOTE(afazekas)
        # Swift sometimes (e.g. when deleting a non-empty container) returns a
        # non-JSON error response; we could create a new rest class for swift.
        # Usually RFC 2616 says error responses SHOULD contain an explanation.
        # The warning is normal for the SHOULD/SHOULD NOT case;
        # it will likely cause an error.
if method != 'HEAD' and not resp_body and resp.status >= 400:
self.LOG.warning("status >= 400 response with empty body")
def _request(self, method, url, headers=None, body=None):
"""A simple HTTP request interface."""
# Authenticate the request with the auth provider
req_url, req_headers, req_body = self.auth_provider.auth_request(
method, url, headers, body, self.filters)
# Do the actual request, and time it
start = time.time()
self._log_request_start(method, req_url)
resp, resp_body = self.raw_request(
req_url, method, headers=req_headers, body=req_body)
end = time.time()
self._log_request(method, req_url, resp, secs=(end - start),
req_headers=req_headers, req_body=req_body,
resp_body=resp_body)
# Verify HTTP response codes
self.response_checker(method, resp, resp_body)
return resp, resp_body
def raw_request(self, url, method, headers=None, body=None):
if headers is None:
headers = self.get_headers()
return self.http_obj.request(url, method,
headers=headers, body=body)
def request(self, method, url, extra_headers=False, headers=None,
body=None):
        # if extra_headers is True, the default headers
        # are added to the supplied headers
retry = 0
if headers is None:
# NOTE(vponomaryov): if some client do not need headers,
# it should explicitly pass empty dict
headers = self.get_headers()
elif extra_headers:
try:
headers = headers.copy()
headers.update(self.get_headers())
except (ValueError, TypeError):
headers = self.get_headers()
resp, resp_body = self._request(method, url,
headers=headers, body=body)
while (resp.status == 413 and
'retry-after' in resp and
not self.is_absolute_limit(
resp, self._parse_resp(resp_body)) and
retry < MAX_RECURSION_DEPTH):
retry += 1
delay = int(resp['retry-after'])
time.sleep(delay)
resp, resp_body = self._request(method, url,
headers=headers, body=body)
self._error_checker(method, url, headers, body,
resp, resp_body)
return resp, resp_body
def _error_checker(self, method, url,
headers, body, resp, resp_body):
# NOTE(mtreinish): Check for httplib response from glance_http. The
# object can't be used here because importing httplib breaks httplib2.
# If another object from a class not imported were passed here as
# resp this could possibly fail
if str(type(resp)) == "<type 'instance'>":
ctype = resp.getheader('content-type')
else:
try:
ctype = resp['content-type']
            # NOTE(mtreinish): Keystone delete user responses don't have a
            # content-type header (they don't have a body), so just pretend it
            # is set.
except KeyError:
ctype = 'application/json'
# It is not an error response
if resp.status < 400:
return
JSON_ENC = ['application/json', 'application/json; charset=utf-8']
# NOTE(mtreinish): This is for compatibility with Glance and swift
# APIs. These are the return content types that Glance api v1
# (and occasionally swift) are using.
#application/octet-stream added. Bug = 1417458
TXT_ENC = ['text/plain', 'text/html', 'text/html; charset=utf-8',
'text/plain; charset=utf-8',"application/octet-stream"]
if ctype.lower() in JSON_ENC:
parse_resp = True
elif ctype.lower() in TXT_ENC:
parse_resp = False
else:
raise exceptions.InvalidContentType(str(resp.status))
if resp.status == 401 or resp.status == 403 or resp.status == 405:
raise exceptions.Unauthorized(resp_body)
if resp.status == 404:
raise exceptions.NotFound(resp_body)
if resp.status == 400:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.BadRequest(resp_body)
if resp.status == 409:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.Conflict(resp_body)
if resp.status == 413:
if parse_resp:
resp_body = self._parse_resp(resp_body)
if self.is_absolute_limit(resp, resp_body):
raise exceptions.OverLimit(resp_body)
else:
raise exceptions.RateLimitExceeded(resp_body)
if resp.status == 415:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.InvalidContentType(resp_body)
if resp.status == 422:
if parse_resp:
resp_body = self._parse_resp(resp_body)
raise exceptions.UnprocessableEntity(resp_body)
if resp.status in (500, 501):
message = resp_body
if parse_resp:
try:
resp_body = self._parse_resp(resp_body)
except ValueError:
                    # If the response body is a non-json string message,
                    # use resp_body as is and raise an
                    # InvalidHTTPResponseBody exception.
raise exceptions.InvalidHTTPResponseBody(message)
else:
if isinstance(resp_body, dict):
# I'm seeing both computeFault
# and cloudServersFault come back.
# Will file a bug to fix, but leave as is for now.
if 'cloudServersFault' in resp_body:
message = resp_body['cloudServersFault']['message']
elif 'computeFault' in resp_body:
message = resp_body['computeFault']['message']
elif 'error' in resp_body:
message = resp_body['error']['message']
elif 'message' in resp_body:
message = resp_body['message']
else:
message = resp_body
if resp.status == 501:
raise exceptions.NotImplemented(message)
else:
raise exceptions.ServerFault(message)
if resp.status >= 400:
raise exceptions.UnexpectedResponseCode(str(resp.status))
def is_absolute_limit(self, resp, resp_body):
if (not isinstance(resp_body, collections.Mapping) or
'retry-after' not in resp):
return True
over_limit = resp_body.get('overLimit', None)
if not over_limit:
return True
return 'exceed' in over_limit.get('message', 'blabla')
def wait_for_resource_deletion(self, id):
"""Waits for a resource to be deleted."""
start_time = int(time.time())
while True:
if self.is_resource_deleted(id):
return
if int(time.time()) - start_time >= self.build_timeout:
message = ('Failed to delete %(resource_type)s %(id)s within '
'the required time (%(timeout)s s).' %
{'resource_type': self.resource_type, 'id': id,
'timeout': self.build_timeout})
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
def is_resource_deleted(self, id):
"""Subclasses override with specific deletion detection."""
message = ('"%s" does not implement is_resource_deleted'
% self.__class__.__name__)
raise NotImplementedError(message)
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'resource'
@classmethod
def validate_response(cls, schema, resp, body):
# Only check the response if the status code is a success code
# TODO(cyeoh): Eventually we should be able to verify that a failure
# code if it exists is something that we expect. This is explicitly
# declared in the V3 API and so we should be able to export this in
# the response schema. For now we'll ignore it.
if resp.status in HTTP_SUCCESS:
cls.expected_success(schema['status_code'], resp.status)
# Check the body of a response
body_schema = schema.get('response_body')
if body_schema:
try:
jsonschema.validate(body, body_schema)
except jsonschema.ValidationError as ex:
msg = ("HTTP response body is invalid (%s)") % ex
raise exceptions.InvalidHTTPResponseBody(msg)
else:
if body:
msg = ("HTTP response body should not exist (%s)") % body
raise exceptions.InvalidHTTPResponseBody(msg)
# Check the header of a response
header_schema = schema.get('response_header')
if header_schema:
try:
jsonschema.validate(resp, header_schema)
except jsonschema.ValidationError as ex:
msg = ("HTTP response header is invalid (%s)") % ex
raise exceptions.InvalidHTTPResponseHeader(msg)
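# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal subclass
# showing how is_resource_deleted() and resource_type are typically overridden
# so that wait_for_resource_deletion() can poll the service. The "widgets"
# resource name and its URL path are hypothetical.
# -----------------------------------------------------------------------------
class ExampleWidgetClient(RestClient):
    def show_widget(self, widget_id):
        resp, body = self.get("widgets/%s" % widget_id)
        self.expected_success(200, resp.status)
        return resp, self._parse_resp(body)
    def is_resource_deleted(self, id):
        # GET the resource; a 404 turned into NotFound by _error_checker
        # means the resource is gone.
        try:
            self.show_widget(id)
        except exceptions.NotFound:
            return True
        return False
    @property
    def resource_type(self):
        return 'widget'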
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
import webapp2
import webtest
# pylint: disable=unused-import
from dashboard import mock_oauth2_decorator
# pylint: enable=unused-import
from dashboard import associate_alerts
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import sheriff
from dashboard.services import issue_tracker_service
class AssociateAlertsTest(testing_common.TestCase):
def setUp(self):
super(AssociateAlertsTest, self).setUp()
app = webapp2.WSGIApplication([(
'/associate_alerts', associate_alerts.AssociateAlertsHandler)])
self.testapp = webtest.TestApp(app)
testing_common.SetSheriffDomains(['chromium.org'])
self.SetCurrentUser('[email protected]', is_admin=True)
def _AddSheriff(self):
"""Adds a Sheriff and returns its key."""
return sheriff.Sheriff(
id='Chromium Perf Sheriff', email='[email protected]').put()
def _AddTests(self):
"""Adds sample Tests and returns a list of their keys."""
testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
'scrolling-benchmark': {
'first_paint': {},
'mean_frame_time': {},
}
})
return map(utils.TestKey, [
'ChromiumGPU/linux-release/scrolling-benchmark/first_paint',
'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time',
])
def _AddAnomalies(self):
"""Adds sample Anomaly data and returns a dict of revision to key."""
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
key_map = {}
# Add anomalies to the two tests alternately.
for end_rev in range(10000, 10120, 10):
test_key = test_keys[0] if end_rev % 20 == 0 else test_keys[1]
anomaly_key = anomaly.Anomaly(
start_revision=(end_rev - 5), end_revision=end_rev, test=test_key,
median_before_anomaly=100, median_after_anomaly=200,
sheriff=sheriff_key).put()
key_map[end_rev] = anomaly_key.urlsafe()
# Add an anomaly that overlaps.
anomaly_key = anomaly.Anomaly(
start_revision=9990, end_revision=9996, test=test_keys[0],
median_before_anomaly=100, median_after_anomaly=200,
sheriff=sheriff_key).put()
key_map[9996] = anomaly_key.urlsafe()
# Add an anomaly that overlaps and has bug ID.
anomaly_key = anomaly.Anomaly(
start_revision=9990, end_revision=9997, test=test_keys[0],
median_before_anomaly=100, median_after_anomaly=200,
sheriff=sheriff_key, bug_id=12345).put()
key_map[9997] = anomaly_key.urlsafe()
return key_map
def testGet_NoKeys_ShowsError(self):
response = self.testapp.get('/associate_alerts')
self.assertIn('<div class="error">', response.body)
def testGet_SameAsPost(self):
get_response = self.testapp.get('/associate_alerts')
post_response = self.testapp.post('/associate_alerts')
self.assertEqual(get_response.body, post_response.body)
def testGet_InvalidBugId_ShowsError(self):
key_map = self._AddAnomalies()
response = self.testapp.get(
'/associate_alerts?keys=%s&bug_id=foo' % key_map[9996])
self.assertIn('<div class="error">', response.body)
self.assertIn('Invalid bug ID', response.body)
# Mocks fetching bugs from issue tracker.
@mock.patch('services.issue_tracker_service.discovery.build',
mock.MagicMock())
@mock.patch.object(
issue_tracker_service.IssueTrackerService, 'List',
mock.MagicMock(return_value={
'items': [
{
'id': 12345,
'summary': '5% regression in bot/suite/x at 10000:20000',
'state': 'open',
'status': 'New',
'author': {'name': '[email protected]'},
},
{
'id': 13579,
'summary': '1% regression in bot/suite/y at 10000:20000',
'state': 'closed',
'status': 'WontFix',
'author': {'name': '[email protected]'},
},
]}))
def testGet_NoBugId_ShowsDialog(self):
    # When a GET request is made with some anomaly keys but no bug ID,
    # an HTML form is shown for the user to input a bug number.
key_map = self._AddAnomalies()
response = self.testapp.get('/associate_alerts?keys=%s' % key_map[10000])
# The response contains a table of recent bugs and a form.
self.assertIn('12345', response.body)
self.assertIn('13579', response.body)
self.assertIn('<form', response.body)
def testGet_WithBugId_AlertIsAssociatedWithBugId(self):
# When the bug ID is given and the alerts overlap, then the Anomaly
# entities are updated and there is a response indicating success.
key_map = self._AddAnomalies()
response = self.testapp.get(
'/associate_alerts?keys=%s,%s&bug_id=12345' % (
key_map[9996], key_map[10000]))
# The response page should have a bug number.
self.assertIn('12345', response.body)
# The Anomaly entities should be updated.
for anomaly_entity in anomaly.Anomaly.query().fetch():
if anomaly_entity.end_revision in (10000, 9996):
self.assertEqual(12345, anomaly_entity.bug_id)
elif anomaly_entity.end_revision != 9997:
self.assertIsNone(anomaly_entity.bug_id)
def testGet_TargetBugHasNoAlerts_DoesNotAskForConfirmation(self):
# Associating an alert with a bug ID that has no alerts is always OK.
key_map = self._AddAnomalies()
response = self.testapp.get(
'/associate_alerts?keys=%s,%s&bug_id=578' % (
key_map[9996], key_map[10000]))
# The response page should have a bug number.
self.assertIn('578', response.body)
# The Anomaly entities should be updated.
self.assertEqual(
578, anomaly.Anomaly.query(
anomaly.Anomaly.end_revision == 9996).get().bug_id)
self.assertEqual(
578, anomaly.Anomaly.query(
anomaly.Anomaly.end_revision == 10000).get().bug_id)
def testGet_NonOverlappingAlerts_AsksForConfirmation(self):
# Associating an alert with a bug ID that contains non-overlapping revision
# ranges should show a confirmation page.
key_map = self._AddAnomalies()
response = self.testapp.get(
'/associate_alerts?keys=%s,%s&bug_id=12345' % (
key_map[10000], key_map[10010]))
# The response page should show confirmation page.
self.assertIn('Do you want to continue?', response.body)
# The Anomaly entities should not be updated.
for anomaly_entity in anomaly.Anomaly.query().fetch():
if anomaly_entity.end_revision != 9997:
self.assertIsNone(anomaly_entity.bug_id)
def testGet_WithConfirm_AssociatesWithNewBugId(self):
# Associating an alert with a bug ID, with the non-overlapping revision range
# confirmed, should update the alert with the bug ID.
key_map = self._AddAnomalies()
response = self.testapp.get(
'/associate_alerts?confirm=true&keys=%s,%s&bug_id=12345' % (
key_map[10000], key_map[10010]))
# The response page should have the bug number.
self.assertIn('12345', response.body)
# The Anomaly entities should be updated.
for anomaly_entity in anomaly.Anomaly.query().fetch():
if anomaly_entity.end_revision in (10000, 10010):
self.assertEqual(12345, anomaly_entity.bug_id)
elif anomaly_entity.end_revision != 9997:
self.assertIsNone(anomaly_entity.bug_id)
def testRevisionRangeFromSummary(self):
# If the summary is in the expected format, a pair is returned.
self.assertEqual(
(10000, 10500),
associate_alerts._RevisionRangeFromSummary(
'1% regression in bot/my_suite/test at 10000:10500'))
# Otherwise None is returned.
self.assertIsNone(
associate_alerts._RevisionRangeFromSummary(
'Regression in rev ranges 12345 to 20000'))
def testRangesOverlap_NonOverlapping_ReturnsFalse(self):
self.assertFalse(associate_alerts._RangesOverlap((1, 5), (6, 9)))
self.assertFalse(associate_alerts._RangesOverlap((6, 9), (1, 5)))
def testRangesOverlap_NoneGiven_ReturnsFalse(self):
self.assertFalse(associate_alerts._RangesOverlap((1, 5), None))
self.assertFalse(associate_alerts._RangesOverlap(None, (1, 5)))
self.assertFalse(associate_alerts._RangesOverlap(None, None))
def testRangesOverlap_OneIncludesOther_ReturnsTrue(self):
# True if one range envelopes the other.
self.assertTrue(associate_alerts._RangesOverlap((1, 9), (2, 5)))
self.assertTrue(associate_alerts._RangesOverlap((2, 5), (1, 9)))
def testRangesOverlap_PartlyOverlap_ReturnsTrue(self):
self.assertTrue(associate_alerts._RangesOverlap((1, 6), (5, 9)))
self.assertTrue(associate_alerts._RangesOverlap((5, 9), (1, 6)))
def testRangesOverlap_CommonBoundary_ReturnsTrue(self):
self.assertTrue(associate_alerts._RangesOverlap((1, 6), (6, 9)))
self.assertTrue(associate_alerts._RangesOverlap((6, 9), (1, 6)))
if __name__ == '__main__':
unittest.main()
|
|
import unittest
import time
from rdkit import Chem
from rdkit.Chem import MCS
def load_smiles(text):
mols = []
for line in text.strip().splitlines():
smiles = line.split()[0]
mol = Chem.MolFromSmiles(smiles)
assert mol is not None, smiles
mols.append(mol)
return mols
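# Illustrative usage sketch (not part of the original test module): load_smiles
# takes one SMILES per line, ignores the trailing name, and returns the RDKit
# molecules that MCS.FindMCS consumes, e.g.:
#   mols = load_smiles("c1ccccc1O phenol\nCO methanol")
#   result = MCS.FindMCS(mols)
#   print(result.numAtoms, result.numBonds, result.smarts)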
_ignore = object()
class MCSTestCase(unittest.TestCase):
def assert_search(self, smiles, numAtoms, numBonds, smarts=_ignore, **kwargs):
result = MCS.FindMCS(smiles, **kwargs)
self.assert_result(result, completed=1, numAtoms=numAtoms, numBonds=numBonds, smarts=smarts)
def assert_result(self, result, completed=_ignore, numAtoms=_ignore, numBonds=_ignore,
smarts=_ignore):
if completed is not _ignore:
self.assertEqual(result.completed, completed)
if numAtoms is not _ignore:
self.assertEqual(result.numAtoms, numAtoms)
if numBonds is not _ignore:
self.assertEqual(result.numBonds, numBonds)
if smarts is not _ignore:
self.assertEqual(result.smarts, smarts)
simple_mols = load_smiles("""
c1ccccc1O phenol
CO methanol""")
class TestMinAtoms(MCSTestCase):
def test_min_atoms_2(self):
self.assert_search(simple_mols, 2, 1, minNumAtoms=2)
def test_min_atoms_3(self):
self.assert_search(simple_mols, -1, -1, smarts=None, minNumAtoms=3)
def test_min_atoms_1(self):
try:
result = MCS.FindMCS(simple_mols, minNumAtoms=1)
except ValueError:
pass
else:
raise AssertionError("should have raised an exception")
maximize_mols = load_smiles("""
C12CCC1CC2OCCCCCCC 2-rings-and-chain-with-O
C12CCC1CC2SCCCCCCC 2-rings-and-chain-with-S
""")
class TestMaximize(MCSTestCase):
# C12CCC1CC2OCCCCCCC 2-rings-and-chain-with-O
# C12CCC1CC2SCCCCCCC 2-rings-and-chain-with-S
def test_maximize_default(self):
# default maximizes the number of bonds
self.assert_search(maximize_mols, 6, 7)
def test_maximize_atoms(self):
self.assert_search(maximize_mols, 7, 6, maximize="atoms")
def test_maximize_bonds(self):
self.assert_search(maximize_mols, 6, 7, maximize="bonds")
atomtype_mols = load_smiles("""
c1ccccc1O phenol
CCCCCCOn1cccc1 different-answers-depending-on-type
""")
class TestAtomTypes(MCSTestCase):
# The tests compare:
# c1ccccc1O
# CCCCCCOn1cccc1
def test_atom_compare_default(self):
self.assert_search(atomtype_mols, 4, 3, smarts='[#6](:[#6]):[#6]:[#6]')
def test_atom_compare_elements(self):
self.assert_search(atomtype_mols, 4, 3, smarts='[#6](:[#6]):[#6]:[#6]', atomCompare="elements")
def test_atom_compare_any(self):
# Note: bond aromaticities must still match!
# 'cccccO' matches 'ccccnO'
self.assert_search(atomtype_mols, 6, 5, atomCompare="any")
def test_atom_compare_any_bond_compare_any(self):
# Linear chain of 7 atoms
self.assert_search(atomtype_mols, 7, 6, atomCompare="any", bondCompare="any")
def test_bond_compare_any(self):
# Linear chain of 7 atoms
self.assert_search(atomtype_mols, 7, 6, bondCompare="any")
isotope_mols = load_smiles("""
C1C[0N]CC[5C]1[1C][2C][2C][3C] C1223
C1CPCC[4C]1[2C][2C][1C][3C] C2213
""")
class TestIsotopes(MCSTestCase):
# C1C[0N]CC[5C]1[1C][2C][2C][3C] C1223
# C1CPCC[4C]1[2C][2C][1C][3C] C2213
def test_without_isotope(self):
# The entire system, except the N/P in the ring
self.assert_search(isotope_mols, numAtoms=9, numBonds=8)
def test_isotopes(self):
# 5 atoms of class '0' in the ring
self.assert_search(isotope_mols, 5, 4, atomCompare="isotopes")
def test_isotope_complete_ring_only(self):
# the 122 in the chain
self.assert_search(isotope_mols, 3, 2, atomCompare="isotopes", completeRingsOnly=True)
bondtype_mols = load_smiles("""
C1CCCCC1OC#CC#CC#CC#CC#CC first
c1ccccc1ONCCCCCCCCCC second
""")
class TestBondTypes(MCSTestCase):
# C1CCCCC1OC#CC#CC#CC#CC#CC
# c1ccccc1ONCCCCCCCCCC second
def test_bond_compare_default(self):
# Match the 'CCCCCC' part of the first ring, with the second's tail
self.assert_search(bondtype_mols, 6, 5)
def test_bond_compare_bondtypes(self):
# Repeat of the previous
self.assert_search(bondtype_mols, 6, 5, bondCompare="bondtypes")
def test_bond_compare_any(self):
# the CC#CC chain matches the CCCC tail
self.assert_search(bondtype_mols, 10, 9, bondCompare="any")
def test_atom_compare_elements_bond_compare_any(self):
self.assert_search(bondtype_mols, 10, 9, atomCompare="elements", bondCompare="any")
def test_atom_compare_any_bond_compare_any(self):
# complete match!
self.assert_search(bondtype_mols, 18, 18, atomCompare="any", bondCompare="any")
valence_mols = load_smiles("""
CCCCCCCCN
CCC[CH-]CCCC
""")
class TestValences(MCSTestCase):
def test_valence_compare_default(self):
# match 'CCCCCCCC'
self.assert_search(valence_mols, 8, 7)
def test_valence_compare_valence(self):
# match 'CCCC'
self.assert_search(valence_mols, 4, 3, matchValences=True)
def test_valence_compare_valence_any_atoms(self):
# match 'CCCCN' to '[CH-]CCCC' (but in reverse)
self.assert_search(valence_mols, 5, 4, matchValences=True, atomCompare="any")
ring_mols = load_smiles("""
C12CCCC(N2)CCCC1 6-and-7-bridge-rings-with-N
C1CCCCN1 6-ring
C1CCCCCN1 7-ring
C1CCCCCCCC1 9-ring
NC1CCCCCC1 N+7-ring
C1CC1CCCCCC 3-ring-with-tail
C12CCCC(O2)CCCC1 6-and-7-bridge-rings-with-O
""")
def SELECT(mols, *offsets):
return [mols[offset - 1] for offset in offsets]
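# For example, SELECT(ring_mols, 1, 2) picks the first and second molecules
# above (offsets are 1-based), which is how the tests below restrict a search
# to a specific pair of structures.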
class TestRingMatchesRingOnly(MCSTestCase):
# C12CCCC(N2)CCCC1 6-and-7-bridge-rings-with-N
# C1CCCCN1 6-ring
# C1CCCCCN1 7-ring
# C1CCCCCCCC1 9-ring
# NC1CCCCCC1 N+7-ring
# C1CC1CCCCCC 3-ring-with-tail
# C12CCCC(O2)CCCC1 6-and-7-bridge-rings-with-O
def test_default(self):
# Should match 'CCCCC'
self.assert_search(ring_mols, 5, 4)
def test_ring_only(self):
# Should match "CCC"
self.assert_search(ring_mols, 3, 2, ringMatchesRingOnly=True)
def test_ring_only_select_1_2(self):
# Should match "C1CCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 2), 6, 6, ringMatchesRingOnly=True)
def test_ring_only_select_1_3(self):
# Should match "C1CCCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 3), 7, 7, ringMatchesRingOnly=True)
def test_ring_only_select_1_4(self):
# Should match "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 4), 9, 9, ringMatchesRingOnly=True)
def test_select_1_5(self):
# Should match "NCCCCCC"
self.assert_search(SELECT(ring_mols, 1, 5), 8, 7, ringMatchesRingOnly=False)
def test_ring_only_select_1_5(self):
# Should match "CCCCCC"
self.assert_search(SELECT(ring_mols, 1, 5), 7, 6, ringMatchesRingOnly=True)
def test_select_1_6(self):
# Should match "CCCCCCCCC" by breaking one of the 3-carbon ring bonds
self.assert_search(SELECT(ring_mols, 1, 6), 9, 8)
def test_ring_only_select_1_6(self):
# Should match "CCC" from the three atom ring
self.assert_search(SELECT(ring_mols, 1, 6), 3, 2, ringMatchesRingOnly=True)
def test_ring_only_select_1_7(self):
# Should match the outer ring "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 7), 9, 9)
def test_ring_only_select_1_7_any_atoms(self):
# Should match everything
self.assert_search(SELECT(ring_mols, 1, 7), 10, 11, ringMatchesRingOnly=True, atomCompare="any")
class TestCompleteRingsOnly(MCSTestCase):
# C12CCCC(N2)CCCC1 6-and-7-bridge-rings-with-N
# C1CCCCN1 6-ring
# C1CCCCCN1 7-ring
# C1CCCCCCCC1 9-ring
# NC1CCCCCC1 N+7-ring
# C1CC1CCCCCC 3-ring-with-tail
# C12CCCC(O2)CCCC1 6-and-7-bridge-rings-with-O
def test_ring_only(self):
# No match: "CCC" is not in a ring
self.assert_search(ring_mols, -1, -1, completeRingsOnly=True)
def test_ring_only_select_1_2(self):
# Should match "C1CCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 2), 6, 6, completeRingsOnly=True)
def test_ring_only_select_1_3(self):
# Should match "C1CCCCCCN1"
self.assert_search(SELECT(ring_mols, 1, 3), 7, 7, completeRingsOnly=True)
def test_ring_only_select_1_4(self):
# Should match "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 4), 9, 9, completeRingsOnly=True)
def test_ring_only_select_1_5(self):
# No match: "CCCCCC" is not in a ring
self.assert_search(SELECT(ring_mols, 1, 5), -1, -1, completeRingsOnly=True)
def test_ring_only_select_1_7(self):
# Should match the outer ring "C1CCCCCCCC1"
self.assert_search(SELECT(ring_mols, 1, 7), 9, 9, completeRingsOnly=True)
def test_ring_only_select_1_7_any_atoms(self):
# Should match everything
self.assert_search(SELECT(ring_mols, 1, 7), 10, 11, completeRingsOnly=True, atomCompare="any")
def test_ring_to_nonring_bond(self):
# Should allow the cO in phenol to match the CO in the other structure
self.assert_search(atomtype_mols, 2, 1, completeRingsOnly=True)
lengthy_mols = [Chem.MolFromSmiles("Nc1ccccc1" * 20), Chem.MolFromSmiles("Nc1ccccccccc1" * 20)]
class TestTimeout(MCSTestCase):
# This should take over two minutes to process. Give it 0.1 seconds.
def test_timeout(self):
t1 = time.time()
result = MCS.FindMCS(lengthy_mols, timeout=0.1)
self.assert_result(result, completed=0)
self.assertTrue(result.numAtoms > 1)
self.assertTrue(result.numBonds >= result.numAtoms - 1, (result.numAtoms, result.numBonds))
t2 = time.time()
self.assertTrue(t2 - t1 < 0.5, t2 - t1)
# Check for non-negative values
def test_timeout_negative(self):
try:
MCS.FindMCS(lengthy_mols, timeout=-1)
except ValueError:
pass
else:
raise AssertionError("bad range check for timeout")
if __name__ == "__main__":
unittest.main()
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "cone"
_path_str = "cone.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.cone.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.cone.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.cone.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.cone.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.cone.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
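# Illustrative usage sketch (not part of the generated module): the class can be
# instantiated directly or supplied as a plain dict on the parent cone trace:
#   import plotly.graph_objects as go
#   label = go.cone.Hoverlabel(bgcolor="black", namelength=-1)
#   fig = go.Figure(go.Cone(x=[1], y=[1], z=[1], u=[1], v=[1], w=[0],
#                           hoverlabel=label))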
|
|
from __future__ import division
from cyvlfeat.kmeans import kmeans, kmeans_quantize, ikmeans, ikmeans_push, hikmeans, hikmeans_push
import numpy as np
def set_distance(values1, values2):
"""
Compare two sets of values and return the maximum distance between the closest pairs.
Parameters
----------
values1 : NxD set of values
values2 : MxD set of values
Returns
-------
Distance
"""
dimension = values1.shape[1]
assert values2.shape[1] == dimension
dist = np.sqrt(np.sum(
np.power(values1.reshape((-1, 1, dimension)) - values2.reshape((1, -1, dimension)), 2),
axis=2))*1/dimension
assert isinstance(dist, np.ndarray)
assert dist.shape == (values1.shape[0], values2.shape[0])
return max(np.max(np.min(dist, axis=0)), np.max(np.min(dist, axis=1)))
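# Illustrative sketch (values chosen for this comment, not from the tests):
#   a = np.array([[0.0, 0.0], [10.0, 10.0]])
#   b = np.array([[0.1, 0.0], [10.0, 10.2]])
#   set_distance(a, b)  # ~0.1 after the 1/dimension scaling
# so asserting set_distance(centers, found_centers) <= noise_level checks that
# every true center was recovered up to the injected noise.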
def test_kmeans_float():
num_data = 50
num_centers = 4
dimension = 8
noise_level = 0.1
centers = np.random.random_integers(-40, 40, (num_centers, dimension)).astype(np.float32)
data = np.empty((num_data, dimension), dtype=np.float32)
for i in range(num_data):
data[i] = centers[i % num_centers] + np.random.random_sample(dimension)*noise_level
found_centers = kmeans(data, num_centers, initialization="PLUSPLUS")
found_assignments = kmeans_quantize(data, found_centers)
assert found_centers.dtype == np.float32
assert found_centers.shape == (num_centers, dimension)
assert found_assignments.dtype == np.uint32
assert found_assignments.shape == (num_data,)
dist = set_distance(centers, found_centers)
assert dist <= noise_level, dist
for i in range(num_centers):
for j in range(num_centers):
if i != j:
assert found_assignments[i] != found_assignments[j]
for i in range(num_data):
assert found_assignments[i] == found_assignments[i % num_centers]
def test_kmeans_double():
num_data = 50
num_centers = 4
dimension = 8
noise_level = 0.1
centers = np.random.random_integers(-40, 40, (num_centers, dimension)).astype(np.float64)
data = np.empty((num_data, dimension), dtype=np.float64)
for i in range(num_data):
data[i] = centers[i % num_centers] + np.random.random_sample(dimension)*noise_level
found_centers = kmeans(data, num_centers, initialization="PLUSPLUS")
found_assignments = kmeans_quantize(data, found_centers)
assert found_centers.dtype == np.float64
assert found_centers.shape == (num_centers, dimension)
assert found_assignments.dtype == np.uint32
assert found_assignments.shape == (num_data,)
dist = set_distance(centers, found_centers)
assert dist <= noise_level, dist
for i in range(num_centers):
for j in range(num_centers):
if i != j:
assert found_assignments[i] != found_assignments[j]
for i in range(num_data):
assert found_assignments[i] == found_assignments[i % num_centers]
def test_kmeans_ANN():
num_data = 5000
num_centers = 4
dimension = 8
noise_level = 0.1
centers = np.random.random_integers(-40, 40, (num_centers, dimension)).astype(np.float32)
data = np.empty((num_data, dimension), dtype=np.float32)
for i in range(num_data):
data[i] = centers[i % num_centers] + np.random.random_sample(dimension)*noise_level
found_centers = kmeans(data, num_centers, initialization="PLUSPLUS", algorithm="ANN")
found_assignments = kmeans_quantize(data, found_centers, algorithm="ANN")
assert found_centers.dtype == np.float32
assert found_centers.shape == (num_centers, dimension)
dist = set_distance(centers, found_centers)
assert dist <= noise_level, dist
for i in range(num_centers):
for j in range(num_centers):
if i != j:
assert found_assignments[i] != found_assignments[j]
for i in range(num_data):
assert found_assignments[i] == found_assignments[i % num_centers]
def test_ikmeans():
num_data = 5000
num_centers = 40
dimension = 128
noise_level = 3
centers = np.random.random_integers(0, 200, (num_centers, dimension)).astype(np.uint8)
data = np.empty((num_data, dimension), dtype=np.uint8)
for i in range(num_data):
data[i] = centers[i % num_centers]
data = data + np.random.random_integers(0, noise_level, (num_data, dimension)).astype(np.uint8)
found_centers, found_assignments = ikmeans(data, num_centers)
assert found_centers.dtype == np.int32
assert found_centers.shape == (num_centers, dimension)
assert found_assignments.dtype == np.uint32
assert found_assignments.shape == (num_data,)
# Because the initialization is random, these tests do not work all the time. Two clusters might be merged.
# dist = set_distance(centers.astype(np.float32), found_centers.astype(np.float32))
# assert dist <= noise_level, dist
# for i in range(num_centers):
# for j in range(num_centers):
# if i != j:
# assert found_assignments[i] != found_assignments[j]
# for i in range(num_data):
# assert found_assignments[i] == found_assignments[i % num_centers]
assignments_2 = ikmeans_push(data, found_centers)
assert np.allclose(found_assignments, assignments_2)
def test_ikmeans_2():
num_data = 5000
num_centers = 2
dimension = 2
noise_level = 3
centers = np.array([[0, 0], [50, 100]], dtype=np.uint8)
data = np.empty((num_data, dimension), dtype=np.uint8)
for i in range(num_data):
data[i] = centers[i % num_centers]
data = data + np.random.random_integers(0, noise_level, (num_data, dimension)).astype(np.uint8)
found_centers, found_assignments = ikmeans(data, num_centers)
assert found_centers.dtype == np.int32
assert found_centers.shape == (num_centers, dimension)
assert found_assignments.dtype == np.uint32
assert found_assignments.shape == (num_data,)
dist = set_distance(centers.astype(np.float32), found_centers.astype(np.float32))
assert dist <= noise_level, dist
for i in range(num_centers):
for j in range(num_centers):
if i != j:
assert found_assignments[i] != found_assignments[j]
for i in range(num_data):
assert found_assignments[i] == found_assignments[i % num_centers]
assignments_2 = ikmeans_push(data, found_centers)
assert np.allclose(found_assignments, assignments_2)
def test_hikmeans():
num_data = 5000
num_centers = 40
dimension = 128
noise_level = 3
centers = np.random.random_integers(0, 200, (num_centers, dimension)).astype(np.uint8)
data = np.empty((num_data, dimension), dtype=np.uint8)
for i in range(num_data):
data[i] = centers[i % num_centers]
data = data + np.random.random_integers(0, noise_level, (num_data, dimension)).astype(np.uint8)
tree_structure, found_assignments = hikmeans(data, 4, 64)
assert found_assignments.dtype == np.uint32
assert found_assignments.shape == (num_data, tree_structure.depth)
assignments_2 = hikmeans_push(data, tree_structure)
assert np.allclose(found_assignments, assignments_2)
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for shim used to translate gsutil command to gcloud storage."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import enum
import os
import re
import subprocess
from boto import config
from gslib import exception
from gslib.utils import constants
class HIDDEN_SHIM_MODE(enum.Enum):
NO_FALLBACK = 'no_fallback'
DRY_RUN = 'dry_run'
NONE = 'none'
DECRYPTION_KEY_REGEX = re.compile(r'^decryption_key([1-9]$|[1-9][0-9]$|100$)')
# Required for headers translation and boto config translation.
DATA_TRANSFER_COMMANDS = frozenset(['cp', 'mv', 'rsync'])
ENCRYPTION_SUPPORTED_COMMANDS = DATA_TRANSFER_COMMANDS | frozenset(['ls'])
PRECONDITONS_ONLY_SUPPORTED_COMMANDS = frozenset(
['compose', 'rewrite', 'rm', 'retention'])
DATA_TRANSFER_HEADERS = frozenset([
'cache-control',
'content-disposition',
'content-encoding',
'content-md5',
'content-language',
'content-type',
'custom-time',
])
PRECONDITIONS_HEADERS = frozenset(
['x-goog-if-generation-match', 'x-goog-if-metageneration-match'])
# The format for _BOTO_CONFIG_MAP is as follows:
# {
# 'Boto section name': {
# 'boto field name': 'corresponding env variable name in Cloud SDK'
# }
# }
_BOTO_CONFIG_MAP = {
'Credentials': {
'aws_access_key_id':
'AWS_ACCESS_KEY_ID',
'aws_secret_access_key':
'AWS_SECRET_ACCESS_KEY',
'use_client_certificate':
'CLOUDSDK_CONTEXT_AWARE_USE_CLIENT_CERTIFICATE',
},
'Boto': {
'proxy': 'CLOUDSDK_PROXY_ADDRESS',
'proxy_type': 'CLOUDSDK_PROXY_TYPE',
'proxy_port': 'CLOUDSDK_PROXY_PORT',
'proxy_user': 'CLOUDSDK_PROXY_USERNAME',
'proxy_pass': 'CLOUDSDK_PROXY_PASSWORD',
'proxy_rdns': 'CLOUDSDK_PROXY_RDNS',
'http_socket_timeout': 'CLOUDSDK_CORE_HTTP_TIMEOUT',
'ca_certificates_file': 'CLOUDSDK_CORE_CUSTOM_CA_CERTS_FILE',
'max_retry_delay': 'CLOUDSDK_STORAGE_BASE_RETRY_DELAY',
'num_retries': 'CLOUDSDK_STORAGE_MAX_RETRIES',
},
'GSUtil': {
'check_hashes':
'CLOUDSDK_STORAGE_CHECK_HASHES',
'default_project_id':
'CLOUDSDK_CORE_PROJECT',
'disable_analytics_prompt':
'CLOUDSDK_CORE_DISABLE_USAGE_REPORTING',
'use_magicfile':
'CLOUDSDK_STORAGE_USE_MAGICFILE',
'parallel_composite_upload_threshold':
'CLOUDSDK_STORAGE_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD'
},
'OAuth2': {
'client_id': 'CLOUDSDK_AUTH_CLIENT_ID',
'client_secret': 'CLOUDSDK_AUTH_CLIENT_SECRET',
'provider_authorization_uri': 'CLOUDSDK_AUTH_AUTH_HOST',
'provider_token_uri': 'CLOUDSDK_AUTH_TOKEN_HOST',
},
}
_REQUIRED_BOTO_CONFIG_NOT_YET_SUPPORTED = frozenset(
# TODO(b/214245419) Remove this once STET is supported and add the equivalent
# mapping above.
['stet_binary_path', 'stet_config_path'])
def get_flag_from_header(header_key_raw, header_value, unset=False):
"""Returns the gcloud storage flag for the given gsutil header.
Args:
header_key_raw: The header key.
header_value: The header value
unset: If True, the equivalent clear/remove flag is returned instead of the
setter flag. This only applies to setmeta.
Returns:
A string representing the equivalent gcloud storage flag and value, if
translation is possible, else returns None.
Examples:
>> get_flag_from_header('Cache-Control', 'val')
--cache-control=val
>> get_flag_from_header('x-goog-meta-foo', 'val')
--add-custom-metadata=foo=val
>> get_flag_from_header('x-goog-meta-foo', 'val', unset=True)
--remove-custom-metadata=foo
"""
header = header_key_raw.lower()
if header in PRECONDITIONS_HEADERS:
flag_name = header[len('x-goog-'):]
elif header in DATA_TRANSFER_HEADERS:
flag_name = header
else:
flag_name = None
if flag_name is not None:
if unset:
if header in PRECONDITIONS_HEADERS or header == 'content-md5':
# Precondition headers and content-md5 cannot be cleared.
return None
else:
return '--clear-' + flag_name
return '--{}={}'.format(flag_name, header_value)
for header_prefix in ('x-goog-meta-', 'x-amz-meta-'):
if header.startswith(header_prefix):
metadata_key = header[len(header_prefix):]
if unset:
return '--remove-custom-metadata=' + metadata_key
else:
return '--add-custom-metadata={}={}'.format(metadata_key, header_value)
if header.startswith('x-amz-'):
# Send the entire header as it is.
if unset:
return '--remove-custom-headers=' + header
else:
return '--add-custom-headers={}={}'.format(header, header_value)
return None
class GcloudStorageFlag(object):
def __init__(self, gcloud_flag, supports_output_translation=False):
"""Initializes GcloudStorageFlag.
Args:
gcloud_flag (str): The name of the gcloud flag.
supports_output_translation (bool): If True, this flag in gcloud storage
supports printing gsutil formatted output.
"""
self.gcloud_flag = gcloud_flag
self.supports_output_translation = supports_output_translation
class GcloudStorageMap(object):
"""Mapping to translate gsutil command to its gcloud storage equivalent."""
def __init__(self,
gcloud_command,
flag_map,
supports_output_translation=False):
"""Intalizes GcloudStorageMap.
Args:
gcloud_command (dict|str): The corresponding name of the command to be
called in gcloud. If this command supports sub-commands, then this
field must be a dict of sub-command-name:GcloudStorageMap pairs.
flag_map (dict): A dict of str to GcloudStorageFlag. Mapping of gsutil
flags to their equivalent gcloud storage flag names.
supports_output_translation (bool): Indicates if the corresponding
gcloud storage command supports printing gsutil-formatted output.
"""
self.gcloud_command = gcloud_command
self.flag_map = flag_map
self.supports_output_translation = supports_output_translation
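# Illustrative sketch (hypothetical mapping, not taken from a real gsutil
# command): a command without sub-commands maps its flags directly, while a
# command with sub-commands nests one GcloudStorageMap per sub-command and
# keeps the top-level flag_map empty:
#   _EXAMPLE_MAP = GcloudStorageMap(
#       gcloud_command={
#           'get': GcloudStorageMap('storage buckets describe', flag_map={}),
#           'set': GcloudStorageMap(
#               'storage buckets update',
#               flag_map={'-r': GcloudStorageFlag('--recursive')}),
#       },
#       flag_map={},
#   )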
def _get_gcloud_binary_path():
# GCLOUD_BINARY_PATH is used for testing purposes only.
# It helps to run the parity_check.py script directly without having
# to build gcloud.
gcloud_binary_path = os.environ.get('GCLOUD_BINARY_PATH')
if gcloud_binary_path:
return gcloud_binary_path
cloudsdk_root = os.environ.get('CLOUDSDK_ROOT_DIR')
if cloudsdk_root is None:
raise exception.GcloudStorageTranslationError(
'Requested to use "gcloud storage" but the gcloud binary path cannot'
' be found. This might happen if you attempt to use gsutil that was'
' not installed via Cloud SDK. You can manually set the'
' `CLOUDSDK_ROOT_DIR` environment variable to point to the'
' google-cloud-sdk installation directory to resolve the issue.'
' Alternatively, you can set `use_gcloud_storage=False` to disable'
' running the command using gcloud storage.')
return os.path.join(cloudsdk_root, 'bin', 'gcloud')
def _get_gcs_json_endpoint_from_boto_config(config):
gs_json_host = config.get('Credentials', 'gs_json_host')
if gs_json_host:
gs_json_port = config.get('Credentials', 'gs_json_port')
port = ':' + gs_json_port if gs_json_port else ''
json_api_version = config.get('Credentials', 'json_api_version', 'v1')
return 'https://{}{}/storage/{}'.format(gs_json_host, port,
json_api_version)
return None
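# Illustrative sketch (assumed boto config values): with
#   [Credentials]
#   gs_json_host = storage.googleapis.com
#   gs_json_port = 443
# the helper above returns 'https://storage.googleapis.com:443/storage/v1',
# which _translate_boto_config below exports as
# CLOUDSDK_API_ENDPOINT_OVERRIDES_STORAGE.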
def _get_s3_endpoint_from_boto_config(config):
s3_host = config.get('Credentials', 's3_host')
if s3_host:
s3_port = config.get('Credentials', 's3_port')
port = ':' + s3_port if s3_port else ''
return 'https://{}{}'.format(s3_host, port)
return None
class GcloudStorageCommandMixin(object):
"""Provides gcloud storage translation functionality.
The command.Command class must inherit this class in order to support
converting the gsutil command to its gcloud storage equivalent.
"""
# Mapping for translating gsutil command to gcloud storage.
gcloud_storage_map = None
def __init__(self):
self._translated_gcloud_storage_command = None
self._translated_env_variables = None
def _get_gcloud_storage_args(self, sub_opts, gsutil_args, gcloud_storage_map):
if gcloud_storage_map is None:
raise exception.GcloudStorageTranslationError(
'Command "{}" cannot be translated to gcloud storage because the'
' translation mapping is missing.'.format(self.command_name))
args = []
if isinstance(gcloud_storage_map.gcloud_command, str):
args = gcloud_storage_map.gcloud_command.split()
elif isinstance(gcloud_storage_map.gcloud_command, dict):
# If a command has sub-commands, e.g. gsutil pap set, gsutil pap get,
# all the flag mappings must be present in the sub-command's map
# because gsutil does not have command-specific flags
# when sub-commands are present.
if gcloud_storage_map.flag_map:
raise ValueError(
'Flags mapping should not be present at the top-level command if '
'a sub-command is used. Command: {}.'.format(self.command_name))
sub_command = gsutil_args[0]
sub_opts, parsed_args = self.ParseSubOpts(
args=gsutil_args[1:], should_update_sub_opts_and_args=False)
return self._get_gcloud_storage_args(
sub_opts, parsed_args,
gcloud_storage_map.gcloud_command.get(sub_command))
else:
raise ValueError('Incorrect mapping found for "{}" command'.format(
self.command_name))
if sub_opts:
for option, value in sub_opts:
if option not in gcloud_storage_map.flag_map:
raise exception.GcloudStorageTranslationError(
'Command option "{}" cannot be translated to'
' gcloud storage'.format(option))
args.append(gcloud_storage_map.flag_map[option].gcloud_flag)
if value != '':
# An empty string means that the user did not pass in a value
# for the flag.
args.append(value)
return args + gsutil_args
def _translate_top_level_flags(self):
"""Translates gsutil's top level flags.
Gsutil specifies the headers (-h) and boto config (-o) as top level flags
as well, but we handle those separately.
Returns:
A tuple. The first item is a list of top level flags that can be appended
to the gcloud storage command. The second item is a dict of environment
variables that can be set for the gcloud storage command execution.
"""
top_level_flags = []
env_variables = {}
if self.debug >= 3:
top_level_flags.extend(['--verbosity', 'debug'])
if self.debug == 4:
top_level_flags.append('--log-http')
if self.quiet_mode:
top_level_flags.append('--no-user-output-enabled')
if self.user_project:
top_level_flags.append('--billing-project=' + self.user_project)
if self.trace_token:
top_level_flags.append('--trace-token=' + self.trace_token)
if constants.IMPERSONATE_SERVICE_ACCOUNT:
top_level_flags.append('--impersonate-service-account=' +
constants.IMPERSONATE_SERVICE_ACCOUNT)
# TODO(b/208294509) Add --perf-trace-token translation.
if not self.parallel_operations:
# TODO(b/208301084) Set the --sequential flag instead.
env_variables['CLOUDSDK_STORAGE_THREAD_COUNT'] = '1'
env_variables['CLOUDSDK_STORAGE_PROCESS_COUNT'] = '1'
return top_level_flags, env_variables
def _translate_headers(self):
"""Translates gsutil headers to equivalent gcloud storage flags."""
flags = []
for header_key_raw, header_value in self.headers.items():
header_key = header_key_raw.lower()
if header_key == 'x-goog-api-version':
# Gsutil adds this header. We don't have to translate it for gcloud.
continue
flag = get_flag_from_header(header_key, header_value)
if self.command_name in DATA_TRANSFER_COMMANDS:
if flag is None:
raise exception.GcloudStorageTranslationError(
'Header cannot be translated to a gcloud storage equivalent'
' flag. Invalid header: {}:{}'.format(header_key, header_value))
else:
flags.append(flag)
elif (self.command_name in PRECONDITONS_ONLY_SUPPORTED_COMMANDS and
header_key in PRECONDITIONS_HEADERS):
flags.append(flag)
# We ignore the headers in all other cases, as gsutil does.
return flags
def _translate_boto_config(self):
"""Translates boto config options to gcloud storage properties.
Returns:
A tuple where first element is a list of flags and the second element is
a dict representing the env variables that can be set to set the
gcloud storage properties.
"""
flags = []
env_vars = {}
# Handle gs_json_host and gs_json_port.
gcs_json_endpoint = _get_gcs_json_endpoint_from_boto_config(config)
if gcs_json_endpoint:
env_vars['CLOUDSDK_API_ENDPOINT_OVERRIDES_STORAGE'] = gcs_json_endpoint
# Handle s3_host and s3_port.
s3_endpoint = _get_s3_endpoint_from_boto_config(config)
if s3_endpoint:
env_vars['CLOUDSDK_STORAGE_S3_ENDPOINT_URL'] = s3_endpoint
decryption_keys = []
for section_name, section in config.items():
for key, value in section.items():
if (key == 'encryption_key' and
self.command_name in ENCRYPTION_SUPPORTED_COMMANDS):
flags.append('--encryption-key=' + value)
# Boto config can have decryption keys in the form of
# decryption_key1..100.
elif (DECRYPTION_KEY_REGEX.match(key) and
self.command_name in ENCRYPTION_SUPPORTED_COMMANDS):
decryption_keys.append(value)
elif (key == 'content_language' and
self.command_name in DATA_TRANSFER_COMMANDS):
flags.append('--content-language=' + value)
elif key in _REQUIRED_BOTO_CONFIG_NOT_YET_SUPPORTED:
self.logger.error('The boto config field {}:{} cannot be translated'
' to gcloud storage equivalent.'.format(
section_name, key))
elif key == 'https_validate_certificates' and not value:
env_vars['CLOUDSDK_AUTH_DISABLE_SSL_VALIDATION'] = True
else:
env_var = _BOTO_CONFIG_MAP.get(section_name, {}).get(key, None)
if env_var is not None:
env_vars[env_var] = value
if decryption_keys:
flags.append('--decryption-keys=' + ','.join(decryption_keys))
return flags, env_vars
def get_gcloud_storage_args(self):
"""Translates the gsutil command flags to gcloud storage flags.
It uses the command_spec.gcloud_storage_map field that provides the
translation mapping for all the flags.
Returns:
A list of all the options and arguments that can be used with the
equivalent gcloud storage command.
Raises:
GcloudStorageTranslationError: If a flag or command cannot be translated.
ValueError: If there is any issue with the mapping provided by
GcloudStorageMap.
"""
return self._get_gcloud_storage_args(self.sub_opts, self.args,
self.gcloud_storage_map)
def _print_gcloud_storage_command_info(self,
gcloud_command,
env_variables,
dry_run=False):
logger_func = self.logger.info if dry_run else self.logger.debug
logger_func('Gcloud Storage Command: {}'.format(' '.join(gcloud_command)))
if env_variables:
logger_func('Environment variables for Gcloud Storage:')
for k, v in env_variables.items():
logger_func('%s=%s', k, v)
def translate_to_gcloud_storage_if_requested(self):
"""Translates the gsutil command to gcloud storage equivalent.
The translated commands get stored at
self._translated_gcloud_storage_command.
This method also translates the boto config, which gets stored as a dict
at self._translated_env_variables
Returns:
True if the command was successfully translated, else False.
"""
if self.command_name == 'version' or self.command_name == 'test':
# Running any command in debug mode will lead to calling gsutil version
# command. We don't want to translate the version command as this
# should always reflect the version that gsutil is using.
# We don't want to run any translation for the "test" command.
return False
use_gcloud_storage = config.getbool('GSUtil', 'use_gcloud_storage', False)
try:
hidden_shim_mode = HIDDEN_SHIM_MODE(
config.get('GSUtil', 'hidden_shim_mode', 'none'))
except ValueError:
raise exception.CommandException(
'Invalid option specified for'
' GSUtil:hidden_shim_mode config setting. Should be one of: {}'.
format(' | '.join([x.value for x in HIDDEN_SHIM_MODE])))
if use_gcloud_storage:
try:
top_level_flags, env_variables = self._translate_top_level_flags()
header_flags = self._translate_headers()
flags_from_boto, env_vars_from_boto = self._translate_boto_config()
env_variables.update(env_vars_from_boto)
gcloud_binary_path = _get_gcloud_binary_path()
gcloud_storage_command = ([gcloud_binary_path] +
self.get_gcloud_storage_args() +
top_level_flags + header_flags +
flags_from_boto)
if hidden_shim_mode == HIDDEN_SHIM_MODE.DRY_RUN:
self._print_gcloud_storage_command_info(gcloud_storage_command,
env_variables,
dry_run=True)
elif not os.environ.get('CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL'):
raise exception.GcloudStorageTranslationError(
'Requested to use "gcloud storage" but gsutil is not using the'
' same credentials as gcloud.'
' You can make gsutil use the same credentials by running:\n'
'{} config set pass_credentials_to_gsutil True'.format(
gcloud_binary_path))
else:
self._print_gcloud_storage_command_info(gcloud_storage_command,
env_variables)
self._translated_gcloud_storage_command = gcloud_storage_command
self._translated_env_variables = env_variables
return True
except exception.GcloudStorageTranslationError as e:
# Raise error if no_fallback mode has been requested. This mode
# should only be used for debugging and testing purposes.
if hidden_shim_mode == HIDDEN_SHIM_MODE.NO_FALLBACK:
raise exception.CommandException(e)
# For all other cases, we want to run gsutil.
self.logger.error(
'Cannot translate gsutil command to gcloud storage.'
' Going to run gsutil command. Error: %s', e)
return False
def run_gcloud_storage(self):
subprocess_envs = os.environ.copy()
subprocess_envs.update(self._translated_env_variables)
process = subprocess.run(self._translated_gcloud_storage_command,
env=subprocess_envs)
return process.returncode
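# Illustrative sketch (hypothetical command subclass, not from gsutil itself):
# a Command inheriting this mixin defines gcloud_storage_map and then, in its
# entry point, does roughly
#   if self.translate_to_gcloud_storage_if_requested():
#     return self.run_gcloud_storage()
#   # ...otherwise fall through to the normal gsutil implementation.
# so the shim only takes over when use_gcloud_storage=True and every flag,
# header, and boto option could be translated.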
|
|
# -*- coding: utf-8 -*-
"""HTML widgets for GUIs."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import json
import logging
from functools import partial
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from .qt import (
WebView, QObject, QWebChannel, QWidget, QGridLayout, QPlainTextEdit,
QLabel, QLineEdit, QCheckBox, QSpinBox, QDoubleSpinBox,
pyqtSlot, _static_abs_path, _block, Debouncer)
from phylib.utils import emit, connect
from phy.utils.color import colormaps, _is_bright
from phylib.utils._misc import _CustomEncoder, read_text, _pretty_floats
from phylib.utils._types import _is_integer
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# IPython widget
# -----------------------------------------------------------------------------
class IPythonView(RichJupyterWidget):
"""A view with an IPython console living in the same Python process as the GUI."""
def __init__(self, *args, **kwargs):
super(IPythonView, self).__init__(*args, **kwargs)
def start_kernel(self):
"""Start the IPython kernel."""
logger.debug("Starting the kernel.")
self.kernel_manager = QtInProcessKernelManager()
self.kernel_manager.start_kernel(show_banner=False)
self.kernel_manager.kernel.gui = 'qt'
self.kernel = self.kernel_manager.kernel
self.shell = self.kernel.shell
try:
self.kernel_client = self.kernel_manager.client()
self.kernel_client.start_channels()
except Exception as e: # pragma: no cover
logger.error("Could not start IPython kernel: %s.", str(e))
self.set_default_style('linux')
self.exit_requested.connect(self.stop)
def inject(self, **kwargs):
"""Inject variables into the IPython namespace."""
logger.debug("Injecting variables into the kernel: %s.", ', '.join(kwargs.keys()))
try:
self.kernel.shell.push(kwargs)
except Exception as e: # pragma: no cover
logger.error("Could not inject variables to the IPython kernel: %s.", str(e))
def attach(self, gui, **kwargs):
"""Add the view to the GUI, start the kernel, and inject the specified variables."""
gui.add_view(self)
self.start_kernel()
self.inject(gui=gui, **kwargs)
try:
import numpy
self.inject(np=numpy)
except ImportError: # pragma: no cover
pass
try:
import matplotlib.pyplot as plt
self.inject(plt=plt)
except ImportError: # pragma: no cover
pass
@connect(sender=self)
def on_close_view(view, gui):
self.stop()
def stop(self):
"""Stop the kernel."""
logger.debug("Stopping the kernel.")
try:
self.kernel_client.stop_channels()
self.kernel_manager.shutdown_kernel()
except Exception as e: # pragma: no cover
logger.error("Could not stop the IPython kernel: %s.", str(e))
# -----------------------------------------------------------------------------
# HTML widget
# -----------------------------------------------------------------------------
# Default CSS style of HTML widgets.
_DEFAULT_STYLE = """
* {
font-size: 8pt !important;
}
html, body, table {
background-color: black;
color: white;
font-family: sans-serif;
font-size: 12pt;
margin: 2px 4px;
}
input.filter {
width: 100% !important;
}
table tr[data-is_masked='true'] {
color: #888;
}
"""
# Bind the JS events to Python.
_DEFAULT_SCRIPT = """
document.addEventListener("DOMContentLoaded", function () {
new QWebChannel(qt.webChannelTransport, function (channel) {
var eventEmitter = channel.objects.eventEmitter;
window.eventEmitter = eventEmitter;
// All phy_events emitted from JS are relayed to
// Python's emitJS().
document.addEventListener("phy_event", function (e) {
console.debug("Emit from JS global: " + e.detail.name + " " + e.detail.data);
eventEmitter.emitJS(e.detail.name, JSON.stringify(e.detail.data));
});
});
});
"""
# Default HTML template of the widgets.
_PAGE_TEMPLATE = """
<html>
<head>
<title>{title:s}</title>
{header:s}
</head>
<body>
{body:s}
</body>
</html>
"""
def _uniq(seq):
"""Return the list of unique integers in a sequence, by keeping the order."""
seen = set()
seen_add = seen.add
return [int(x) for x in seq if not (x in seen or seen_add(x))]
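# For example, _uniq([3, 3, 1, 2, 1]) returns [3, 1, 2]: duplicates are dropped
# while the first-seen order is preserved, unlike sorted(set(...)).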
class Barrier(object):
"""Implement a synchronization barrier."""
def __init__(self):
self._keys = []
self._results = {}
self._callback_after_all = None
def _callback(self, key, *args, **kwargs):
self._results[key] = (args, kwargs)
if self._callback_after_all and self.have_all_finished():
self._callback_after_all()
def __call__(self, key):
self._keys.append(key)
return partial(self._callback, key)
def have_all_finished(self):
"""Whether all tasks have finished."""
return set(self._keys) == set(self._results.keys())
def wait(self):
"""Wait until all tasks have finished."""
_block(self.have_all_finished)
def after_all_finished(self, callback):
"""Specify the callback function to call after all tasks have finished."""
self._callback_after_all = callback
def result(self, key):
"""Return the result of a task specified by its key."""
return self._results.get(key, None)
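# Illustrative usage sketch (assumes `widget` is an HTMLWidget defined below):
# each asynchronous call registers a key and uses the returned callable as its
# callback, then wait() blocks until every callback has fired.
#   b = Barrier()
#   widget.eval_js("1 + 1", callback=b(1))
#   widget.eval_js("2 + 2", callback=b(2))
#   b.wait()
#   args, kwargs = b.result(1)  # -> ((2,), {})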
class HTMLBuilder(object):
"""Build an HTML widget."""
def __init__(self, title=''):
self.title = title
self.headers = []
self.body = ''
self.add_style(_DEFAULT_STYLE)
def add_style(self, s):
"""Add a CSS style."""
self.add_header('<style>\n{}\n</style>'.format(s))
def add_style_src(self, filename):
"""Add a link to a stylesheet URL."""
self.add_header(('<link rel="stylesheet" type="text/css" href="{}" />').format(filename))
def add_script(self, s):
"""Add Javascript code."""
self.add_header('<script>{}</script>'.format(s))
def add_script_src(self, filename):
"""Add a link to a Javascript file."""
self.add_header('<script src="{}"></script>'.format(filename))
def add_header(self, s):
"""Add HTML headers."""
self.headers.append(s)
def set_body_src(self, filename):
"""Set the path to an HTML file containing the body of the widget."""
path = _static_abs_path(filename)
self.set_body(read_text(path))
def set_body(self, body):
"""Set the HTML body of the widget."""
self.body = body
def _build_html(self):
"""Build the HTML page."""
header = '\n'.join(self.headers)
html = _PAGE_TEMPLATE.format(title=self.title, header=header, body=self.body)
return html
@property
def html(self):
"""Return the reconstructed HTML code of the widget."""
return self._build_html()
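# Illustrative usage sketch (not part of the original module): HTMLWidget below
# owns one of these builders and calls build() to render it; standalone use
# looks like
#   builder = HTMLBuilder(title='demo')
#   builder.add_style('body { background: #222; }')
#   builder.set_body('<p>Hello</p>')
#   page = builder.html  # default style plus the additions above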
class JSEventEmitter(QObject):
"""Object used to relay the Javascript events to Python. Some vents can be debounced so that
there is a minimal delay between two consecutive events of the same type."""
_parent = None
def __init__(self, *args, debounce_events=()):
super(JSEventEmitter, self).__init__(*args)
self._debouncer = Debouncer()
self._debounce_events = debounce_events
@pyqtSlot(str, str)
def emitJS(self, name, arg_json):
logger.log(5, "Emit from Python %s %s.", name, arg_json)
args = str(name), self._parent, json.loads(str(arg_json))
# NOTE: debounce some events but not other events coming from JS.
# This is typically used for select events of table widgets.
if name in self._debounce_events:
self._debouncer.submit(emit, *args)
else:
emit(*args)
class HTMLWidget(WebView):
"""An HTML widget that is displayed with Qt, with Javascript support and Python-Javascript
interaction capabilities. These interactions are asynchronous in Qt5, which requires
extensive use of callback functions in Python, as well as synchronization primitives
for unit tests.
Constructor
------------
parent : Widget
title : window title
debounce_events : list-like
The list of event names, raised by the underlying HTML widget, that should be debounced.
"""
def __init__(self, *args, title='', debounce_events=()):
# Due to a limitation of QWebChannel, need to register a Python object
# BEFORE this web view is created?!
self._event = JSEventEmitter(*args, debounce_events=debounce_events)
self._event._parent = self
self.channel = QWebChannel(*args)
self.channel.registerObject('eventEmitter', self._event)
super(HTMLWidget, self).__init__(*args)
self.page().setWebChannel(self.channel)
self.builder = HTMLBuilder(title=title)
self.builder.add_script_src('qrc:///qtwebchannel/qwebchannel.js')
self.builder.add_script(_DEFAULT_SCRIPT)
@property
def debouncer(self):
"""Widget debouncer."""
return self._event._debouncer
def build(self, callback=None):
"""Rebuild the HTML code of the widget."""
self.set_html(self.builder.html, callback=callback)
def view_source(self, callback=None):
"""View the HTML source of the widget."""
return self.eval_js(
"document.getElementsByTagName('html')[0].innerHTML", callback=callback)
# Javascript methods
# -------------------------------------------------------------------------
def eval_js(self, expr, callback=None):
"""Evaluate a Javascript expression.
Parameters
----------
expr : str
A Javascript expression.
callback : function
A Python function that is called once the Javascript expression has been
evaluated. It takes as input the output of the Javascript expression.
"""
logger.log(5, "%s eval JS %s", self.__class__.__name__, expr)
return self.page().runJavaScript(expr, callback or (lambda _: _))
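# Illustrative sketch (not part of the original module): evaluating Javascript
# from Python. The evaluation is asynchronous, so the result is only available
# through the callback; there is no synchronous return value.
def _example_eval_js(widget):
    """Ask an HTMLWidget for its document title and log it when it arrives."""
    def on_result(title):
        logger.debug("Document title: %s", title)
    widget.eval_js('document.title', callback=on_result)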
# -----------------------------------------------------------------------------
# HTML table
# -----------------------------------------------------------------------------
def dumps(o):
"""Dump a JSON object into a string, with pretty floats."""
return json.dumps(_pretty_floats(o), cls=_CustomEncoder)
def _color_styles():
"""Use colormap colors in table widget."""
return '\n'.join(
'''
#table .color-%d > td[class='id'] {
background-color: rgb(%d, %d, %d);
%s
}
''' % (i, r, g, b, 'color: #000 !important;' if _is_bright((r, g, b)) else '')
for i, (r, g, b) in enumerate(colormaps.default * 255))
class Table(HTMLWidget):
"""A sortable table with support for selection. Derives from HTMLWidget.
This table uses the following Javascript implementation: https://github.com/kwikteam/tablejs
This Javascript class builds upon ListJS: https://listjs.com/
"""
_ready = False
def __init__(
self, *args, columns=None, value_names=None, data=None, sort=None, title='',
debounce_events=()):
super(Table, self).__init__(*args, title=title, debounce_events=debounce_events)
self._init_table(columns=columns, value_names=value_names, data=data, sort=sort)
def eval_js(self, expr, callback=None):
"""Evaluate a Javascript expression.
The `table` Javascript variable can be used to interact with the underlying Javascript
table.
The table has sortable columns, a filter text box, support for single and multi selection
of rows. Rows can be skippable (used for ignored clusters in phy).
The table can raise Javascript events that are relayed to Python. Objects are
transparently serialized and deserialized in JSON. Basic types (numbers, strings, lists)
are transparently converted between Python and Javascript.
Parameters
----------
expr : str
A Javascript expression.
callback : function
A Python function that is called once the Javascript expression has been
evaluated. It takes as input the output of the Javascript expression.
"""
# Avoid JS errors when the table is not yet fully loaded.
expr = 'if (typeof table !== "undefined") ' + expr
return super(Table, self).eval_js(expr, callback=callback)
def _init_table(self, columns=None, value_names=None, data=None, sort=None):
"""Build the table."""
columns = columns or ['id']
value_names = value_names or columns
data = data or []
b = self.builder
b.set_body_src('index.html')
b.add_style(_color_styles())
self.data = data
self.columns = columns
self.value_names = value_names
emit('pre_build', self)
data_json = dumps(self.data)
columns_json = dumps(self.columns)
value_names_json = dumps(self.value_names)
sort_json = dumps(sort)
b.body += '''
<script>
var data = %s;
var options = {
valueNames: %s,
columns: %s,
sort: %s,
};
var table = new Table('table', options, data);
</script>
''' % (data_json, value_names_json, columns_json, sort_json)
self.build(lambda html: emit('ready', self))
connect(event='select', sender=self, func=lambda *args: self.update(), last=True)
connect(event='ready', sender=self, func=lambda *args: self._set_ready())
def _set_ready(self):
"""Set the widget as ready."""
self._ready = True
def is_ready(self):
"""Whether the widget has been fully loaded."""
return self._ready
def sort_by(self, name, sort_dir='asc'):
"""Sort by a given variable."""
logger.log(5, "Sort by `%s` %s.", name, sort_dir)
self.eval_js('table.sort_("{}", "{}");'.format(name, sort_dir))
def filter(self, text=''):
"""Filter the view with a Javascript expression."""
logger.log(5, "Filter table with `%s`.", text)
self.eval_js('table.filter_("{}", true);'.format(text))
def get_ids(self, callback=None):
"""Get the list of ids."""
self.eval_js('table._getIds();', callback=callback)
def get_next_id(self, callback=None):
"""Get the next non-skipped row id."""
self.eval_js('table.getSiblingId(undefined, "next");', callback=callback)
def get_previous_id(self, callback=None):
"""Get the previous non-skipped row id."""
self.eval_js('table.getSiblingId(undefined, "previous");', callback=callback)
def first(self, callback=None):
"""Select the first item."""
self.eval_js('table.selectFirst();', callback=callback)
def last(self, callback=None):
"""Select the last item."""
self.eval_js('table.selectLast();', callback=callback)
def next(self, callback=None):
"""Select the next non-skipped row."""
self.eval_js('table.moveToSibling(undefined, "next");', callback=callback)
def previous(self, callback=None):
"""Select the previous non-skipped row."""
self.eval_js('table.moveToSibling(undefined, "previous");', callback=callback)
def select(self, ids, callback=None, **kwargs):
"""Select some rows in the table from Python.
This function calls `table.select()` in Javascript, which raises a Javascript event
relayed to Python. This sequence of actions is the same when the user selects
rows directly in the HTML view.
"""
ids = _uniq(ids)
assert all(_is_integer(_) for _ in ids)
self.eval_js('table.select({}, {});'.format(dumps(ids), dumps(kwargs)), callback=callback)
def scroll_to(self, id):
"""Scroll until a given row is visible."""
self.eval_js('table._scrollTo({});'.format(id))
def set_busy(self, busy):
"""Set the busy state of the GUI."""
self.eval_js('table.setBusy({});'.format('true' if busy else 'false'))
def get(self, id, callback=None):
"""Get the object given its id."""
self.eval_js('table.get("id", {})[0]["_values"]'.format(id), callback=callback)
def add(self, objects):
"""Add objects object to the table."""
if not objects:
return
self.eval_js('table.add_({});'.format(dumps(objects)))
def change(self, objects):
"""Change some objects."""
if not objects:
return
self.eval_js('table.change_({});'.format(dumps(objects)))
def remove(self, ids):
"""Remove some objects from their ids."""
if not ids:
return
self.eval_js('table.remove_({});'.format(dumps(ids)))
def remove_all(self):
"""Remove all rows in the table."""
self.eval_js('table.removeAll();')
def remove_all_and_add(self, objects):
"""Remove all rows in the table and add new objects."""
if not objects:
return self.remove_all()
self.eval_js('table.removeAllAndAdd({});'.format(dumps(objects)))
def get_selected(self, callback=None):
"""Get the currently selected rows."""
self.eval_js('table.selected()', callback=callback)
def get_current_sort(self, callback=None):
"""Get the current sort as a tuple `(name, dir)`."""
self.eval_js('table._currentSort()', callback=callback)
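# Illustrative sketch (not part of the original module): creating a Table and
# selecting rows from Python once it is ready. Row data and columns are made up.
def _example_table(qt_parent):
    """Build a small sortable table and select its first two rows."""
    data = [{'id': i, 'n_spikes': 10 * i} for i in range(5)]
    table = Table(qt_parent, columns=['id', 'n_spikes'], data=data)
    # select() goes through Javascript and comes back to Python as a
    # `select` event, exactly as if the user had clicked the rows.
    connect(event='ready', sender=table, func=lambda *a: table.select([0, 1]))
    return table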
# -----------------------------------------------------------------------------
# KeyValueWidget
# -----------------------------------------------------------------------------
class KeyValueWidget(QWidget):
"""A Qt widget that displays a simple form where each field has a name, a type, and accept
user input."""
def __init__(self, *args, **kwargs):
super(KeyValueWidget, self).__init__(*args, **kwargs)
self._items = []
self._layout = QGridLayout(self)
def add_pair(self, name, default=None, vtype=None):
"""Add a key-value pair.
Parameters
----------
name : str
default : object
vtype : str
Can be 'str' (text box), 'int' (spin box), 'float' (spin box), 'bool' (checkbox),
'multiline' (text edit for multiline str), or 'list' (several widgets).
"""
if isinstance(default, list):
# Take lists into account.
for i, value in enumerate(default):
self.add_pair('%s[%d]' % (name, i), default=value, vtype=vtype)
return
if vtype is None and default is not None:
vtype = type(default).__name__
if vtype == 'str':
widget = QLineEdit(self)
widget.setText(default or '')
elif vtype == 'multiline':
widget = QPlainTextEdit(self)
widget.setPlainText(default or '')
widget.setMinimumHeight(200)
widget.setMaximumHeight(400)
elif vtype == 'int':
widget = QSpinBox(self)
widget.setMinimum(-1e9)
widget.setMaximum(+1e9)
widget.setValue(default or 0)
elif vtype == 'float':
widget = QDoubleSpinBox(self)
widget.setMinimum(-1e9)
widget.setMaximum(+1e9)
widget.setValue(default or 0)
elif vtype == 'bool':
widget = QCheckBox(self)
widget.setChecked(default is True)
else: # pragma: no cover
raise ValueError("Not supported vtype: %s." % vtype)
widget.setMaximumWidth(400)
label = QLabel(name, self)
label.setMaximumWidth(150)
row = len(self._items)
self._layout.addWidget(label, row, 0)
self._layout.addWidget(widget, row, 1)
self.setLayout(self._layout)
self._items.append((name, vtype, default, widget))
@property
def names(self):
"""List of field names."""
return sorted(
set(i[0] if '[' not in i[0] else i[0][:i[0].index('[')] for i in self._items))
def get_widget(self, name):
"""Get the widget of a field."""
for name_, vtype, default, widget in self._items:
if name == name_:
return widget
def get_value(self, name):
"""Get the default or user-entered value of a field."""
# Detect if the requested name is a list type.
names = set(i[0] for i in self._items)
if '%s[0]' % name in names:
out = []
i = 0
namei = '%s[%d]' % (name, i)
while namei in names:
out.append(self.get_value(namei))
i += 1
namei = '%s[%d]' % (name, i)
return out
for name_, vtype, default, widget in self._items:
if name_ == name:
if vtype == 'str':
return str(widget.text())
elif vtype == 'multiline':
return str(widget.toPlainText())
elif vtype == 'int':
return int(widget.text())
elif vtype == 'float':
return float(widget.text().replace(',', '.'))
elif vtype == 'bool':
return bool(widget.isChecked())
def attach(self, gui): # pragma: no cover
"""Add the view to a GUI."""
gui.add_view(self)
def to_dict(self):
"""Return the key-value mapping dictionary as specified by the user inputs and defaults."""
return {name: self.get_value(name) for name in self.names}
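# Illustrative sketch (not part of the original module): collecting user input
# with a KeyValueWidget. Field names, defaults, and types are made up.
def _example_key_value_widget(qt_parent):
    """Build a small form and return its field-name-to-value mapping."""
    w = KeyValueWidget(qt_parent)
    w.add_pair('n_clusters', 10)             # int -> spin box
    w.add_pair('threshold', 0.5)             # float -> double spin box
    w.add_pair('channels', [1, 2, 3])        # list -> one widget per element
    w.add_pair('notes', '', vtype='multiline')
    return w.to_dict()  # e.g. {'channels': [1, 2, 3], 'n_clusters': 10, ...}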
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage automatic weight adjustments."""
import collections
import datetime
from google.cloud import ndb
import six
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.google_cloud_utils import big_query
from clusterfuzz._internal.metrics import fuzzer_stats
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from handlers import base_handler
from libs import handler
QuerySpecification = collections.namedtuple(
'QuerySpecification', ['query_format', 'formatter', 'reason'])
SpecificationMatch = collections.namedtuple('SpecificationMatch',
['new_weight', 'reason'])
DEFAULT_MULTIPLIER = 30.0 # Used for blackbox and jobs that are not yet run.
DEFAULT_SANITIZER_WEIGHT = 0.1
DEFAULT_ENGINE_WEIGHT = 1.0
TARGET_COUNT_WEIGHT_CAP = 100.0
SANITIZER_BASE_WEIGHT = 0.1
# TODO(ochang): architecture weights.
SANITIZER_WEIGHTS = {
'ASAN': 5 * SANITIZER_BASE_WEIGHT,
'CFI': 1 * SANITIZER_BASE_WEIGHT,
'MSAN': 2 * SANITIZER_BASE_WEIGHT,
'TSAN': 1 * SANITIZER_BASE_WEIGHT,
'UBSAN': 1 * SANITIZER_BASE_WEIGHT,
}
ENGINE_WEIGHTS = {
'libFuzzer': 1.0,
'afl': 1.0,
'honggfuzz': 0.2,
}
# Formatters for query specifications.
def _past_day_formatter(query_format, dataset):
"""Simple formatter to get stats for the past day."""
end_time = utils.utcnow().date()
start_time = end_time - datetime.timedelta(days=1)
return query_format.format(
dataset=dataset, start_time=start_time, end_time=end_time)
def _new_fuzzer_formatter(query_format, dataset):
"""Prepare a query to check for new fuzzers from the past week."""
now = utils.utcnow().date()
cutoff_time = now - datetime.timedelta(days=7)
return query_format.format(dataset=dataset, cutoff_time=cutoff_time)
def _coverage_formatter(query_format, dataset):
"""Prepare a query to check for changes in coverage week over week."""
end_date = utils.utcnow().date() - datetime.timedelta(days=1)
middle_date = end_date - datetime.timedelta(days=7)
start_date = end_date - datetime.timedelta(days=14)
return query_format.format(
dataset=dataset,
start_date=start_date,
middle_date=middle_date,
end_date=end_date)
# Most of our queries should simply average a field name to get a ratio showing
# how often some behavior occurs.
GENERIC_QUERY_FORMAT = """
SELECT
fuzzer,
job,
1.0 - (1.0 - {min_weight}) * AVG({field_name}) AS new_weight
FROM
{{dataset}}.TestcaseRun
WHERE
_PARTITIONTIME BETWEEN TIMESTAMP('{{start_time}}')
AND TIMESTAMP('{{end_time}}')
GROUP BY
fuzzer,
job
"""
# Heavily reduce the weight for fuzzers which frequently crash on startup. This
# is indicative of a very serious problem that makes it highly unlikely that
# we'll find anything during fuzzing.
STARTUP_CRASH_SPECIFICATION = QuerySpecification(
query_format=GENERIC_QUERY_FORMAT.format(
field_name='startup_crash_count', min_weight=0.10),
formatter=_past_day_formatter,
reason='frequent startup crashes')
# Reduce weight somewhat for fuzzers with many slow units. If a particular unit
# runs for so long that we detect it as a slow unit, it usually means that the
# fuzzer is not making good use of its cycles while running or needs a fix.
SLOW_UNIT_SPECIFICATION = QuerySpecification(
query_format=GENERIC_QUERY_FORMAT.format(
field_name='slow_unit_count', min_weight=0.25),
formatter=_past_day_formatter,
reason='frequent slow units')
# This should end up being very similar to the slow unit specification, and is
# included for the same reason.
TIMEOUT_SPECIFICATION = QuerySpecification(
query_format=GENERIC_QUERY_FORMAT.format(
field_name='timeout_count', min_weight=0.25),
formatter=_past_day_formatter,
reason='frequent timeouts')
# Fuzzers with extremely frequent OOMs may contain leaks or other issues that
# signal that they need some improvement. Run with a slightly reduced weight
# until the issues are fixed.
OOM_SPECIFICATION = QuerySpecification(
query_format=GENERIC_QUERY_FORMAT.format(
field_name='oom_count', min_weight=0.25),
formatter=_past_day_formatter,
reason='frequent OOMs')
# Fuzzers which are crashing frequently may not be making full use of their
# allotted time for fuzzing, and may end up being more effective once the known
# issues are fixed. This rule is more lenient than some of the others as even
# healthy fuzzers are expected to have some crashes.
CRASH_SPECIFICATION = QuerySpecification(
query_format=GENERIC_QUERY_FORMAT.format(
field_name='crash_count', min_weight=0.50),
formatter=_past_day_formatter,
reason='frequent crashes')
# New fuzzers/jobs should run much more frequently than others. The query below
# boosts the weight to 5.0 for any fuzzer/job pair whose earliest recorded
# TestcaseRun falls within the past week.
NEW_FUZZER_FORMAT = """
SELECT
fuzzer,
job,
5.0 as new_weight,
MIN(_PARTITIONTIME) as first_time
FROM
{dataset}.TestcaseRun
GROUP BY
fuzzer,
job
HAVING
first_time >= TIMESTAMP('{cutoff_time}')
"""
NEW_FUZZER_SPECIFICATION = QuerySpecification(
query_format=NEW_FUZZER_FORMAT,
formatter=_new_fuzzer_formatter,
reason='new fuzzer')
# Format to query for fuzzers with minimal change in week to week coverage.
COVERAGE_UNCHANGED_FORMAT = """
SELECT
recent.fuzzer AS fuzzer,
recent.job AS job,
0.75 as new_weight
FROM (
SELECT
fuzzer,
job,
MAX(edge_coverage / edges_total) AS coverage
FROM
{dataset}.TestcaseRun
WHERE
_PARTITIONTIME BETWEEN TIMESTAMP('{middle_date}')
AND TIMESTAMP('{end_date}')
AND edges_total > 0
AND edge_coverage > 0
GROUP BY
fuzzer,
job
HAVING
coverage <= 1.0) AS recent
JOIN (
SELECT
fuzzer,
job,
MAX(edge_coverage / edges_total) AS coverage
FROM
{dataset}.TestcaseRun
WHERE
_PARTITIONTIME BETWEEN TIMESTAMP('{start_date}')
AND TIMESTAMP('{middle_date}')
AND edges_total > 0
AND edge_coverage > 0
GROUP BY
fuzzer,
job
HAVING
coverage <= 1.0) AS older
ON
recent.fuzzer = older.fuzzer
AND recent.job = older.job
WHERE
ABS((recent.coverage - older.coverage) / recent.coverage) < 0.01
"""
COVERAGE_UNCHANGED_SPECIFICATION = QuerySpecification(
query_format=COVERAGE_UNCHANGED_FORMAT,
formatter=_coverage_formatter,
reason='coverage flat over past 2 weeks')
# Mappings of which specifications to use for each fuzzing engine.
LIBFUZZER_SPECIFICATIONS = [
COVERAGE_UNCHANGED_SPECIFICATION,
CRASH_SPECIFICATION,
NEW_FUZZER_SPECIFICATION,
OOM_SPECIFICATION,
SLOW_UNIT_SPECIFICATION,
STARTUP_CRASH_SPECIFICATION,
TIMEOUT_SPECIFICATION,
]
AFL_SPECIFICATIONS = [
CRASH_SPECIFICATION,
NEW_FUZZER_SPECIFICATION,
STARTUP_CRASH_SPECIFICATION,
]
RESTORE_DEFAULT_MATCH = SpecificationMatch(
new_weight=1.0, reason='no longer matches any weight adjustment rules')
def _query_helper(client, query):
"""Helper function to get fuzzer stats."""
return client.query(query=query).rows
def _update_match(matches, fuzzer, job, match):
"""Update the weight for a fuzzer/job."""
key = (fuzzer, job)
old_match = matches.get(key, RESTORE_DEFAULT_MATCH)
new_weight = match.new_weight
old_weight = old_match.new_weight
# Rules that increase weights are expected to take precedence over any that
# lower the weight. Issues with new fuzzers may be fixed intraday, so they
# should not additionally be penalized for issues like crashes.
if old_weight > 1.0:
return
# Always update the weight if the previous value is the default. This is
# required to deal with specifications that are meant to set the weight above
# 1.0. Otherwise, prioritize only the most penalizing match for this pairing.
if old_match == RESTORE_DEFAULT_MATCH or new_weight < old_weight:
matches[key] = match
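# Illustrative sketch (not part of the original module): precedence behavior of
# _update_match for a single fuzzer/job pair. The pair name below is made up.
def _example_update_match():
  """Apply two competing penalties and return the one that survives."""
  matches = {}
  fuzzer, job = 'libFuzzer_example_target', 'libfuzzer_asan_example'
  _update_match(matches, fuzzer, job,
                SpecificationMatch(new_weight=0.25, reason='frequent timeouts'))
  _update_match(matches, fuzzer, job,
                SpecificationMatch(new_weight=0.50, reason='frequent crashes'))
  # The more penalizing 0.25 match is kept; the 0.50 match is ignored.
  return matches[(fuzzer, job)]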
def update_weight_for_target(fuzz_target_name, job, match):
"""Set the weight for a particular target."""
target_job = data_handler.get_fuzz_target_job(fuzz_target_name, job)
if not target_job:
# Bail out. This is expected if any fuzzer/job combinations become outdated.
return
weight = match.new_weight
logs.log('Adjusted weight to %f for target %s and job %s (%s).' %
(weight, fuzz_target_name, job, match.reason))
target_job.weight = weight
target_job.put()
def update_matches_for_specification(specification, client, engine, matches,
run_set):
"""Run a query and adjust weights based on a given query specification."""
query = specification.formatter(specification.query_format,
fuzzer_stats.dataset_name(engine))
results = _query_helper(client, query)
for result in results:
fuzzer = result['fuzzer']
job = result['job']
new_weight = result['new_weight']
if new_weight is None:
continue
run_set.add((fuzzer, job))
if new_weight != 1.0:
match = SpecificationMatch(
new_weight=new_weight, reason=specification.reason)
_update_match(matches, fuzzer, job, match)
def update_target_weights_for_engine(client, engine, specifications):
"""Update all fuzz target weights for the specified engine."""
matches = {}
run_set = set()
# All fuzzers with non-default weights must be tracked with a special
# specification. This ensures that they will be restored to normal weight
# once conditions causing adjustments are no longer met.
target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTarget.engine == engine).filter(
data_types.FuzzTargetJob.weight != 1.0)
for target_job in target_jobs:
matches[(target_job.fuzz_target_name,
target_job.job)] = RESTORE_DEFAULT_MATCH
for match in specifications:
update_matches_for_specification(match, client, engine, matches, run_set)
for (fuzzer, job), match in six.iteritems(matches):
if (fuzzer, job) not in run_set:
# This ensures that we don't reset weights for fuzzers with problems if
# they didn't run in the time covered by our queries.
continue
update_weight_for_target(fuzzer, job, match)
logs.log('Weight adjustments complete for engine %s.' % engine)
def store_current_weights_in_bigquery():
"""Update a bigquery table containing the daily stats."""
rows = []
target_jobs = ndb_utils.get_all_from_model(data_types.FuzzTargetJob)
for target_job in target_jobs:
row = {
'fuzzer': target_job.fuzz_target_name,
'job': target_job.job,
'weight': target_job.weight
}
rows.append(big_query.Insert(row=row, insert_id=None))
client = big_query.Client(dataset_id='main', table_id='fuzzer_weights')
client.insert(rows)
def update_job_weight(job_name, multiplier):
"""Update a job weight."""
tool_name = environment.get_memory_tool_name(job_name)
multiplier *= SANITIZER_WEIGHTS.get(tool_name, DEFAULT_SANITIZER_WEIGHT)
engine = environment.get_engine_for_job(job_name)
multiplier *= ENGINE_WEIGHTS.get(engine, DEFAULT_ENGINE_WEIGHT)
query = data_types.FuzzerJob.query(data_types.FuzzerJob.job == job_name)
changed_weights = []
for fuzzer_job in query:
if fuzzer_job.multiplier != multiplier:
fuzzer_job.multiplier = multiplier
changed_weights.append(fuzzer_job)
if changed_weights:
ndb_utils.put_multi(changed_weights)
def update_job_weights():
"""Update job weights."""
for job in data_types.Job.query():
multiplier = DEFAULT_MULTIPLIER
if environment.is_engine_fuzzer_job(job.name):
targets_count = ndb.Key(data_types.FuzzTargetsCount, job.name).get()
# If the count is 0, it may be due to a bad build or some other issue. Use
# the default weight in that case to allow for recovery.
if targets_count and targets_count.count:
multiplier = targets_count.count
multiplier = min(multiplier, TARGET_COUNT_WEIGHT_CAP)
update_job_weight(job.name, multiplier)
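# Illustrative sketch (not part of the original module): how the final FuzzerJob
# multiplier is derived for an engine job, mirroring update_job_weights() and
# update_job_weight(). The tool and engine names passed in are assumptions.
def _example_job_multiplier(targets_count=40, tool_name='ASAN', engine='libFuzzer'):
  """Return the multiplier an engine job with `targets_count` targets would get."""
  multiplier = min(float(targets_count), TARGET_COUNT_WEIGHT_CAP)
  multiplier *= SANITIZER_WEIGHTS.get(tool_name, DEFAULT_SANITIZER_WEIGHT)
  multiplier *= ENGINE_WEIGHTS.get(engine, DEFAULT_ENGINE_WEIGHT)
  return multiplier  # e.g. min(40, 100) * 0.5 * 1.0 == 20.0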
class Handler(base_handler.Handler):
"""Handler to periodically update fuzz target weights based on performance."""
@handler.cron()
def get(self):
"""Process all fuzz targets and update FuzzTargetJob weights."""
client = big_query.Client()
update_target_weights_for_engine(client, 'libFuzzer',
LIBFUZZER_SPECIFICATIONS)
update_target_weights_for_engine(client, 'afl', AFL_SPECIFICATIONS)
update_job_weights()
store_current_weights_in_bigquery()
|
|
"""Support for Xiaomi Mi Flora BLE plant sensor."""
from datetime import timedelta
import logging
import btlewrap
from btlewrap import BluetoothBackendException
from miflora import miflora_poller
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import (
CONDUCTIVITY,
CONF_FORCE_UPDATE,
CONF_MAC,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_SCAN_INTERVAL,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
EVENT_HOMEASSISTANT_START,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.util.temperature import celsius_to_fahrenheit
try:
import bluepy.btle # noqa: F401 pylint: disable=unused-import
BACKEND = btlewrap.BluepyBackend
except ImportError:
BACKEND = btlewrap.GatttoolBackend
_LOGGER = logging.getLogger(__name__)
CONF_ADAPTER = "adapter"
CONF_MEDIAN = "median"
CONF_GO_UNAVAILABLE_TIMEOUT = "go_unavailable_timeout"
DEFAULT_ADAPTER = "hci0"
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = "Mi Flora"
DEFAULT_GO_UNAVAILABLE_TIMEOUT = timedelta(seconds=7200)
SCAN_INTERVAL = timedelta(seconds=1200)
ATTR_LAST_SUCCESSFUL_UPDATE = "last_successful_update"
# Sensor types are defined like: Name, units, icon, device_class
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE],
"light": ["Light intensity", LIGHT_LUX, None, DEVICE_CLASS_ILLUMINANCE],
"moisture": ["Moisture", PERCENTAGE, "mdi:water-percent", None],
"conductivity": ["Conductivity", CONDUCTIVITY, "mdi:flash-circle", None],
"battery": ["Battery", PERCENTAGE, None, DEVICE_CLASS_BATTERY],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
vol.Optional(
CONF_GO_UNAVAILABLE_TIMEOUT, default=DEFAULT_GO_UNAVAILABLE_TIMEOUT
): cv.time_period,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the MiFlora sensor."""
backend = BACKEND
_LOGGER.debug("Miflora is using %s backend", backend.__name__)
cache = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL).total_seconds()
poller = miflora_poller.MiFloraPoller(
config.get(CONF_MAC),
cache_timeout=cache,
adapter=config.get(CONF_ADAPTER),
backend=backend,
)
force_update = config.get(CONF_FORCE_UPDATE)
median = config.get(CONF_MEDIAN)
go_unavailable_timeout = config.get(CONF_GO_UNAVAILABLE_TIMEOUT)
devs = []
for parameter in config[CONF_MONITORED_CONDITIONS]:
name = SENSOR_TYPES[parameter][0]
unit = (
hass.config.units.temperature_unit
if parameter == "temperature"
else SENSOR_TYPES[parameter][1]
)
icon = SENSOR_TYPES[parameter][2]
device_class = SENSOR_TYPES[parameter][3]
prefix = config.get(CONF_NAME)
if prefix:
name = f"{prefix} {name}"
devs.append(
MiFloraSensor(
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
)
)
async_add_entities(devs)
class MiFloraSensor(SensorEntity):
"""Implementing the MiFlora sensor."""
def __init__(
self,
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
):
"""Initialize the sensor."""
self.poller = poller
self.parameter = parameter
self._unit = unit
self._icon = icon
self._name = name
self._state = None
self._device_class = device_class
self.data = []
self._force_update = force_update
self.go_unavailable_timeout = go_unavailable_timeout
self.last_successful_update = dt_util.utc_from_timestamp(0)
# Median is used to filter out outliers. A median of 3 will filter
# single outliers, while a median of 5 will filter double outliers.
# Use median_count = 1 if no filtering is required.
self.median_count = median
async def async_added_to_hass(self):
"""Set initial state."""
@callback
def on_startup(_):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, on_startup)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if did update since 2h."""
return self.last_successful_update > (
dt_util.utcnow() - self.go_unavailable_timeout
)
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return {ATTR_LAST_SUCCESSFUL_UPDATE: self.last_successful_update}
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def state_class(self):
"""Return the state class of this entity."""
return STATE_CLASS_MEASUREMENT
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
@property
def force_update(self):
"""Force update."""
return self._force_update
def update(self):
"""
Update current conditions.
This uses a rolling median over 3 values to filter out outliers.
"""
try:
_LOGGER.debug("Polling data for %s", self.name)
data = self.poller.parameter_value(self.parameter)
except (OSError, BluetoothBackendException) as err:
_LOGGER.info("Polling error %s: %s", type(err).__name__, err)
return
if data is not None:
_LOGGER.debug("%s = %s", self.name, data)
if self._unit == TEMP_FAHRENHEIT:
data = celsius_to_fahrenheit(data)
self.data.append(data)
self.last_successful_update = dt_util.utcnow()
else:
_LOGGER.info("Did not receive any data from Mi Flora sensor %s", self.name)
# Remove old data from median list or set sensor value to None
# if no data is available anymore
if self.data:
self.data = self.data[1:]
else:
self._state = None
return
_LOGGER.debug("Data collected: %s", self.data)
if len(self.data) > self.median_count:
self.data = self.data[1:]
if len(self.data) == self.median_count:
median = sorted(self.data)[int((self.median_count - 1) / 2)]
_LOGGER.debug("Median is: %s", median)
self._state = median
elif self._state is None:
_LOGGER.debug("Set initial state")
self._state = self.data[0]
else:
_LOGGER.debug("Not yet enough data for median calculation")
|
|
#=======================================================================
# sim.py
#=======================================================================
# This is the common top-level simulator. ISA implementations can use
# various hooks to configure the behavior.
import os
import sys
# ensure we know where the pypy source code is
# XXX: removed the dependency to PYDGIN_PYPY_SRC_DIR because rpython
# libraries are much slower than native python when running on an
# interpreter. So unless the user has added the rpython source to their
# PYTHONPATH, we should use native python.
#try:
# sys.path.append( os.environ['PYDGIN_PYPY_SRC_DIR'] )
#except KeyError as e:
# print "NOTE: PYDGIN_PYPY_SRC_DIR not defined, using pure python " \
# "implementation"
from pydgin.debug import Debug, pad, pad_hex
from pydgin.misc import FatalError, NotImplementedInstError
from pydgin.jit import JitDriver, hint, set_user_param, set_param
def jitpolicy(driver):
from rpython.jit.codewriter.policy import JitPolicy
return JitPolicy()
#-------------------------------------------------------------------------
# Sim
#-------------------------------------------------------------------------
# Abstract simulator class
class Sim( object ):
def __init__( self, arch_name_human, arch_name="", jit_enabled=False ):
# the human-friendly architecture name can contain capital letters,
# special characters, etc.
self.arch_name_human = arch_name_human
if arch_name == "":
self.arch_name = arch_name_human.lower()
else:
self.arch_name = arch_name
self.jit_enabled = jit_enabled
if jit_enabled:
self.jitdriver = JitDriver( greens =['pc',],
reds = ['max_insts', 'state', 'sim',],
virtualizables =['state',],
get_printable_location=self.get_location,
)
# Set the default trace limit here. Different ISAs can override this
# value if necessary
self.default_trace_limit = 400000
self.max_insts = 0
#-----------------------------------------------------------------------
# decode
#-----------------------------------------------------------------------
# This needs to be implemented in the child class
def decode( self, bits ):
raise NotImplementedError()
#-----------------------------------------------------------------------
# hooks provided for isa implementations
#-----------------------------------------------------------------------
def pre_execute( self ):
pass
def post_execute( self ):
pass
#-----------------------------------------------------------------------
# init_state
#-----------------------------------------------------------------------
# This needs to be implemented in the child class
def init_state( self, exe_file, exe_name, run_argv, envp, testbin ):
raise NotImplementedError()
#-----------------------------------------------------------------------
# help message
#-----------------------------------------------------------------------
# the help message to display on --help
help_message = """
Pydgin %s Instruction Set Simulator
usage: %s <args> <sim_exe> <sim_args>
<sim_exe> the executable to be simulated
<sim_args> arguments to be passed to the simulated executable
<args> the following optional arguments are supported:
--help,-h Show this message and exit
--test Run in testing mode (for running asm tests)
--env,-e <NAME>=<VALUE>
Set an environment variable to be passed to the
simulated program. Can use multiple --env flags to set
multiple environment variables.
--debug,-d <flags>[:<start_after>]
Enable debug flags in a comma-separated form (e.g.
"--debug syscalls,insts"). If provided, debugs starts
after <start_after> cycles. The following flags are
supported:
insts cycle-by-cycle instructions
rf register file accesses
mem memory accesses
regdump register dump
syscalls syscall information
bootstrap initial stack and register state
--max-insts <i> Run until the maximum number of instructions
--jit <flags> Set flags to tune the JIT (see
rpython.rlib.jit.PARAMETER_DOCS)
"""
#-----------------------------------------------------------------------
# get_location
#-----------------------------------------------------------------------
# for debug printing in PYPYLOG
@staticmethod
def get_location( pc ):
# TODO: add the disassembly of the instruction here as well
return "pc: %x" % pc
#-----------------------------------------------------------------------
# run
#-----------------------------------------------------------------------
def run( self ):
self = hint( self, promote=True )
s = self.state
max_insts = self.max_insts
jitdriver = self.jitdriver
while s.running:
jitdriver.jit_merge_point(
pc = s.fetch_pc(),
max_insts = max_insts,
state = s,
sim = self,
)
# constant-fold pc and mem
pc = hint( s.fetch_pc(), promote=True )
old = pc
mem = hint( s.mem, promote=True )
if s.debug.enabled( "insts" ):
print pad( "%x" % pc, 8, " ", False ),
# the print statement in memcheck conflicts with @elidable in iread,
# so we use the normal read (which includes the memory checks) when
# memcheck is enabled
if s.debug.enabled( "memcheck" ):
inst_bits = mem.read( pc, 4 )
else:
# we use trace elidable iread instead of just read
inst_bits = mem.iread( pc, 4 )
try:
inst, exec_fun = self.decode( inst_bits )
if s.debug.enabled( "insts" ):
print "%s %s %s" % (
pad_hex( inst_bits ),
pad( inst.str, 12 ),
pad( "%d" % s.num_insts, 8 ), ),
self.pre_execute()
exec_fun( s, inst )
except NotImplementedInstError:
# re-decode instruction to get the instruction name
inst, _ = self.decode( inst_bits )
print "Instruction not implemented: %s (pc: 0x%s), aborting!" \
% ( inst.str, pad_hex( pc ) )
break
except FatalError as error:
print "Exception in execution (pc: 0x%s), aborting!" % pad_hex( pc )
print "Exception message: %s" % error.msg
break
s.num_insts += 1 # TODO: should this be done inside instruction definition?
if s.stats_en: s.stat_num_insts += 1
self.post_execute()
if s.debug.enabled( "insts" ):
print
if s.debug.enabled( "regdump" ):
s.rf.print_regs( per_row=4 )
# check if we have reached the end of the maximum instructions and
# exit if necessary
if max_insts != 0 and s.num_insts >= max_insts:
print "Reached the max_insts (%d), exiting." % max_insts
break
if s.fetch_pc() < old:
jitdriver.can_enter_jit(
pc = s.fetch_pc(),
max_insts = max_insts,
state = s,
sim = self,
)
print 'DONE! Status =', s.status
print 'Instructions Executed =', s.num_insts
#-----------------------------------------------------------------------
# get_entry_point
#-----------------------------------------------------------------------
# generates and returns the entry_point function used to start the
# simulator
def get_entry_point( self ):
def entry_point( argv ):
# set the trace_limit parameter of the jitdriver
if self.jit_enabled:
set_param( self.jitdriver, "trace_limit", self.default_trace_limit )
filename_idx = 0
debug_flags = []
debug_starts_after = 0
testbin = False
max_insts = 0
envp = []
# we're using a mini state machine to parse the args
prev_token = ""
# list of tokens that require an additional arg
tokens_with_args = [ "-h", "--help",
"-e", "--env",
"-d", "--debug",
"--max-insts",
"--jit",
]
# go through the args one by one and parse accordingly
for i in xrange( 1, len( argv ) ):
token = argv[i]
if prev_token == "":
if token == "--help" or token == "-h":
print self.help_message % ( self.arch_name_human, argv[0] )
return 0
elif token == "--test":
testbin = True
elif token == "--debug" or token == "-d":
prev_token = token
# warn the user if debugs are not enabled for this translation
if not Debug.global_enabled:
print "WARNING: debugs are not enabled for this translation. " + \
"To allow debugs, translate with --debug option."
elif token in tokens_with_args:
prev_token = token
elif token[:1] == "-":
# unknown option
print "Unknown argument %s" % token
return 1
else:
# this marks the start of the program name
filename_idx = i
break
else:
if prev_token == "--env" or prev_token == "-e":
envp.append( token )
elif prev_token == "--debug" or prev_token == "-d":
# if debug start after provided (using a colon), parse it
debug_tokens = token.split( ":" )
if len( debug_tokens ) > 1:
debug_starts_after = int( debug_tokens[1] )
debug_flags = debug_tokens[0].split( "," )
elif prev_token == "--max-insts":
self.max_insts = int( token )
elif prev_token == "--jit":
# pass the jit flags to rpython.rlib.jit
set_user_param( self.jitdriver, token )
prev_token = ""
if filename_idx == 0:
print "You must supply a filename"
return 1
# create a Debug object which contains the debug flags
self.debug = Debug( debug_flags, debug_starts_after )
filename = argv[ filename_idx ]
# args after program are args to the simulated program
run_argv = argv[ filename_idx : ]
# Open the executable for reading
try:
exe_file = open( filename, 'rb' )
except IOError:
print "Could not open file %s" % filename
return 1
# Call ISA-dependent init_state to load program, initialize memory
# etc.
self.init_state( exe_file, filename, run_argv, envp, testbin )
# pass the state to debug for cycle-triggered debugging
self.debug.set_state( self.state )
# Close after loading
exe_file.close()
# Execute the program
self.run()
return 0
return entry_point
#-----------------------------------------------------------------------
# target
#-----------------------------------------------------------------------
# Enables RPython translation of our interpreter.
def target( self, driver, args ):
# if --debug flag is provided in translation, we enable debug printing
if "--debug" in args:
print "Enabling debugging"
Debug.global_enabled = True
else:
print "Disabling debugging"
# form a name
exe_name = "pydgin-%s" % self.arch_name
if driver.config.translation.jit:
exe_name += "-jit"
else:
exe_name += "-nojit"
if Debug.global_enabled:
exe_name += "-debug"
print "Translated binary name:", exe_name
driver.exe_name = exe_name
# NOTE: RPython has an assertion to check the type of entry_point to
# be function (not a bound method). So we use get_entry_point which
# generates a function type
#return self.entry_point, None
return self.get_entry_point(), None
#-------------------------------------------------------------------------
# init_sim
#-------------------------------------------------------------------------
# Simulator implementations need to call this function at the top level.
# This takes care of adding target function to top level environment and
# running the simulation in interpreted mode if directly called
# ( __name__ == "__main__" )
def init_sim( sim ):
# this is a bit hacky: we get the global variables of the caller from
# the stack frame to determine if this is being run top level and add
# target function required by rpython toolchain
caller_globals = sys._getframe(1).f_globals
caller_name = caller_globals[ "__name__" ]
# add target function to top level
caller_globals[ "target" ] = sim.target
#-----------------------------------------------------------------------
# main
#-----------------------------------------------------------------------
# Enables CPython simulation of our interpreter.
if caller_name == "__main__":
# enable debug flags in interpreted mode
Debug.global_enabled = True
sim.get_entry_point()( sys.argv )
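#-------------------------------------------------------------------------
# example
#-------------------------------------------------------------------------
# Illustrative sketch (not part of the original simulator): the minimal shape
# of an ISA implementation built on top of Sim. The decode and init_state
# bodies below are hypothetical placeholders; a real ISA returns
# ( inst, exec_fun ) pairs from decode and builds a machine state object in
# init_state. Calling init_sim( ExampleSim() ) at module level would register
# the rpython `target` function and, when run directly, start interpretation.
class ExampleSim( Sim ):
  def __init__( self ):
    Sim.__init__( self, "Example ISA", arch_name="example", jit_enabled=False )
  def decode( self, bits ):
    # map the fetched instruction bits to ( inst_object, execute_function )
    raise NotImplementedError()
  def init_state( self, exe_file, exe_name, run_argv, envp, testbin ):
    # load the program from exe_file and construct self.state here
    raise NotImplementedError()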
|
|
import logging
import string
import sys
from collections import deque
import numpy as np
from scipy import linalg, special
from sklearn.base import BaseEstimator
from sklearn.utils import check_array, check_random_state
from . import _hmmc, _utils
from .utils import normalize, log_normalize, log_mask_zero
_log = logging.getLogger(__name__)
#: Supported decoder algorithms.
DECODER_ALGORITHMS = frozenset(("viterbi", "map"))
class ConvergenceMonitor:
"""
Monitor and report convergence to :data:`sys.stderr`.
Attributes
----------
history : deque
The log probability of the data for the last two training
iterations. If the values are not strictly increasing, the
model did not converge.
iter : int
Number of iterations performed while training the model.
Examples
--------
Use custom convergence criteria by subclassing ``ConvergenceMonitor``
and redefining the ``converged`` method. The resulting subclass can
be used by creating an instance and pointing a model's ``monitor_``
attribute to it prior to fitting.
>>> from hmmlearn.base import ConvergenceMonitor
>>> from hmmlearn import hmm
>>>
>>> class ThresholdMonitor(ConvergenceMonitor):
... @property
... def converged(self):
... return (self.iter == self.n_iter or
... self.history[-1] >= self.tol)
>>>
>>> model = hmm.GaussianHMM(n_components=2, tol=5, verbose=True)
>>> model.monitor_ = ThresholdMonitor(model.monitor_.tol,
... model.monitor_.n_iter,
... model.monitor_.verbose)
"""
_template = "{iter:>10d} {log_prob:>16.4f} {delta:>+16.4f}"
def __init__(self, tol, n_iter, verbose):
"""
Parameters
----------
tol : double
Convergence threshold. EM has converged either if the maximum
number of iterations is reached or if the log probability improvement
between two consecutive iterations is less than this threshold.
n_iter : int
Maximum number of iterations to perform.
verbose : bool
Whether per-iteration convergence reports are printed.
"""
self.tol = tol
self.n_iter = n_iter
self.verbose = verbose
self.history = deque()
self.iter = 0
def __repr__(self):
class_name = self.__class__.__name__
params = sorted(dict(vars(self), history=list(self.history)).items())
return (f"{class_name}(\n"
+ "".join(map(" {}={},\n".format, *zip(*params)))
+ ")")
def _reset(self):
"""Reset the monitor's state."""
self.iter = 0
self.history.clear()
def report(self, log_prob):
"""
Report convergence to :data:`sys.stderr`.
The output consists of three columns: iteration number, log
probability of the data at the current iteration and convergence
rate. At the first iteration convergence rate is unknown and
is thus denoted by NaN.
Parameters
----------
log_prob : float
The log probability of the data as computed by EM algorithm
in the current iteration.
"""
if self.verbose:
delta = log_prob - self.history[-1] if self.history else np.nan
message = self._template.format(
iter=self.iter + 1, log_prob=log_prob, delta=delta)
print(message, file=sys.stderr)
self.history.append(log_prob)
self.iter += 1
@property
def converged(self):
"""Whether the EM algorithm converged."""
# XXX we might want to check that ``log_prob`` is non-decreasing.
return (self.iter == self.n_iter or
(len(self.history) >= 2 and
self.history[-1] - self.history[-2] < self.tol))
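# Illustrative sketch (not part of the original module): how the monitor
# decides convergence. The log-probability values below are made up.
def _example_convergence_monitor():
    """Report three log probabilities and return whether EM would stop."""
    monitor = ConvergenceMonitor(tol=1e-2, n_iter=10, verbose=False)
    for log_prob in (-120.0, -100.0, -99.995):
        monitor.report(log_prob)
    # The last improvement (0.005) is below tol, so `converged` is True
    # even though fewer than n_iter iterations were performed.
    return monitor.converged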
class BaseHMM(BaseEstimator):
"""
Base class for Hidden Markov Models.
This class allows for easy evaluation of, sampling from, and maximum a
posteriori estimation of the parameters of a HMM.
Attributes
----------
monitor_ : ConvergenceMonitor
Monitor object used to check the convergence of EM.
startprob_ : array, shape (n_components, )
Initial state occupation distribution.
transmat_ : array, shape (n_components, n_components)
Matrix of transition probabilities between states.
Notes
-----
Normally, one should use a subclass of `.BaseHMM`, with its specialization
towards a given emission model. In rare cases, the base class can also be
useful in itself, if one simply wants to generate a sequence of states
using `.BaseHMM.sample`. In that case, the feature matrix will have zero
features.
"""
def __init__(self, n_components=1,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params=string.ascii_letters,
init_params=string.ascii_letters,
implementation="log"):
"""
Parameters
----------
n_components : int
Number of states in the model.
startprob_prior : array, shape (n_components, ), optional
Parameters of the Dirichlet prior distribution for
:attr:`startprob_`.
transmat_prior : array, shape (n_components, n_components), optional
Parameters of the Dirichlet prior distribution for each row
of the transition probabilities :attr:`transmat_`.
algorithm : {"viterbi", "map"}, optional
Decoder algorithm.
random_state : RandomState or an int seed, optional
A random number generator instance.
n_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose : bool, optional
Whether per-iteration convergence reports are printed to
:data:`sys.stderr`. Convergence can also be diagnosed using the
:attr:`monitor_` attribute.
params, init_params : string, optional
The parameters that get updated during (``params``) or initialized
before (``init_params``) the training. Can contain any combination
of 's' for startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all parameters.
implementation : string, optional
Determines if the forward-backward algorithm is implemented with
logarithms ("log"), or using scaling ("scaling"). The default is
to use logarithms for backwards compatibility. However, the
scaling implementation is generally faster.
"""
self.n_components = n_components
self.params = params
self.init_params = init_params
self.startprob_prior = startprob_prior
self.transmat_prior = transmat_prior
self.algorithm = algorithm
self.random_state = random_state
self.n_iter = n_iter
self.tol = tol
self.verbose = verbose
self.implementation = implementation
self.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)
def get_stationary_distribution(self):
"""Compute the stationary distribution of states."""
# The stationary distribution is proportional to the left-eigenvector
# associated with the largest eigenvalue (i.e., 1) of the transition
# matrix.
_utils.check_is_fitted(self, "transmat_")
eigvals, eigvecs = linalg.eig(self.transmat_.T)
eigvec = np.real_if_close(eigvecs[:, np.argmax(eigvals)])
return eigvec / eigvec.sum()
def score_samples(self, X, lengths=None):
"""
Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
log_prob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
"""
return self._score(X, lengths, compute_posteriors=True)
def score(self, X, lengths=None):
"""
Compute the log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
log_prob : float
Log likelihood of ``X``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
decode : Find most likely state sequence corresponding to ``X``.
"""
return self._score(X, lengths, compute_posteriors=False)[0]
def _score(self, X, lengths=None, *, compute_posteriors):
"""
Helper for `score` and `score_samples`.
Compute the log probability under the model, as well as posteriors if
*compute_posteriors* is True (otherwise, an empty array is returned
for the latter).
"""
_utils.check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
impl = {
"scaling": self._score_scaling,
"log": self._score_log,
}[self.implementation]
return impl(
X=X, lengths=lengths, compute_posteriors=compute_posteriors)
def _score_log(self, X, lengths=None, *, compute_posteriors):
"""
Compute the log probability under the model, as well as posteriors if
*compute_posteriors* is True (otherwise, an empty array is returned
for the latter).
"""
log_prob = 0
sub_posteriors = [np.empty((0, self.n_components))]
for sub_X in _utils.split_X_lengths(X, lengths):
log_frameprob = self._compute_log_likelihood(sub_X)
log_probij, fwdlattice = self._do_forward_log_pass(log_frameprob)
log_prob += log_probij
if compute_posteriors:
bwdlattice = self._do_backward_log_pass(log_frameprob)
sub_posteriors.append(
self._compute_posteriors_log(fwdlattice, bwdlattice))
return log_prob, np.concatenate(sub_posteriors)
def _score_scaling(self, X, lengths=None, *, compute_posteriors):
log_prob = 0
sub_posteriors = [np.empty((0, self.n_components))]
for sub_X in _utils.split_X_lengths(X, lengths):
frameprob = self._compute_likelihood(sub_X)
log_probij, fwdlattice, scaling_factors = \
self._do_forward_scaling_pass(frameprob)
log_prob += log_probij
if compute_posteriors:
bwdlattice = self._do_backward_scaling_pass(
frameprob, scaling_factors)
sub_posteriors.append(
self._compute_posteriors_scaling(fwdlattice, bwdlattice))
return log_prob, np.concatenate(sub_posteriors)
def _decode_viterbi(self, X):
log_frameprob = self._compute_log_likelihood(X)
return self._do_viterbi_pass(log_frameprob)
def _decode_map(self, X):
_, posteriors = self.score_samples(X)
log_prob = np.max(posteriors, axis=1).sum()
state_sequence = np.argmax(posteriors, axis=1)
return log_prob, state_sequence
def decode(self, X, lengths=None, algorithm=None):
"""
Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
algorithm : string
Decoder algorithm. Must be one of "viterbi" or "map".
If not given, :attr:`decoder` is used.
Returns
-------
log_prob : float
Log probability of the produced state sequence.
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X`` obtained via a given
decoder ``algorithm``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_utils.check_is_fitted(self, "startprob_")
self._check()
algorithm = algorithm or self.algorithm
if algorithm not in DECODER_ALGORITHMS:
raise ValueError(f"Unknown decoder {algorithm!r}")
decoder = {
"viterbi": self._decode_viterbi,
"map": self._decode_map
}[algorithm]
X = check_array(X)
log_prob = 0
sub_state_sequences = []
for sub_X in _utils.split_X_lengths(X, lengths):
# XXX decoder works on a single sample at a time!
sub_log_prob, sub_state_sequence = decoder(sub_X)
log_prob += sub_log_prob
sub_state_sequences.append(sub_state_sequence)
return log_prob, np.concatenate(sub_state_sequences)
def predict(self, X, lengths=None):
"""
Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X``.
"""
_, state_sequence = self.decode(X, lengths)
return state_sequence
def predict_proba(self, X, lengths=None):
"""
Compute the posterior probability for each state in the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample from ``X``.
"""
_, posteriors = self.score_samples(X, lengths)
return posteriors
def sample(self, n_samples=1, random_state=None, currstate=None):
"""
Generate random samples from the model.
Parameters
----------
n_samples : int
Number of samples to generate.
random_state : RandomState or an int seed
A random number generator instance. If ``None``, the object's
``random_state`` is used.
currstate : int
Current state, as the initial state of the samples.
Returns
-------
X : array, shape (n_samples, n_features)
Feature matrix.
state_sequence : array, shape (n_samples, )
State sequence produced by the model.
Examples
--------
::
# generate samples continuously
_, Z = model.sample(n_samples=10)
X, Z = model.sample(n_samples=10, currstate=Z[-1])
"""
_utils.check_is_fitted(self, "startprob_")
self._check()
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
transmat_cdf = np.cumsum(self.transmat_, axis=1)
if currstate is None:
startprob_cdf = np.cumsum(self.startprob_)
currstate = (startprob_cdf > random_state.rand()).argmax()
state_sequence = [currstate]
X = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for t in range(n_samples - 1):
currstate = (transmat_cdf[currstate] > random_state.rand()) \
.argmax()
state_sequence.append(currstate)
X.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.atleast_2d(X), np.array(state_sequence, dtype=int)
def fit(self, X, lengths=None):
"""
Estimate model parameters.
An initialization step is performed before entering the
EM algorithm. If you want to avoid this step for a subset of
the parameters, pass proper ``init_params`` keyword argument
to estimator's constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
self._init(X)
self._check()
self.monitor_._reset()
impl = {
"scaling": self._fit_scaling,
"log": self._fit_log,
}[self.implementation]
for iter in range(self.n_iter):
stats = self._initialize_sufficient_statistics()
curr_log_prob = 0
for sub_X in _utils.split_X_lengths(X, lengths):
lattice, log_prob, posteriors, fwdlattice, bwdlattice = \
impl(sub_X)
# Derived HMM classes will implement the following method to
# update their probability distributions, so keep
# a single call to this method for simplicity.
self._accumulate_sufficient_statistics(
stats, sub_X, lattice, posteriors, fwdlattice,
bwdlattice)
curr_log_prob += log_prob
# XXX must be before convergence check, because otherwise
# there won't be any updates for the case ``n_iter=1``.
self._do_mstep(stats)
self.monitor_.report(curr_log_prob)
if self.monitor_.converged:
break
if (self.transmat_.sum(axis=1) == 0).any():
_log.warning("Some rows of transmat_ have zero sum because no "
"transition from the state was ever observed.")
return self
def _fit_scaling(self, X):
frameprob = self._compute_likelihood(X)
log_prob, fwdlattice, scaling_factors = \
self._do_forward_scaling_pass(frameprob)
bwdlattice = self._do_backward_scaling_pass(frameprob, scaling_factors)
posteriors = self._compute_posteriors_scaling(fwdlattice, bwdlattice)
return frameprob, log_prob, posteriors, fwdlattice, bwdlattice
def _fit_log(self, X):
log_frameprob = self._compute_log_likelihood(X)
log_prob, fwdlattice = self._do_forward_log_pass(log_frameprob)
bwdlattice = self._do_backward_log_pass(log_frameprob)
posteriors = self._compute_posteriors_log(fwdlattice, bwdlattice)
return log_frameprob, log_prob, posteriors, fwdlattice, bwdlattice
def _do_viterbi_pass(self, log_frameprob):
state_sequence, log_prob = _hmmc.viterbi(
log_mask_zero(self.startprob_), log_mask_zero(self.transmat_),
log_frameprob)
return log_prob, state_sequence
def _do_forward_scaling_pass(self, frameprob):
fwdlattice, scaling_factors = _hmmc.forward_scaling(
np.asarray(self.startprob_), np.asarray(self.transmat_),
frameprob)
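# With scaling, the per-frame scaling factors absorb the likelihood of the
# sequence, so its log-likelihood is recovered as minus the sum of their
# logs.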
log_prob = -np.sum(np.log(scaling_factors))
return log_prob, fwdlattice, scaling_factors
def _do_forward_log_pass(self, log_frameprob):
fwdlattice = _hmmc.forward_log(
log_mask_zero(self.startprob_), log_mask_zero(self.transmat_),
log_frameprob)
with np.errstate(under="ignore"):
return special.logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_scaling_pass(self, frameprob, scaling_factors):
bwdlattice = _hmmc.backward_scaling(
np.asarray(self.startprob_), np.asarray(self.transmat_),
frameprob, scaling_factors)
return bwdlattice
def _do_backward_log_pass(self, log_frameprob):
bwdlattice = _hmmc.backward_log(
log_mask_zero(self.startprob_), log_mask_zero(self.transmat_),
log_frameprob)
return bwdlattice
def _compute_posteriors_scaling(self, fwdlattice, bwdlattice):
posteriors = fwdlattice * bwdlattice
normalize(posteriors, axis=1)
return posteriors
def _compute_posteriors_log(self, fwdlattice, bwdlattice):
# gamma is guaranteed to be correctly normalized by log_prob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
log_gamma = fwdlattice + bwdlattice
log_normalize(log_gamma, axis=1)
with np.errstate(under="ignore"):
return np.exp(log_gamma)
def _needs_init(self, code, name):
if code in self.init_params:
if hasattr(self, name):
_log.warning(
"Even though the %r attribute is set, it will be "
"overwritten during initialization because 'init_params' "
"contains %r", name, code)
return True
if not hasattr(self, name):
return True
return False
def _get_n_fit_scalars_per_param(self):
"""
Return a mapping of fittable parameter names (as in ``self.params``)
to the number of corresponding scalar parameters that will actually be
fitted.
This is used to detect whether the user did not pass enough data points
for a non-degenerate fit.
"""
def _init(self, X):
"""
Initialize model parameters prior to fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
"""
init = 1. / self.n_components
if self._needs_init("s", "startprob_"):
self.startprob_ = np.full(self.n_components, init)
if self._needs_init("t", "transmat_"):
self.transmat_ = np.full((self.n_components, self.n_components),
init)
n_fit_scalars_per_param = self._get_n_fit_scalars_per_param()
if n_fit_scalars_per_param is not None:
n_fit_scalars = sum(
n_fit_scalars_per_param[p] for p in self.params)
if X.size < n_fit_scalars:
_log.warning(
"Fitting a model with %d free scalar parameters with only "
"%d data points will result in a degenerate solution.",
n_fit_scalars, X.size)
def _check_sum_1(self, name):
"""Check that an array describes one or more distributions."""
s = getattr(self, name).sum(axis=-1)
if not np.allclose(s, 1):
raise ValueError(
f"{name} must sum to 1 (got {s:.4f})" if s.ndim == 0 else
f"{name} rows must sum to 1 (got {s})" if s.ndim == 1 else
"Expected 1D or 2D array")
def _check(self):
"""
Validate model parameters prior to fitting.
Raises
------
ValueError
If any of the parameters are invalid, e.g. if :attr:`startprob_`
don't sum to 1.
"""
self.startprob_ = np.asarray(self.startprob_)
if len(self.startprob_) != self.n_components:
raise ValueError("startprob_ must have length n_components")
self._check_sum_1("startprob_")
self.transmat_ = np.asarray(self.transmat_)
if self.transmat_.shape != (self.n_components, self.n_components):
raise ValueError(
"transmat_ must have shape (n_components, n_components)")
self._check_sum_1("transmat_")
def _compute_likelihood(self, X):
"""
Compute per-component probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
Returns
-------
log_prob : array, shape (n_samples, n_components)
Log probability of each sample in ``X`` for each of the
model states.
"""
if self._compute_log_likelihood != \
BaseHMM._compute_log_likelihood.__get__(self): # prevent recursion
return np.exp(self._compute_log_likelihood(X))
else:
raise NotImplementedError("Must be overridden in subclass")
def _compute_log_likelihood(self, X):
"""
Compute per-component emission log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
Returns
-------
log_prob : array, shape (n_samples, n_components)
Emission log probability of each sample in ``X`` for each of the
model states, i.e., ``log(p(X|state))``.
"""
if self._compute_likelihood != \
BaseHMM._compute_likelihood.__get__(self): # prevent recursion
return np.log(self._compute_likelihood(X))
else:
raise NotImplementedError("Must be overridden in subclass")
def _generate_sample_from_state(self, state, random_state=None):
"""
Generate a random sample from a given component.
Parameters
----------
state : int
Index of the component to condition on.
random_state: RandomState or an int seed
A random number generator instance. If ``None``, the object's
``random_state`` is used.
Returns
-------
X : array, shape (n_features, )
A random sample from the emission distribution corresponding
to a given component.
"""
return ()
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
"""
Initialize sufficient statistics required for M-step.
The method is *pure*, meaning that it doesn't change the state of
the instance. For extensibility, computed statistics are stored
in a dictionary.
Returns
-------
nobs : int
Number of samples in the data.
start : array, shape (n_components, )
An array where the i-th element corresponds to the posterior
probability of the first sample being generated by the i-th state.
trans : array, shape (n_components, n_components)
An array where the (i, j)-th element corresponds to the posterior
probability of transitioning from the i-th to the j-th state.
"""
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(
self, stats, X, lattice, posteriors, fwdlattice, bwdlattice):
"""
Update sufficient statistics from a given sample.
Parameters
----------
stats : dict
Sufficient statistics as returned by
:meth:`~.BaseHMM._initialize_sufficient_statistics`.
X : array, shape (n_samples, n_features)
Sample sequence.
lattice : array, shape (n_samples, n_components)
Probabilities or log-probabilities of each sample
under each of the model states, depending on the chosen
implementation of the forward-backward algorithm.
posteriors : array, shape (n_samples, n_components)
Posterior probabilities of each sample being generated by each
of the model states.
fwdlattice, bwdlattice : array, shape (n_samples, n_components)
forward and backward probabilities.
"""
impl = {
"scaling": self._accumulate_sufficient_statistics_scaling,
"log": self._accumulate_sufficient_statistics_log,
}[self.implementation]
return impl(stats=stats, X=X, lattice=lattice, posteriors=posteriors,
fwdlattice=fwdlattice, bwdlattice=bwdlattice)
def _accumulate_sufficient_statistics_scaling(
self, stats, X, lattice, posteriors, fwdlattice, bwdlattice):
"""
Implementation of `_accumulate_sufficient_statistics`
for ``implementation = "scaling"``.
"""
stats['nobs'] += 1
if 's' in self.params:
stats['start'] += posteriors[0]
if 't' in self.params:
n_samples, n_components = lattice.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_samples <= 1:
return
xi_sum = _hmmc.compute_scaling_xi_sum(
fwdlattice, self.transmat_, bwdlattice, lattice)
stats['trans'] += xi_sum
def _accumulate_sufficient_statistics_log(
self, stats, X, lattice, posteriors, fwdlattice, bwdlattice):
"""
Implementation of `_accumulate_sufficient_statistics`
for ``implementation = "log"``.
"""
stats['nobs'] += 1
if 's' in self.params:
stats['start'] += posteriors[0]
if 't' in self.params:
n_samples, n_components = lattice.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_samples <= 1:
return
log_xi_sum = _hmmc.compute_log_xi_sum(
fwdlattice, log_mask_zero(self.transmat_), bwdlattice, lattice)
with np.errstate(under="ignore"):
stats['trans'] += np.exp(log_xi_sum)
def _do_mstep(self, stats):
"""
Perform the M-step of EM algorithm.
Parameters
----------
stats : dict
Sufficient statistics updated from all available samples.
"""
# If a prior is < 1, `prior - 1 + stats['start']` can be negative. In
# that case maximization of (n1+e1) log p1 + ... + (ns+es) log ps under
# the conditions sum(p) = 1 and all(p >= 0) shows that the negative
# terms can just be set to zero.
# The ``np.where`` calls guard against updating forbidden states
# or transitions in e.g. a left-right HMM.
if 's' in self.params:
startprob_ = np.maximum(self.startprob_prior - 1 + stats['start'],
0)
self.startprob_ = np.where(self.startprob_ == 0, 0, startprob_)
normalize(self.startprob_)
if 't' in self.params:
transmat_ = np.maximum(self.transmat_prior - 1 + stats['trans'], 0)
self.transmat_ = np.where(self.transmat_ == 0, 0, transmat_)
normalize(self.transmat_, axis=1)
_BaseHMM = BaseHMM # Backcompat name, will be deprecated in the future.
|
|
"""
Dynamic Factors model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .model import Model
from .tools import (
concat
)
class FAVAR(Model):
r"""
Factor-Augmented Vector Autoregression
Parameters
----------
observed : array_like
The observed fundamental time-series process :math:`y`
informational : array_like, optional
The observed informational time-series :math:`x`
k_factors : int
The number of unobserved factors
order : int
The order of the vector autoregression
Notes
-----
**Model**
The FAVAR model can be written as (see [1]_ for all notation and details):
.. math::
\begin{bmatrix} F_t \\ Y_t \end{bmatrix} & = \Phi(L)
\begin{bmatrix} F_{t-1} \\ Y_{t-1} \end{bmatrix} + v_t \\
X_t & = \Lambda^f F_t + \Lambda^y Y_t + e_t \\
e_t \sim N(0, R) \\
v_t \sim N(0, Q)
with :math:`R` diagonal and :math:`Q` unrestricted. Let the dimensions of
the model be denoted:
- :math:`X_t` is :math:`N \times 1`
- :math:`F_t` is :math:`K \times 1`
- :math:`Y_t` is :math:`M \times 1`
**State-space Representation**
The mapping to state-space form is fairly straightforward, with:
.. math::
Z_t & = \begin{bmatrix} \Lambda^f & \Lambda^y \\ 0 & I \end{bmatrix} \\
T_t & = T_{\Phi(L)}^c\\
where :math:`T_{\Phi(L)}^c` represents the companion matrix associated with the
VAR(1) representation of :math:`\Phi(L)`.
**Identification** (see [1]_, section II.D)
Since the combination of the factors and the factor loading matrix is
(jointly) fundamentally indeterminate, the first level of identifying
restrictions requires:
.. math::
\Lambda^f & = \begin{bmatrix} I_{K \times K} \\ \tilde \Lambda^f \end{bmatrix} \\
\Lambda^y & = \begin{bmatrix} 0_{K \times M} \\ \tilde \Lambda^y \end{bmatrix} \\
where:
- :math:`\tilde \Lambda^f` is :math:`N-K \times K`
- :math:`\tilde \Lambda^y` is :math:`N-K \times M`
Additional identifying restrictions (e.g. for the identification of
structural shocks) can be placed on the lag polynomial :math:`\Phi(L)`. In
particular, we assume (as in [1]_) a recursive ordering of the factors and
observables, such that:
- :math:`F_{1,t}` can depend only on lagged values
- :math:`F_{2,t}` can depend only on :math:`F_{1,t}` and lagged values
- ...
- :math:`F_{i,t}` can depend only on :math:`F_{1,t}, \dots, F_{i-1, t}` and lagged values
- ...
- :math:`Y_{1,t}` can depend only on :math:`F_{t-1}`
- ...
- :math:`Y_{M,t}` can depend on all other variables contemporaneously
**Parameters**
There are parameters to be estimated in the following matrices:
- :math:`\tilde \Lambda^f`: :math:`(N-K) \times K` parameters (due to fundamental identifying restriction)
- :math:`\tilde \Lambda^y`: :math:`(N-K) \times M` parameters (due to fundamental identifying restriction)
- :math:`R`: :math:`N` parameters (restriction to uncorrelated series - diagonal matrix)
- :math:`T_{\Phi(L)}^c`: :math:`d*(K+M)^2` parameters (where :math:`d` is the lag order)
- :math:`Q`: :math:`(K+M)^2` parameters (unrestricted state covariance matrix)
In total, there are:
.. math::
(N-K) * (K+M) + N + d*(K+M)^2 + (K+M)^2 = [(N-K) + (d+1)*(K+M)] (K+M) + N
For example, if as in [1]_, :math:`N=109, d=1, K=5, M=1`, then the number
of parameters is 805.
References
----------
.. [1] Bernanke, Ben S., Jean Boivin, and Piotr Eliasz. 2005.
"Measuring the Effects of Monetary Policy: A Factor-Augmented Vector
Autoregressive (FAVAR) Approach."
The Quarterly Journal of Economics 120 (1): 387-422.
.. [2] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
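Examples
--------
A minimal sketch (the data arrays ``observed`` and ``informational`` are
illustrative placeholders)::
    mod = FAVAR(observed, informational, k_factors=5, order=1)
    mod.update(mod.start_params)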
"""
def __init__(self, observed, informational, k_factors, order=1, *args, **kwargs):
# Model orders
self.k_obs = observed.shape[1] if observed.ndim > 1 else 1
self.k_info = informational.shape[1] if informational.ndim > 1 else 1
self.k_factors = k_factors
self.order = order
k_posdef = k_factors + self.k_obs
k_states = self.order * k_posdef
# Parameter dimensions
self.k_loadings = (self.k_info - self.k_factors) * k_posdef
self.k_ar = self.order * k_posdef
self.k_var = k_posdef * self.k_ar
self.k_params = (
self.k_loadings + # design
self.k_info + # (diagonal) obs cov
self.k_var + # lag polynomial
k_posdef**2 # state cov
)
# Construct the endogenous vector from the informational and observed series
endog = concat((informational, observed), axis=1)
# Initialize the statespace
super(FAVAR, self).__init__(
endog, k_states=k_states, k_posdef=k_posdef, *args, **kwargs
)
# Initialize known elements of the state space matrices
# Note: no need to set zeroing restrictions, because the matrices are
# initialized to zeros. The commented lines are left for documentation
# purposes.
# The design matrix is the matrix of factor loadings, Lambda
# From the fundamental identification issue, we can immediately set:
self['design', :self.k_factors, :self.k_factors] = np.eye(self.k_factors)
# self['design', :self.k_info, self.k_info:] = np.zeros(self.k_info, self.k_info)
self['design', -self.k_obs:, self.k_factors:self.k_posdef] = np.eye(self.k_obs)
# The observation covariance matrix has zeros in the last M rows and
# columns, due to the way the observed series are integrated into the
measurement equation. But for now, we cannot accommodate a positive
# semi-definite matrix, so make those diagonal elements very small
self['obs_cov', -self.k_obs:, -self.k_obs:] = np.eye(self.k_obs)*1e-6
# Initialize the transition matrix for a VAR structure
if self.order > 1:
self['transition', self.k_posdef:, :-self.k_posdef, 0] = np.eye(self.k_ar - self.k_posdef)
# Identity for the selection matrix
self['selection'] = np.zeros((self.k_states, self.k_posdef))
self['selection', :self.k_posdef, :, 0] = np.eye(self.k_posdef)
# Cache some slices
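# The parameter vector is laid out as
# [factor loadings, obs. cov. diagonal, VAR lag polynomial, state cov.],
# matching the ordering assumed by ``start_params`` and ``update``.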
start = 0; end = self.k_loadings;
self._params_loadings = np.s_[start:end]
start += self.k_loadings; end += self.k_info;
self._params_obs_cov = np.s_[start:end]
start += self.k_info; end += self.k_var;
self._params_transition = np.s_[start:end]
start += self.k_var; end += self.k_posdef**2;
self._params_state_cov = np.s_[start:end]
# Cache some indices
self._design_idx = np.s_['design', self.k_factors:-self.k_obs, :self.k_posdef, 0]
self._obs_cov_idx = ('obs_cov',) + np.diag_indices(self.k_info) + (0,)
self._transition_idx = np.s_['transition', :self.k_posdef, :, 0]
# Initialize as stationary
self.initialize_stationary()
# Set to use the univariate filter with observation collapsing
self.filter_collapsed = True
self.filter_univariate = True
self._initialize_representation()
self._statespace.subset_design = True
self._statespace.companion_transition = True
def _get_model_names(self, latex=False):
return np.arange(self.k_params)
@property
def start_params(self):
# Regress each X on Y, save OLS estimates and variances
betas = []
variances = [1] * self.k_factors  # unit variances for the first K series, whose loadings are fixed by the identifying restriction
exog = self.endog[-1][:,None]
exog_pinv = np.linalg.pinv(exog)
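# ``pinv(exog).dot(endog)`` is the least-squares solution of regressing
# ``endog`` on ``exog``, i.e. the OLS coefficient estimate used below.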
for i in range(self.k_factors, self.k_info - self.k_obs + 1):
endog = self.endog[i]
ols = exog_pinv.dot(endog)
resid = endog - np.dot(exog, ols)
betas.append(ols[0])
variances.append(np.dot(resid.T, resid) / (self.nobs - 1))
# Construct the final start parameters
start_loadings = np.zeros((self.k_info - self.k_factors, self.k_posdef))
start_loadings[:,-1] = betas
return np.r_[
start_loadings.reshape(start_loadings.size,),
variances, # diagonals of obs cov
[0] * self.k_var, # lag polynomial
np.eye(self.k_posdef).reshape(self.k_posdef**2,), # state cov
]
def update(self, params, *args, **kwargs):
params = super(FAVAR, self).update(params, *args, **kwargs)
# Update factor loadings (design matrix)
self[self._design_idx] = np.reshape(
params[self._params_loadings],
(self.k_info - self.k_factors, self.k_posdef)
)
# Update observation covariance matrix
self[self._obs_cov_idx] = params[self._params_obs_cov]
# Update VAR lag polynomial (transition matrix)
self[self._transition_idx] = np.reshape(
params[self._params_transition], (self.k_posdef, self.k_states)
)
# Update state covariance matrix
self['state_cov', :] = np.reshape(
params[self._params_state_cov], (self.k_posdef, self.k_posdef, 1)
)
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import enum
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import urllib
import webob
from cinder.common import constants
from cinder import exception
from cinder.i18n import _
import cinder.policy
from cinder import utils
api_common_opts = [
cfg.IntOpt('osapi_max_limit',
default=1000,
help='The maximum number of items that a collection '
'resource returns in a single response'),
cfg.StrOpt('osapi_volume_base_URL',
help='Base URL that will be presented to users in links '
'to the OpenStack Volume API',
deprecated_name='osapi_compute_link_prefix'),
cfg.ListOpt('query_volume_filters',
default=['name', 'status', 'metadata',
'availability_zone',
'bootable', 'group_id'],
help="Volume filter options which "
"non-admin user could use to "
"query volumes. Default values "
"are: ['name', 'status', "
"'metadata', 'availability_zone' ,"
"'bootable', 'group_id']")
]
CONF = cfg.CONF
CONF.register_opts(api_common_opts)
LOG = logging.getLogger(__name__)
METADATA_TYPES = enum.Enum('METADATA_TYPES', 'user image')
# Regex that matches alphanumeric characters, periods, hyphens,
# colons and underscores:
# ^ assert position at start of the string
# [\w\.\-\:\_] match expression
# $ assert position at end of the string
VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE)
def validate_key_names(key_names_list):
"""Validate each item of the list to match key name regex."""
for key_name in key_names_list:
if not VALID_KEY_NAME_REGEX.match(key_name):
return False
return True
def validate_policy(context, action):
try:
cinder.policy.enforce_action(context, action)
return True
except exception.PolicyNotAuthorized:
return False
def get_pagination_params(params, max_limit=None):
"""Return marker, limit, offset tuple from request.
:param params: `wsgi.Request`'s GET dictionary, possibly containing
'marker', 'limit', and 'offset' variables. 'marker' is the
id of the last element the client has seen, 'limit' is the
maximum number of items to return and 'offset' is the number
of items to skip from the marker or from the first element.
If 'limit' is not specified, or > max_limit, we default to
max_limit. Negative values for either offset or limit will
cause exc.HTTPBadRequest() exceptions to be raised. If no
offset is present we'll default to 0 and if no marker is
present we'll default to None.
:max_limit: Max value 'limit' return value can take
:returns: Tuple (marker, limit, offset)
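For example (illustrative values), ``params = {'marker': 'abc',
'limit': '20'}`` yields ``('abc', 20, 0)``, assuming ``max_limit``
is at least 20.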
"""
max_limit = max_limit or CONF.osapi_max_limit
limit = _get_limit_param(params, max_limit)
marker = _get_marker_param(params)
offset = _get_offset_param(params)
return marker, limit, offset
def _get_limit_param(params, max_limit=None):
"""Extract integer limit from request's dictionary or fail.
Defaults to max_limit if 'limit' is not present, and caps the returned
value at max_limit if the supplied 'limit' exceeds it.
"""
max_limit = max_limit or CONF.osapi_max_limit
try:
limit = int(params.pop('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(limit, max_limit)
return limit
def _get_marker_param(params):
"""Extract marker id from request's dictionary (defaults to None)."""
return params.pop('marker', None)
def _get_offset_param(params):
"""Extract offset id from request's dictionary (defaults to 0) or fail."""
offset = params.pop('offset', 0)
return utils.validate_integer(offset, 'offset', 0, constants.DB_MAX_INT)
def limited(items, request, max_limit=None):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
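For example (illustrative values), ``offset=5`` and ``limit=10`` yield
``items[5:15]``, assuming ``limit`` does not exceed ``max_limit``.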
"""
max_limit = max_limit or CONF.osapi_max_limit
marker, limit, offset = get_pagination_params(request.GET.copy(),
max_limit)
range_end = offset + (limit or max_limit)
return items[offset:range_end]
def limited_by_marker(items, request, max_limit=None):
"""Return a slice of items according to the requested marker and limit."""
max_limit = max_limit or CONF.osapi_max_limit
marker, limit, __ = get_pagination_params(request.GET.copy(), max_limit)
start_index = 0
if marker:
start_index = -1
for i, item in enumerate(items):
if 'flavorid' in item:
if item['flavorid'] == marker:
start_index = i + 1
break
elif item['id'] == marker or item.get('uuid') == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker [%s] not found') % marker
raise webob.exc.HTTPBadRequest(explanation=msg)
range_end = start_index + limit
return items[start_index:range_end]
def get_sort_params(params, default_key='created_at', default_dir='desc'):
"""Retrieves sort keys/directions parameters.
Processes the parameters to create a list of sort keys and sort directions
that correspond to either the 'sort' parameter or the 'sort_key' and
'sort_dir' parameter values. The value of the 'sort' parameter is a comma-
separated list of sort keys; each key may optionally be appended with
':<sort_direction>'.
Note that the 'sort_key' and 'sort_dir' parameters are deprecated in kilo
and an exception is raised if they are supplied with the 'sort' parameter.
The sort parameters are removed from the request parameters by this
function.
:param params: webob.multidict of request parameters (from
cinder.api.openstack.wsgi.Request.params)
:param default_key: default sort key value, added to the list if no
sort keys are supplied
:param default_dir: default sort dir value, added to the list if the
corresponding key does not have a direction
specified
:returns: list of sort keys, list of sort dirs
:raise webob.exc.HTTPBadRequest: If both 'sort' and either 'sort_key' or
'sort_dir' are supplied parameters
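For example (illustrative values), ``params = {'sort': 'name:asc,created_at'}``
yields ``(['name', 'created_at'], ['asc', 'desc'])`` when ``default_dir``
is ``'desc'``.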
"""
if 'sort' in params and ('sort_key' in params or 'sort_dir' in params):
msg = _("The 'sort_key' and 'sort_dir' parameters are deprecated and "
"cannot be used with the 'sort' parameter.")
raise webob.exc.HTTPBadRequest(explanation=msg)
sort_keys = []
sort_dirs = []
if 'sort' in params:
for sort in params.pop('sort').strip().split(','):
sort_key, _sep, sort_dir = sort.partition(':')
if not sort_dir:
sort_dir = default_dir
sort_keys.append(sort_key.strip())
sort_dirs.append(sort_dir.strip())
else:
sort_key = params.pop('sort_key', default_key)
sort_dir = params.pop('sort_dir', default_dir)
sort_keys.append(sort_key.strip())
sort_dirs.append(sort_dir.strip())
return sort_keys, sort_dirs
def get_request_url(request):
url = request.application_url
headers = request.headers
forwarded = headers.get('X-Forwarded-Host')
if forwarded:
url_parts = list(urllib.parse.urlsplit(url))
url_parts[1] = re.split(r',\s?', forwarded)[-1]
url = urllib.parse.urlunsplit(url_parts).rstrip('/')
return url
def remove_version_from_href(href):
"""Removes the first api version from the href.
Given: 'http://cinder.example.com/v1.1/123'
Returns: 'http://cinder.example.com/123'
Given: 'http://cinder.example.com/v1.1'
Returns: 'http://cinder.example.com'
Given: 'http://cinder.example.com/volume/drivers/v1.1/flashsystem'
Returns: 'http://cinder.example.com/volume/drivers/flashsystem'
"""
parsed_url = urllib.parse.urlsplit(href)
url_parts = parsed_url.path.split('/', 2)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
for x in range(len(url_parts)):
if expression.match(url_parts[x]):
del url_parts[x]
break
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = 'href %s does not contain version' % href
LOG.debug(msg)
raise ValueError(msg)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urllib.parse.urlunsplit(parsed_url)
class ViewBuilder(object):
"""Model API responses as dictionaries."""
_collection_name = None
def _get_links(self, request, identifier):
return [{"rel": "self",
"href": self._get_href_link(request, identifier), },
{"rel": "bookmark",
"href": self._get_bookmark_link(request, identifier), }]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_link_prefix(get_request_url(request),
CONF.osapi_volume_base_URL)
url = os.path.join(prefix,
request.environ["cinder.context"].project_id,
collection_name)
return "%s?%s" % (url, urllib.parse.urlencode(params))
def _get_href_link(self, request, identifier):
"""Return an href string pointing to this object."""
prefix = self._update_link_prefix(get_request_url(request),
CONF.osapi_volume_base_URL)
return os.path.join(prefix,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(get_request_url(request))
base_url = self._update_link_prefix(base_url,
CONF.osapi_volume_base_URL)
return os.path.join(base_url,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_collection_links(self, request, items, collection_name,
item_count=None, id_key="uuid"):
"""Retrieve 'next' link, if applicable.
The next link is included if we are returning as many items as we can,
given the restrictions of the optional 'limit' request parameter and the
osapi_max_limit configuration parameter, as long as we are returning
some elements.
So we return next link if:
1) 'limit' param is specified and equal to the number of items.
2) 'limit' param is NOT specified and the number of items is
equal to CONF.osapi_max_limit.
:param request: API request
:param items: List of collection items
:param collection_name: Name of collection, used to generate the
next link for a pagination query
:param item_count: Length of the list of the original collection
items
:param id_key: Attribute key used to retrieve the unique ID, used
to generate the next link marker for a pagination query
:returns: links
"""
item_count = item_count or len(items)
limit = _get_limit_param(request.GET.copy())
if len(items) and limit <= item_count:
return self._generate_next_link(items, id_key, request,
collection_name)
return []
def _generate_next_link(self, items, id_key, request,
collection_name):
links = []
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
else:
last_item_id = last_item["id"]
links.append({
"rel": "next",
"href": self._get_next_link(request, last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
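"""Graft the scheme, netloc and base path of ``prefix`` onto ``orig_url``.
For example (illustrative values), ``orig_url='http://10.0.0.1:8776/v3/volumes'``
with ``prefix='https://volume.example.com/api'`` yields
``'https://volume.example.com/api/v3/volumes'``.
"""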
if not prefix:
return orig_url
url_parts = list(urllib.parse.urlsplit(orig_url))
prefix_parts = list(urllib.parse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
url_parts[2] = prefix_parts[2] + url_parts[2]
return urllib.parse.urlunsplit(url_parts).rstrip('/')
def get_cluster_host(req, params, cluster_version=None):
"""Get cluster and host from the parameters.
This method checks the presence of cluster and host parameters and returns
them depending on the cluster_version.
If cluster_version is False we will never return the cluster_name and we
will require the presence of the host parameter.
If cluster_version is None we will always check for the presence of the
cluster parameter, and if cluster_version is a string with a version we
will only check for the presence of the parameter if the version of the
request is not less than it. In both cases we will require one and only
one parameter, host or cluster.
"""
if (cluster_version is not False and
req.api_version_request.matches(cluster_version)):
cluster_name = params.get('cluster')
msg = _('One and only one of cluster and host must be set.')
else:
cluster_name = None
msg = _('Host field is missing.')
host = params.get('host')
if bool(cluster_name) == bool(host):
raise exception.InvalidInput(reason=msg)
return cluster_name, host
|
|
import pytest # pylint: disable=unused-import
import os
import pandas as pd
import numpy as np
import calliope
import calliope.exceptions as exceptions
from calliope.core.attrdict import AttrDict
from calliope.core.preprocess import time
from calliope.test.common.util import build_test_model as build_model
from calliope.test.common.util import \
constraint_sets, defaults, defaults_model, check_error_or_warning
class TestModelRun:
def test_model_from_dict(self):
"""
Test loading a file from dict/AttrDict instead of from YAML
"""
this_path = os.path.dirname(__file__)
model_location = os.path.join(this_path, 'common', 'test_model', 'model.yaml')
model_dict = AttrDict.from_yaml(model_location)
location_dict = AttrDict({
'locations': {
'0': {'techs': {'test_supply_elec': {}, 'test_demand_elec': {}}},
'1': {'techs': {'test_supply_elec': {}, 'test_demand_elec': {}}}
}
})
model_dict.union(location_dict)
model_dict.model['timeseries_data_path'] = os.path.join(
this_path, 'common', 'test_model', model_dict.model['timeseries_data_path']
)
# test as AttrDict
calliope.Model(model_dict)
# test as dict
calliope.Model(model_dict.as_dict())
def test_valid_scenarios(self):
"""
Test that valid scenario definition raises no error and results in applied scenario.
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1: ['one', 'two']
overrides:
one:
techs.test_supply_gas.constraints.energy_cap_max: 20
two:
techs.test_supply_elec.constraints.energy_cap_max: 20
locations:
0:
techs:
test_supply_gas:
test_supply_elec:
test_demand_elec:
"""
)
model = build_model(override_dict=override, scenario='scenario_1')
assert model._model_run.locations['0'].techs.test_supply_gas.constraints.energy_cap_max == 20
assert model._model_run.locations['0'].techs.test_supply_elec.constraints.energy_cap_max == 20
def test_invalid_scenarios_dict(self):
"""
Test that invalid scenario definition raises appropriate error
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1:
techs.foo.bar: 1
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override, scenario='scenario_1')
assert check_error_or_warning(error, 'Scenario definition must be a list of override names.')
def test_invalid_scenarios_str(self):
"""
Test that invalid scenario definition raises appropriate error
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1: 'foo1,foo2'
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override, scenario='scenario_1')
assert check_error_or_warning(error, 'Scenario definition must be a list of override names.')
def test_scenario_name_overlaps_overrides(self):
"""
Test that a scenario name cannot be a combination of override names
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
'simple_supply,group_share_energy_cap_min': 'foobar'
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override, scenario='simple_supply,group_share_energy_cap_min')
assert check_error_or_warning(error, 'Manually defined scenario cannot be a combination of override names.')
def test_undefined_carriers(self):
"""
Test that user has input either carrier or carrier_in/_out for each tech
"""
override = AttrDict.from_yaml_string(
"""
techs:
test_undefined_carrier:
essentials:
parent: supply
name: test
constraints:
resource: .inf
energy_cap_max: .inf
locations.1.techs.test_undefined_carrier:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='simple_supply,one_day')
def test_conversion_plus_primary_carriers(self):
"""
Test that user has input input/output primary carriers for conversion_plus techs
"""
override1 = {'techs.test_conversion_plus.essentials.carrier_in': ['gas', 'coal']}
override2 = {'techs.test_conversion_plus.essentials.primary_carrier_in': 'coal'}
override3 = {'techs.test_conversion_plus.essentials.primary_carrier_out': 'coal'}
model = build_model({}, scenario='simple_conversion_plus,two_hours')
assert model._model_run.techs.test_conversion_plus.essentials.get_key(
'primary_carrier_in', None
) == 'gas'
# should fail: multiple carriers in, but no primary_carrier_in assigned
with pytest.raises(exceptions.ModelError) as error:
build_model(override1, scenario='simple_conversion_plus,two_hours')
assert check_error_or_warning(error, 'Primary_carrier_in must be assigned')
# should fail: primary_carrier_in not one of the carriers_in
with pytest.raises(exceptions.ModelError) as error:
build_model(override2, scenario='simple_conversion_plus,two_hours')
assert check_error_or_warning(error, 'Primary_carrier_in `coal` not one')
# should fail: primary_carrier_out not one of the carriers_out
with pytest.raises(exceptions.ModelError) as error:
build_model(override3, scenario='simple_conversion_plus,two_hours')
assert check_error_or_warning(error, 'Primary_carrier_out `coal` not one')
def test_incorrect_subset_time(self):
"""
If subset_time is a list, it must have two entries (start_time, end_time)
If subset_time is not a list, it should successfully subset on the given
string/integer
"""
override = lambda param: AttrDict.from_yaml_string(
"model.subset_time: {}".format(param)
)
# should fail: one string in list
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override(['2005-01']), scenario='simple_supply')
# should fail: three strings in list
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override(['2005-01-01', '2005-01-02', '2005-01-03']), scenario='simple_supply')
# should pass: two string in list as slice
model = build_model(override_dict=override(['2005-01-01', '2005-01-07']), scenario='simple_supply')
assert all(model.inputs.timesteps.to_index() == pd.date_range('2005-01', '2005-01-07 23:00:00', freq='H'))
# should pass: one integer/string
model = build_model(override_dict=override('2005-01'), scenario='simple_supply')
assert all(model.inputs.timesteps.to_index() == pd.date_range('2005-01', '2005-01-31 23:00:00', freq='H'))
# should fail: time subset out of range of input data
with pytest.raises(KeyError):
build_model(override_dict=override('2005-03'), scenario='simple_supply')
# should fail: time subset out of range of input data
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override(['2005-02-01', '2005-02-05']), scenario='simple_supply')
def test_incorrect_date_format(self):
"""
Test the date parser catches a different date format from file than
user input/default (including when just one line of a file is incorrect)
"""
# should pass: changing datetime format from default
override1 = {
'model.timeseries_dateformat': "%d/%m/%Y %H:%M:%S",
'techs.test_demand_heat.constraints.resource': 'file=demand_heat_diff_dateformat.csv',
'techs.test_demand_elec.constraints.resource': 'file=demand_heat_diff_dateformat.csv'
}
model = build_model(override_dict=override1, scenario='simple_conversion')
assert all(model.inputs.timesteps.to_index() == pd.date_range('2005-01', '2005-02-01 23:00:00', freq='H'))
# should fail: wrong dateformat input for one file
override2 = {
'techs.test_demand_heat.constraints.resource': 'file=demand_heat_diff_dateformat.csv'
}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override2, scenario='simple_conversion')
# should fail: wrong dateformat input for all files
override3 = {
'model.timeseries_dateformat': "%d/%m/%Y %H:%M:%S"
}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override3, scenario='simple_supply')
# should fail: one value wrong in file
override4 = {
'techs.test_demand_heat.constraints.resource': 'file=demand_heat_wrong_dateformat.csv'
}
# check in output error that it points to: 07/01/2005 10:00:00
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override4, scenario='simple_conversion')
def test_inconsistent_time_indices(self):
"""
Test that, including after any time subsetting, the indices of all time
varying input data are consistent with each other
"""
# should fail: wrong length of demand_heat csv vs demand_elec
override1 = {
'techs.test_demand_heat.constraints.resource': 'file=demand_heat_wrong_length.csv'
}
# check in output error that it points to: 07/01/2005 10:00:00
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario='simple_conversion')
# should pass: wrong length of demand_heat csv, but time subsetting removes the difference
build_model(override_dict=override1, scenario='simple_conversion,one_day')
def test_empty_key_on_explode(self):
"""
On exploding locations (from ``'1--3'`` or ``'1,2,3'`` to
``['1', '2', '3']``), raise error on the resulting list being empty
"""
list1 = calliope.core.preprocess.locations.explode_locations('1--3')
list2 = calliope.core.preprocess.locations.explode_locations('1,2,3')
assert list1 == list2 == ['1', '2', '3']
def test_key_clash_on_set_loc_key(self):
"""
Raise error on attempted overwrite of information regarding a recently
exploded location
"""
override = {
'locations.0.test_supply_elec.constraints.resource': 10,
'locations.0,1.test_supply_elec.constraints.resource': 15
}
with pytest.raises(KeyError):
build_model(override_dict=override, scenario='simple_supply,one_day')
def test_calculate_depreciation(self):
"""
Technologies which define investment costs *must* define lifetime and
interest rate, so that a depreciation rate can be calculated.
If lifetime == inf and interest rate > 0, the depreciation rate will be inf, so
we want to avoid that too.
"""
override1 = {
'techs.test_supply_elec.costs.monetary.energy_cap': 10
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override1, scenario='simple_supply,one_day')
assert check_error_or_warning(
error, 'Must specify constraints.lifetime and costs.monetary.interest_rate'
)
override2 = {
'techs.test_supply_elec.constraints.lifetime': 10,
'techs.test_supply_elec.costs.monetary.energy_cap': 10
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override2, scenario='simple_supply,one_day')
assert check_error_or_warning(
error, 'Must specify constraints.lifetime and costs.monetary.interest_rate'
)
override3 = {
'techs.test_supply_elec.costs.monetary.interest_rate': 0.1,
'techs.test_supply_elec.costs.monetary.energy_cap': 10
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override3, scenario='simple_supply,one_day')
assert check_error_or_warning(
error, 'Must specify constraints.lifetime and costs.monetary.interest_rate'
)
override4 = {
'techs.test_supply_elec.constraints.lifetime': 10,
'techs.test_supply_elec.costs.monetary.interest_rate': 0,
'techs.test_supply_elec.costs.monetary.energy_cap': 10
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override4, scenario='simple_supply,one_day')
assert check_error_or_warning(excinfo, '`monetary` interest rate of zero')
override5 = {
'techs.test_supply_elec.constraints.lifetime': np.inf,
'techs.test_supply_elec.costs.monetary.interest_rate': 0,
'techs.test_supply_elec.costs.monetary.energy_cap': 10
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override5, scenario='simple_supply,one_day')
assert check_error_or_warning(
excinfo, 'No investment monetary cost will be incurred for `test_supply_elec`'
)
override6 = {
'techs.test_supply_elec.constraints.lifetime': np.inf,
'techs.test_supply_elec.costs.monetary.interest_rate': 0.1,
'techs.test_supply_elec.costs.monetary.energy_cap': 10
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override6, scenario='simple_supply,one_day')
assert check_error_or_warning(
excinfo, 'No investment monetary cost will be incurred for `test_supply_elec`'
)
override7 = {
'techs.test_supply_elec.constraints.lifetime': 10,
'techs.test_supply_elec.costs.monetary.interest_rate': 0.1,
'techs.test_supply_elec.costs.monetary.energy_cap': 10
}
build_model(override_dict=override7, scenario='simple_supply,one_day')
def test_delete_interest_rate(self):
"""
If only 'interest_rate' is given in the cost class for a technology, we
should be able to handle deleting it without leaving an empty cost key.
"""
override1 = {
'techs.test_supply_elec.costs.monetary.interest_rate': 0.1
}
m = build_model(override_dict=override1, scenario='simple_supply,one_day')
assert 'loc_techs_cost' not in m._model_data.dims
class TestChecks:
def test_unrecognised_config_keys(self):
"""
Check that the only allowed top-level keys are 'model', 'run', 'locations',
'techs', 'tech_groups' (+ 'config_path', but that is an internal addition)
"""
override = {'nonsensical_key': 'random_string'}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario='simple_supply')
assert check_error_or_warning(
excinfo, 'Unrecognised top-level configuration item: nonsensical_key'
)
def test_unrecognised_model_run_keys(self):
"""
Check that the only keys allowed in 'model' and 'run' are those in the
model defaults
"""
override1 = {'model.nonsensical_key': 'random_string'}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override1, scenario='simple_supply')
assert check_error_or_warning(
excinfo, 'Unrecognised setting in model configuration: nonsensical_key'
)
override2 = {'run.nonsensical_key': 'random_string'}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override2, scenario='simple_supply')
assert check_error_or_warning(
excinfo, 'Unrecognised setting in run configuration: nonsensical_key'
)
# A key that should be in run but is given in model
override3 = {'model.solver': 'glpk'}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override3, scenario='simple_supply')
assert check_error_or_warning(
excinfo, 'Unrecognised setting in model configuration: solver'
)
# A key that should be in model but is given in run
override4 = {'run.subset_time': None}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override4, scenario='simple_supply')
assert check_error_or_warning(
excinfo, 'Unrecognised setting in run configuration: subset_time'
)
override5 = {
'run.objective': 'minmax_cost_optimization',
'run.objective_options': {
'cost_class': 'monetary',
'sense': 'minimize',
'unused_option': 'some_value'
}
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override5, scenario='simple_supply')
assert check_error_or_warning(
excinfo, 'Objective function argument `unused_option` given but not used by objective function `minmax_cost_optimization`'
)
def test_model_version_mismatch(self):
"""
Model config says model.calliope_version = 0.1, which is not what we
are running, so we want a warning.
"""
override = {'model.calliope_version': 0.1}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario='simple_supply,one_day')
assert check_error_or_warning(excinfo, 'Model configuration specifies calliope_version')
def test_unknown_carrier_tier(self):
"""
User can only use 'carrier_' + ['in', 'out', 'in_2', 'out_2', 'in_3',
'out_3', 'ratios']
"""
override1 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.essentials.carrier_1: power
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario='simple_supply,one_day')
override2 = AttrDict.from_yaml_string(
"""
techs.test_conversion_plus.essentials.carrier_out_4: power
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override2, scenario='simple_conversion_plus,one_day')
def test_name_overlap(self):
"""
No tech may have the same identifier as a tech group
"""
override = AttrDict.from_yaml_string(
"""
techs:
supply:
essentials:
name: Supply tech
carrier: gas
parent: supply
constraints:
energy_cap_max: 10
resource: .inf
locations:
1.techs.supply:
0.techs.supply:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='one_day')
def test_abstract_base_tech_group_override(self):
"""
Abstract base technology groups can be overridden
"""
override = AttrDict.from_yaml_string(
"""
tech_groups:
supply:
constraints:
lifetime: 25
locations:
1.techs.test_supply_elec:
1.techs.test_demand_elec:
"""
)
build_model(override_dict=override, scenario='one_day')
def test_unspecified_parent(self):
"""
All technologies and technology groups must specify a parent
"""
override = AttrDict.from_yaml_string(
"""
techs.test_supply_no_parent:
essentials:
name: Supply tech
carrier: gas
constraints:
energy_cap_max: 10
resource: .inf
locations.1.test_supply_no_parent:
"""
)
with pytest.raises(KeyError):
build_model(override_dict=override, scenario='simple_supply,one_day')
def test_tech_as_parent(self):
"""
All technologies and technology groups must specify a parent
"""
override1 = AttrDict.from_yaml_string(
"""
techs.test_supply_tech_parent:
essentials:
name: Supply tech
carrier: gas
parent: test_supply_elec
constraints:
energy_cap_max: 10
resource: .inf
locations.1.test_supply_tech_parent:
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override1, scenario='simple_supply,one_day')
check_error_or_warning(error, 'tech `test_supply_tech_parent` has another tech as a parent')
override2 = AttrDict.from_yaml_string(
"""
tech_groups.test_supply_group:
essentials:
carrier: gas
parent: test_supply_elec
constraints:
energy_cap_max: 10
resource: .inf
techs.test_supply_tech_parent.essentials:
name: Supply tech
parent: test_supply_group
locations.1.test_supply_tech_parent:
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override2, scenario='simple_supply,one_day')
check_error_or_warning(error, 'tech_group `test_supply_group` has a tech as a parent')
def test_resource_as_carrier(self):
"""
No carrier in technology or technology group can be called `resource`
"""
override1 = AttrDict.from_yaml_string(
"""
techs:
test_supply_elec:
essentials:
name: Supply tech
carrier: resource
parent: supply
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario='simple_supply,one_day')
override2 = AttrDict.from_yaml_string(
"""
tech_groups:
test_supply_group:
essentials:
name: Supply tech
carrier: resource
parent: supply
techs.test_supply_elec.essentials.parent: test_supply_group
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override2, scenario='simple_supply,one_day')
def test_missing_constraints(self):
"""
A technology must define at least one constraint.
"""
override = AttrDict.from_yaml_string(
"""
techs:
supply_missing_constraint:
essentials:
parent: supply
carrier: electricity
name: supply missing constraint
locations.1.techs.supply_missing_constraint:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='simple_supply,one_day')
def test_missing_required_constraints(self):
"""
A technology within an abstract base technology must define a subset of
hardcoded constraints in order to function
"""
# should fail: missing one of ['energy_cap_max', 'energy_cap_equals', 'energy_cap_per_unit']
override_supply1 = AttrDict.from_yaml_string(
"""
techs:
supply_missing_constraint:
essentials:
parent: supply
carrier: electricity
name: supply missing constraint
constraints:
resource_area_max: 10
locations.1.techs.supply_missing_constraint:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override_supply1, scenario='simple_supply,one_day')
# should pass: giving one of ['energy_cap_max', 'energy_cap_equals', 'energy_cap_per_unit']
override_supply2 = AttrDict.from_yaml_string(
"""
techs:
supply_missing_constraint:
essentials:
parent: supply
carrier: electricity
name: supply missing constraint
constraints.energy_cap_max: 10
locations.1.techs.supply_missing_constraint:
"""
)
build_model(override_dict=override_supply2, scenario='simple_supply,one_day')
def test_defining_non_allowed_constraints(self):
"""
A technology within an abstract base technology can only define a subset
of hardcoded constraints, anything else will not be implemented, so are
not allowed for that technology. This includes misspellings
"""
# should fail: storage_cap_max not allowed for supply tech
override_supply1 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.constraints.storage_cap_max: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override_supply1, scenario='simple_supply,one_day')
def test_defining_non_allowed_costs(self):
"""
A technology within an abstract base technology can only define a subset
of hardcoded costs, anything else will not be implemented, so are
not allowed for that technology. This includes misspellings
"""
# should fail: storage_cap cost not allowed for supply tech
override = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.costs.monetary.storage_cap: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='simple_supply,one_day')
# should fail: om_prod not allowed for demand tech
override = AttrDict.from_yaml_string(
"""
techs.test_demand_elec.costs.monetary.om_prod: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='simple_supply,one_day')
def test_exporting_unspecified_carrier(self):
"""
User can only define an export carrier if it is defined in
['carrier_out', 'carrier_out_2', 'carrier_out_3']
"""
override_supply = lambda param: AttrDict.from_yaml_string(
"techs.test_supply_elec.constraints.export_carrier: {}".format(param)
)
override_conversion_plus = lambda param: AttrDict.from_yaml_string(
"techs.test_conversion_plus.constraints.export_carrier: {}".format(param)
)
# should fail: exporting `heat` not allowed for electricity supply tech
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override_supply('heat'), scenario='simple_supply,one_day')
# should fail: exporting `random` not allowed for conversion_plus tech
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override_conversion_plus('random'), scenario='simple_conversion_plus,one_day')
# should pass: exporting electricity for supply tech
build_model(override_dict=override_supply('electricity'), scenario='simple_supply,one_day')
# should pass: exporting heat for conversion tech
build_model(override_dict=override_conversion_plus('heat'), scenario='simple_conversion_plus,one_day')
def test_allowed_time_varying_constraints(self):
"""
`file=` is only allowed on a hardcoded list of constraints, unless
`_time_varying` is appended to the constraint (i.e. user input)
"""
allowed_constraints_no_file = list(
set(defaults_model.tech_groups.storage.allowed_constraints)
.difference(defaults.file_allowed)
)
allowed_constraints_file = list(
set(defaults_model.tech_groups.storage.allowed_constraints)
.intersection(defaults.file_allowed)
)
override = lambda param: AttrDict.from_yaml_string(
"techs.test_storage.constraints.{}: file=binary_one_day.csv".format(param)
)
# should fail: Cannot have `file=` on the following constraints
for param in allowed_constraints_no_file:
with pytest.raises(exceptions.ModelError) as errors:
build_model(override_dict=override(param), scenario='simple_storage,one_day')
assert check_error_or_warning(
errors,
'Cannot load `{}` from file for configuration'.format(param)
)
# should pass: can have `file=` on the following constraints
for param in allowed_constraints_file:
build_model(override_dict=override(param), scenario='simple_storage,one_day')
def test_incorrect_location_coordinates(self):
"""
Either all or no locations must have `coordinates` defined and, if all
defined, they must be in the same coordinate system (lat/lon or x/y)
"""
def _override(param0, param1):
override = {}
if param0 is not None:
override.update({'locations.0.coordinates': param0})
if param1 is not None:
override.update({'locations.1.coordinates': param1})
return override
cartesian0 = {'x': 0, 'y': 1}
cartesian1 = {'x': 1, 'y': 1}
geographic0 = {'lat': 0, 'lon': 1}
geographic1 = {'lat': 1, 'lon': 1}
fictional0 = {'a': 0, 'b': 1}
fictional1 = {'a': 1, 'b': 1}
# should fail: cannot have locations in one place and not in another
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=_override(cartesian0, None), scenario='simple_storage,one_day')
check_error_or_warning(error, "Either all or no locations must have `coordinates` defined")
# should fail: cannot have cartesian coordinates in one place and geographic in another
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=_override(cartesian0, geographic1), scenario='simple_storage,one_day')
check_error_or_warning(error, "All locations must use the same coordinate format")
# should fail: cannot use a non-cartesian or non-geographic coordinate system
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=_override(fictional0, fictional1), scenario='simple_storage,one_day')
check_error_or_warning(error, "Unidentified coordinate system")
# should fail: coordinates must be given as key:value pairs
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=_override([0, 1], [1, 1]), scenario='simple_storage,one_day')
check_error_or_warning(error, "Coordinates must be given in the format")
# should pass: cartesian coordinates in both places
build_model(override_dict=_override(cartesian0, cartesian1), scenario='simple_storage,one_day')
# should pass: geographic coordinates in both places
build_model(override_dict=_override(geographic0, geographic1), scenario='simple_storage,one_day')
def test_one_way(self):
"""
With one_way transmission, we remove one direction of a link from
loc_tech_carriers_prod and the other from loc_tech_carriers_con.
"""
override = {
'links.X1,N1.techs.heat_pipes.constraints.one_way': True,
'links.N1,X2.techs.heat_pipes.constraints.one_way': True,
'links.N1,X3.techs.heat_pipes.constraints.one_way': True,
'model.subset_time': '2005-01-01'
}
m = calliope.examples.urban_scale(override_dict=override)
removed_prod_links = ['X1::heat_pipes:N1', 'N1::heat_pipes:X2', 'N1::heat_pipes:X3']
removed_con_links = ['N1::heat_pipes:X1', 'X2::heat_pipes:N1', 'X3::heat_pipes:N1']
for link in removed_prod_links:
assert link not in m._model_data.loc_tech_carriers_prod.values
for link in removed_con_links:
assert link not in m._model_data.loc_tech_carriers_con.values
def test_milp_constraints(self):
"""
If `units` is defined, but not `energy_cap_per_unit`, throw an error
"""
# should fail: no energy_cap_per_unit
override1 = AttrDict.from_yaml_string("techs.test_supply_elec.constraints.units_max: 4")
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario='simple_supply,one_day')
# should pass: energy_cap_per_unit given
override2 = AttrDict.from_yaml_string("""
techs.test_supply_elec.constraints:
units_max: 4
energy_cap_per_unit: 5
""")
build_model(override_dict=override2, scenario='simple_supply,one_day')
def test_force_resource_ignored(self):
"""
If a technology defines force_resource but is not in loc_techs_finite_resource,
it will have no effect
"""
override = {
'techs.test_supply_elec.constraints.resource': np.inf,
'techs.test_supply_elec.constraints.force_resource': True,
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario='simple_supply,one_day')
assert check_error_or_warning(
excinfo,
'`test_supply_elec` at `0` defines force_resource but not a finite resource'
)
def test_override_coordinates(self):
"""
Check that a warning is raised if an override completely overhauls the
coordinate system
"""
override = {
'locations': {
'X1.coordinates': {'lat': 51.4596158, 'lon': -0.1613446},
'X2.coordinates': {'lat': 51.4652373, 'lon': -0.1141548},
'X3.coordinates': {'lat': 51.4287016, 'lon': -0.1310635},
'N1.coordinates': {'lat': 51.4450766, 'lon': -0.1247183}
},
'links': {
'X1,X2.techs.power_lines.distance': 10,
'X1,X3.techs.power_lines.distance': 5,
'X1,N1.techs.heat_pipes.distance': 3,
'N1,X2.techs.heat_pipes.distance': 3,
'N1,X3.techs.heat_pipes.distance': 4
}
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
calliope.examples.urban_scale(override_dict=override)
assert check_error_or_warning(
excinfo,
"Updated from coordinate system"
)
def test_clustering_and_cyclic_storage(self):
"""
Don't allow time clustering with cyclic storage if not also using
storage_inter_cluster
"""
override = {
'model.subset_time': ['2005-01-01', '2005-01-04'],
'model.time': {
'function': 'apply_clustering',
'function_options': {
'clustering_func': 'file=cluster_days.csv:0', 'how': 'mean',
'storage_inter_cluster': False
}
},
'run.cyclic_storage': True
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override, scenario='simple_supply')
assert check_error_or_warning(error, 'cannot have cyclic storage')
def test_incorrect_resource_unit(self):
"""
Only `energy`, `energy_per_cap`, or `energy_per_area` is allowed under
`resource_unit`.
"""
def _override(resource_unit):
return {
'techs.test_supply_elec.constraints.resource_unit': resource_unit
}
with pytest.raises(exceptions.ModelError) as error:
build_model(_override('power'), scenario='simple_supply')
build_model(_override('energy'), scenario='simple_supply')
build_model(_override('energy_per_cap'), scenario='simple_supply')
build_model(_override('energy_per_area'), scenario='simple_supply')
assert check_error_or_warning(
error,
'`power` is an unknown resource unit for `test_supply_elec`'
)
class TestDataset:
# FIXME: What are we testing here?
def test_inconsistent_timesteps(self):
"""
Timesteps must be consistent?
"""
def test_unassigned_sets(self):
"""
Check that all sets in which there are possible loc:techs are assigned
and have been filled
"""
models = dict()
models['model_national'] = calliope.examples.national_scale()
models['model_urban'] = calliope.examples.urban_scale()
models['model_milp'] = calliope.examples.milp()
for model_name, model in models.items():
for set_name, set_vals in model._model_data.coords.items():
if 'constraint' in set_name:
assert set(set_vals.values) == set(constraint_sets[model_name][set_name])
def test_negative_cost_unassigned_cap(self):
"""
Any negative cost associated with a capacity (e.g. cost_energy_cap) may only
be applied if the upper bound of that capacity has been defined
"""
# should fail: resource_cap cost is negative, resource_cap_max is infinite
override = AttrDict.from_yaml_string(
"techs.test_supply_plus.costs.monetary.resource_cap: -10"
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='simple_supply_plus,one_day')
# should fail: storage_cap cost is negative, storage_cap_max is infinite
override = AttrDict.from_yaml_string(
"""
techs.test_storage:
constraints.storage_cap_max: .inf
costs.monetary.storage_cap: -10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='simple_storage,one_day')
# FIXME: What are the *required* arrays?
def test_missing_array(self):
"""
Check that the dataset includes all arrays *required* for a model to function
"""
# FIXME: What are the *required* attributes?
def test_missing_attrs(self):
"""
Check that the dataset includes all attributes *required* for a model to function
"""
def test_force_infinite_resource(self):
"""
Ensure that no loc-tech specifies infinite resource and force_resource=True
"""
override = {
'techs.test_supply_plus.constraints.resource': 'file=supply_plus_resource_inf.csv',
'techs.test_supply_plus.constraints.force_resource': True,
}
with pytest.raises(exceptions.ModelError) as error_info:
build_model(override_dict=override, scenario='simple_supply_plus,one_day')
assert check_error_or_warning(error_info, 'Ensure all entries are numeric')
def test_positive_demand(self):
"""
Resource for demand must be negative
"""
override = {
'techs.test_demand_elec.constraints.resource': 'file=demand_elec_positive.csv',
}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario='simple_supply,one_day')
def test_empty_dimensions(self):
"""
Empty dimensions lead Pyomo to blow up (building sets with no data),
so check that we have successfully removed them here.
"""
model = build_model(scenario='simple_conversion_plus,one_day')
assert 'distance' not in model._model_data.data_vars
assert 'lookup_remotes' not in model._model_data.data_vars
def check_operate_mode_allowed(self):
"""
Once timesteps have been masked, operate mode is no longer allowed
"""
model = build_model(scenario='simple_supply,one_day')
assert model.model_data.attrs['allow_operate_mode'] == 1
model1 = calliope.examples.time_masking()
assert model1.model_data.attrs['allow_operate_mode'] == 0
def test_15min_timesteps(self):
override = {
'techs.test_demand_elec.constraints.resource': 'file=demand_elec_15mins.csv',
}
model = build_model(override, scenario='simple_supply,one_day')
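# 15-minute timesteps correspond to a resolution of 0.25 (hours), hence the assert below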
assert model.inputs.timestep_resolution.to_pandas().unique() == [0.25]
def test_clustering(self):
"""
On clustering, there are a few new dimensions in the model_data, and a
few new lookup arrays
"""
override = {
'model.subset_time': ['2005-01-01', '2005-01-04'],
'model.time': {
'function': 'apply_clustering',
'function_options': {
'clustering_func': 'file=cluster_days.csv:0', 'how': 'mean'
}
}
}
model = build_model(override, scenario='simple_supply')
assert 'clusters' in model._model_data.dims
assert 'lookup_cluster_first_timestep' in model._model_data.data_vars
assert 'lookup_cluster_last_timestep' in model._model_data.data_vars
assert 'lookup_datestep_last_cluster_timestep' in model._model_data.data_vars
assert 'lookup_datestep_cluster' in model._model_data.data_vars
assert 'timestep_cluster' in model._model_data.data_vars
datesteps = model.inputs.datesteps.to_index().strftime('%Y-%m-%d')
daterange = pd.date_range('2005-01-01', '2005-01-04', freq='1D').strftime('%Y-%m-%d')
assert np.array_equal(datesteps, daterange)
def test_clustering_no_datestep(self):
"""
On clustering, there are a few new dimensions in the model_data, and a
few new lookup arrays. Cyclic storage is set to False as you cannot
have cyclic storage without `storage_inter_cluster` being active.
"""
override = {
'model.subset_time': ['2005-01-01', '2005-01-04'],
'model.time': {
'function': 'apply_clustering',
'function_options': {
'clustering_func': 'file=cluster_days.csv:0', 'how': 'mean',
'storage_inter_cluster': False
}
},
'run.cyclic_storage': False
}
model = build_model(override, scenario='simple_supply')
assert 'clusters' in model._model_data.dims
assert 'datesteps' not in model._model_data.dims
assert 'lookup_cluster_first_timestep' in model._model_data.data_vars
assert 'lookup_cluster_last_timestep' in model._model_data.data_vars
assert 'lookup_datestep_last_cluster_timestep' not in model._model_data.data_vars
assert 'lookup_datestep_cluster' not in model._model_data.data_vars
assert 'timestep_cluster' in model._model_data.data_vars
#def test_future_warning(self):
# """
# Test and warnings to be uncommented when a futurewarning is present
# """
# with pytest.warns(FutureWarning) as warning:
# build_model({}, override_groups='')
# assert check_error_or_warning(warning, '')
class TestUtil:
def test_concat_iterable_ensures_same_length_iterables(self):
"""
All iterables must have the same length
"""
iterables = [('1', '2', '3'), ('4', '5')]
iterables_swapped = [('4', '5'), ('1', '2', '3')]
iterables_correct = [('1', '2', '3'), ('4', '5', '6')]
concatenator = [':', '::']
with pytest.raises(AssertionError):
calliope.core.preprocess.util.concat_iterable(iterables, concatenator)
calliope.core.preprocess.util.concat_iterable(iterables_swapped, concatenator)
concatenated = calliope.core.preprocess.util.concat_iterable(iterables_correct, concatenator)
assert concatenated == ['1:2::3', '4:5::6']
def test_concat_iterable_check_concatenators(self):
"""
Concatenators must be one element shorter than the length of each iterable
"""
iterables = [('1', '2', '3'), ('4', '5', '6')]
concat_one = [':']
concat_two_diff = [':', '::']
concat_two_same = [':', ':']
concat_three = [':', ':', ':']
with pytest.raises(AssertionError):
calliope.core.preprocess.util.concat_iterable(iterables, concat_one)
calliope.core.preprocess.util.concat_iterable(iterables, concat_three)
concatenated1 = calliope.core.preprocess.util.concat_iterable(iterables, concat_two_diff)
assert concatenated1 == ['1:2::3', '4:5::6']
concatenated2 = calliope.core.preprocess.util.concat_iterable(iterables, concat_two_same)
assert concatenated2 == ['1:2:3', '4:5:6']
def test_vincenty(self):
# London to Paris: about 344 km
coords = [(51.507222, -0.1275), (48.8567, 2.3508)]
distance = calliope.core.preprocess.util.vincenty(coords[0], coords[1])
assert distance == pytest.approx(343834) # in meters
class TestTime:
@pytest.fixture
def model(self):
return calliope.examples.urban_scale(
override_dict={'model.subset_time': ['2005-01-01', '2005-01-10']}
)
def test_add_max_demand_timesteps(self, model):
data = model._model_data_original.copy()
data = time.add_max_demand_timesteps(data)
assert (
data['max_demand_timesteps'].loc[dict(carriers='heat')].values ==
np.datetime64('2005-01-05T07:00:00')
)
assert (
data['max_demand_timesteps'].loc[dict(carriers='electricity')].values ==
np.datetime64('2005-01-10T09:00:00')
)
|
|
from warnings import warn as Warn
from ..cg.shapes import asShape
from ..io.FileIO import FileIO
from .weights import W, WSP
from ._contW_lists import ContiguityWeightsLists
from .util import get_ids
WT_TYPE = {'rook': 2, 'queen': 1} # for _contW_Binning
__author__ = "Sergio J. Rey <[email protected]> , Levi John Wolf <[email protected]>"
class Rook(W):
def __init__(self, polygons, **kw):
"""
Construct a weights object from a collection of pysal polygons.
Arguments
---------
polygons : list
a collection of PySAL shapes to build weights from
ids : list
a list of names to use to build the weights
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
---------
:class:`pysal.weights.W`
"""
criterion = 'rook'
ids = kw.pop('ids', None)
neighbors, ids = _build(polygons, criterion=criterion,
ids=ids)
W.__init__(self, neighbors, ids=ids, **kw)
@classmethod
def from_shapefile(cls, filepath, idVariable=None, full=False, **kwargs):
"""
Rook contiguity weights from a polygon shapefile.
Parameters
----------
filepath : string
name of polygon shapefile including suffix.
idVariable : string
name of a column in the shapefile's DBF to use for ids.
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> wr=Rook.from_shapefile(pysal.examples.get_path("columbus.shp"), "POLYID")
>>> "%.3f"%wr.pct_nonzero
'8.330'
>>> wr=Rook.from_shapefile(pysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wr.sparse.nnz *1. / wr.n**2
>>> "%.3f"%pct_sp
'0.083'
Notes
-----
Rook contiguity defines as neighbors any pair of polygons that share a
common edge in their polygon definitions.
See Also
--------
:class:`pysal.weights.W`
:class:`pysal.weights.Rook`
"""
sparse = kwargs.pop('sparse', False)
if idVariable is not None:
ids = get_ids(filepath, idVariable)
else:
ids = None
w = cls(FileIO(filepath), ids=ids,**kwargs)
w.set_shapefile(filepath, idVariable=idVariable, full=full)
if sparse:
w = w.to_WSP()
return w
@classmethod
def from_iterable(cls, iterable, **kwargs):
"""
Construct a weights object from a collection of arbitrary polygons. This
will cast the polygons to PySAL polygons, then build the W.
Arguments
---------
iterable : iterable
a collection of shapes to be cast to PySAL shapes. Must
support iteration. Contents should at least implement a
`__geo_interface__` attribute or be able to be coerced to
geometries using pysal.cg.asShape
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
----------
:class:`pysal.weights.W`
:class:`pysal.weights.Rook`
"""
new_iterable = [asShape(shape) for shape in iterable]
return cls(new_iterable, **kwargs)
@classmethod
def from_dataframe(cls, df, geom_col='geometry',
idVariable=None, ids=None, id_order=None, **kwargs):
"""
Construct a weights object from a pandas dataframe with a geometry
column. This will cast the polygons to PySAL polygons, then build the W
using ids from the dataframe.
Arguments
---------
df : DataFrame
a :class: `pandas.DataFrame` containing geometries to use
for spatial weights
geom_col : string
the name of the column in `df` that contains the
geometries. Defaults to `geometry`
idVariable : string
the name of the column to use as IDs. If nothing is
provided, the dataframe index is used
ids : list
a list of ids to use to index the spatial weights object.
Order is not respected from this list.
id_order : list
an ordered list of ids to use to index the spatial weights
object. If used, the resulting weights object will iterate
over results in the order of the names provided in this
argument.
See Also
---------
:class:`pysal.weights.W`
:class:`pysal.weights.Rook`
"""
if id_order is not None:
if id_order is True and ((idVariable is not None)
or (ids is not None)):
# if idVariable is None, we want ids. Otherwise, we want the
# idVariable column
id_order = list(df.get(idVariable, ids))
else:
id_order = df.get(id_order, ids)
elif idVariable is not None:
ids = df.get(idVariable).tolist()
elif isinstance(ids, str):
ids = df.get(ids).tolist()
return cls.from_iterable(df[geom_col].tolist(), ids=ids,
id_order=id_order, **kwargs)
class Queen(W):
def __init__(self, polygons, **kw):
"""
Construct a weights object from a collection of pysal polygons.
Arguments
---------
polygons : list
a collection of PySAL shapes to build weights from
ids : list
a list of names to use to build the weights
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
---------
:class:`pysal.weights.W`
"""
criterion = 'queen'
ids = kw.pop('ids', None)
neighbors, ids = _build(polygons, ids=ids,
criterion=criterion)
W.__init__(self, neighbors, ids=ids, **kw)
@classmethod
def from_shapefile(cls, filepath, idVariable=None, full=False, **kwargs):
"""
Queen contiguity weights from a polygon shapefile.
Parameters
----------
filepath : string
name of polygon shapefile including suffix.
idVariable : string
name of a column in the shapefile's DBF to use for ids.
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
w : W
instance of spatial weights
Examples
--------
>>> wq=Queen.from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=Queen.from_shapefile(pysal.examples.get_path("columbus.shp"),"POLYID")
>>> "%.3f"%wq.pct_nonzero
'9.829'
>>> wq=Queen.from_shapefile(pysal.examples.get_path("columbus.shp"), sparse=True)
>>> pct_sp = wq.sparse.nnz *1. / wq.n**2
>>> "%.3f"%pct_sp
'0.098'
Notes
-----
Queen contiguity defines as neighbors any pair of polygons that share at
least one vertex in their polygon definitions.
See Also
--------
:class:`pysal.weights.W`
:class:`pysal.weights.Queen`
"""
sparse = kwargs.pop('sparse', False)
if idVariable is not None:
ids = get_ids(filepath, idVariable)
else:
ids = None
w = cls(FileIO(filepath), ids=ids, **kwargs)
w.set_shapefile(filepath, idVariable=idVariable, full=full)
if sparse:
w = w.to_WSP()
return w
@classmethod
def from_iterable(cls, iterable, sparse=False, **kwargs):
"""
Construct a weights object from a collection of arbitrary polygons. This
will cast the polygons to PySAL polygons, then build the W.
Arguments
---------
iterable : iterable
a collection of shapes to be cast to PySAL shapes. Must
support iteration. Contents should at least implement a
`__geo_interface__` attribute or be able to be coerced to
geometries using pysal.cg.asShape
**kw : keyword arguments
optional arguments for :class:`pysal.weights.W`
See Also
----------
:class:`pysal.weights.W`
:class:`pysal.weights.Queen`
"""
new_iterable = [asShape(shape) for shape in iterable]
w = cls(new_iterable, **kwargs)
if sparse:
w = WSP.from_W(w)
return w
@classmethod
def from_dataframe(cls, df, geom_col='geometry', **kwargs):
"""
Construct a weights object from a pandas dataframe with a geometry
column. This will cast the polygons to PySAL polygons, then build the W
using ids from the dataframe.
Arguments
---------
df : DataFrame
a :class: `pandas.DataFrame` containing geometries to use
for spatial weights
geom_col : string
the name of the column in `df` that contains the
geometries. Defaults to `geometry`
idVariable : string
the name of the column to use as IDs. If nothing is
provided, the dataframe index is used
ids : list
a list of ids to use to index the spatial weights object.
Order is not respected from this list.
id_order : list
an ordered list of ids to use to index the spatial weights
object. If used, the resulting weights object will iterate
over results in the order of the names provided in this
argument.
See Also
---------
:class:`pysal.weights.W`
:class:`pysal.weights.Queen`
"""
idVariable = kwargs.pop('idVariable', None)
ids = kwargs.pop('ids', None)
id_order = kwargs.pop('id_order', None)
if id_order is not None:
if id_order is True and ((idVariable is not None)
or (ids is not None)):
# if idVariable is None, we want ids. Otherwise, we want the
# idVariable column
ids = list(df.get(idVariable, ids))
id_order = ids
elif isinstance(id_order, str):
ids = df.get(id_order, ids)
id_order = ids
elif idVariable is not None:
ids = df.get(idVariable).tolist()
elif isinstance(ids, str):
ids = df.get(ids).tolist()
w = cls.from_iterable(df[geom_col].tolist(), ids=ids, id_order=id_order, **kwargs)
return w
def _build(polygons, criterion="rook", ids=None):
"""
This is a developer-facing function to construct a spatial weights object.
Arguments
---------
polygons : list
list of pysal polygons to use to build contiguity
criterion : string
which kind of contiguity to build; either "rook" or "queen"
ids : list
list of ids to use to index the neighbor dictionary
Returns
-------
tuple containing (neighbors, ids), where neighbors is a dictionary
describing contiguity relations and ids is the list of ids used to index
that dictionary.
NOTE: this is different from the prior behavior of buildContiguity, which
returned an actual weights object. Since this just dispatches for the
classes above, this returns the raw ingredients for a spatial weights
object, not the object itself.
"""
if ids and len(ids) != len(set(ids)):
raise ValueError("The argument to the ids parameter contains duplicate entries.")
wttype = WT_TYPE[criterion.lower()]
geo = polygons
if issubclass(type(geo), FileIO):
geo.seek(0) # Make sure we read from the beginning of the file.
neighbor_data = ContiguityWeightsLists(polygons, wttype=wttype).w
neighbors = {}
#weights={}
if ids:
for key in neighbor_data:
ida = ids[key]
if ida not in neighbors:
neighbors[ida] = set()
neighbors[ida].update([ids[x] for x in neighbor_data[key]])
for key in neighbors:
neighbors[key] = set(neighbors[key])
else:
for key in neighbor_data:
neighbors[key] = set(neighbor_data[key])
return neighbors, ids
def buildContiguity(polygons, criterion="rook", ids=None):
"""
This is a deprecated function.
It builds a contiguity W from the polygons provided. As such, it is now
identical to calling the class constructors for Rook or Queen.
"""
#Warn('This function is deprecated. Please use the Rook or Queen classes',
# UserWarning)
if criterion.lower() == 'rook':
return Rook(polygons, ids=ids)
elif criterion.lower() == 'queen':
return Queen(polygons, ids=ids)
else:
raise Exception('Weights criterion "{}" was not found.'.format(criterion))
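# A minimal usage sketch (illustration only -- nothing in this module calls
# it). It assumes the legacy `pysal` package with its bundled columbus.shp
# example data is installed, mirroring the doctests above.
def _example_contiguity_usage():
    import pysal
    shp = pysal.examples.get_path("columbus.shp")
    wq = Queen.from_shapefile(shp, "POLYID")  # neighbors share at least a vertex
    wr = Rook.from_shapefile(shp, "POLYID")   # neighbors share a common edge
    # Queen neighbor sets are supersets of the corresponding rook sets.
    return wq.pct_nonzero, wr.pct_nonzero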
|
|
"""
Binary Decision Diagrams
Interface Functions:
bddvar
bddnode
bdd
expr2bddnode
expr2bdd
bdd2expr
Interface Classes:
BinaryDecisionDiagram
BDDConstant
BDDVariable
"""
# Disable "redefining name from outer scope"
# pylint: disable=W0621
import random
import weakref
from pyeda.boolalg import boolfunc
from pyeda.boolalg.expr import exprvar, Or, And, EXPRZERO, EXPRONE
from pyeda.util import cached_property
# existing BDDVariable references
BDDVARIABLES = dict()
BDDNODES = weakref.WeakValueDictionary()
BDDS = weakref.WeakValueDictionary()
_RESTRICT_CACHE = weakref.WeakValueDictionary()
class BDDNode(object):
"""Binary Decision Diagram Node"""
def __init__(self, root, low, high):
self.root = root
self.low = low
self.high = high
BDDNODEZERO = BDDNODES[(-2, None, None)] = BDDNode(-2, None, None)
BDDNODEONE = BDDNODES[(-1, None, None)] = BDDNode(-1, None, None)
def bddvar(name, index=None):
"""Return a BDD variable.
Parameters
----------
name : str
The variable's identifier string.
index : int or tuple[int], optional
One or more integer suffixes for variables that are part of a
multi-dimensional bit-vector, eg x[1], x[1][2][3]
"""
bvar = boolfunc.var(name, index)
try:
var = BDDVARIABLES[bvar.uniqid]
except KeyError:
var = BDDVARIABLES[bvar.uniqid] = BDDVariable(bvar)
BDDS[var.node] = var
return var
def bddnode(root, low, high):
"""Return a unique BDD node."""
if low is high:
node = low
else:
key = (root, low, high)
try:
node = BDDNODES[key]
except KeyError:
node = BDDNODES[key] = BDDNode(*key)
return node
def bdd(node):
"""Return a unique BDD."""
try:
_bdd = BDDS[node]
except KeyError:
_bdd = BDDS[node] = BinaryDecisionDiagram(node)
return _bdd
def expr2bddnode(expr):
"""Convert an expression into a BDD node."""
if expr is EXPRZERO:
return BDDNODEZERO
elif expr is EXPRONE:
return BDDNODEONE
else:
top = expr.top
# Register this variable
_ = bddvar(top.names, top.indices)
root = top.uniqid
low = expr2bddnode(expr.restrict({top: 0}))
high = expr2bddnode(expr.restrict({top: 1}))
return bddnode(root, low, high)
def expr2bdd(expr):
"""Convert an expression into a binary decision diagram."""
return bdd(expr2bddnode(expr))
def bdd2expr(bdd, conj=False):
"""Convert a binary decision diagram into an expression."""
if bdd.node is BDDNODEZERO:
return EXPRZERO
elif bdd.node is BDDNODEONE:
return EXPRONE
else:
if conj:
outer, inner = (And, Or)
paths = _iter_all_paths(bdd.node, BDDNODEZERO)
else:
outer, inner = (Or, And)
paths = _iter_all_paths(bdd.node, BDDNODEONE)
terms = list()
for path in paths:
expr_point = {exprvar(v.names, v.indices): val
for v, val in path2point(path).items()}
terms.append(boolfunc.point2term(expr_point, conj))
return outer(*[inner(*term) for term in terms])
def path2point(path):
"""Convert a BDD path to a BDD point."""
point = dict()
for i, node in enumerate(path[:-1]):
if node.low is path[i+1]:
point[BDDVARIABLES[node.root]] = 0
elif node.high is path[i+1]:
point[BDDVARIABLES[node.root]] = 1
return point
def upoint2bddpoint(upoint):
"""Convert an untyped point to a BDD point."""
point = dict()
for uniqid in upoint[0]:
point[BDDVARIABLES[uniqid]] = 0
for uniqid in upoint[1]:
point[BDDVARIABLES[uniqid]] = 1
return point
class BinaryDecisionDiagram(boolfunc.Function):
"""Boolean function represented by a binary decision diagram."""
def __init__(self, node):
self.node = node
def __str__(self):
return "BDD({0.root}, {0.low}, {0.high})".format(self.node)
def __repr__(self):
return str(self)
# Operators
def __invert__(self):
return bdd(_neg(self.node))
def __or__(self, other):
other_node = self.box(other).node
# f | g <=> ITE(f, 1, g)
return bdd(_ite(self.node, BDDNODEONE, other_node))
def __and__(self, other):
other_node = self.box(other).node
# f & g <=> ITE(f, g, 0)
return bdd(_ite(self.node, other_node, BDDNODEZERO))
def __xor__(self, other):
other_node = self.box(other).node
# f ^ g <=> ITE(f, g', g)
return bdd(_ite(self.node, _neg(other_node), other_node))
def __sub__(self, other):
other_node = self.box(other).node
# f - g <=> ITE(f, 1, g')
return bdd(_ite(self.node, BDDNODEONE, _neg(other_node)))
# From Function
@cached_property
def support(self):
return frozenset(self.inputs)
@cached_property
def inputs(self):
_inputs = list()
for node in self.traverse():
if node.root > 0:
v = BDDVARIABLES[node.root]
if v not in _inputs:
_inputs.append(v)
return tuple(reversed(_inputs))
def urestrict(self, upoint):
return bdd(_urestrict(self.node, upoint))
def compose(self, mapping):
node = self.node
for v, g in mapping.items():
fv0, fv1 = bdd(node).cofactors(v)
node = _ite(g.node, fv1.node, fv0.node)
return bdd(node)
def satisfy_one(self):
path = _find_path(self.node, BDDNODEONE)
if path is None:
return None
else:
return path2point(path)
def satisfy_all(self):
for path in _iter_all_paths(self.node, BDDNODEONE):
yield path2point(path)
def satisfy_count(self):
return sum(1 for _ in self.satisfy_all())
def is_zero(self):
return self.node is BDDNODEZERO
def is_one(self):
return self.node is BDDNODEONE
@staticmethod
def box(arg):
if isinstance(arg, BinaryDecisionDiagram):
return arg
elif arg == 0 or arg == '0':
return BDDZERO
elif arg == 1 or arg == '1':
return BDDONE
else:
return CONSTANTS[bool(arg)]
# Specific to BinaryDecisionDiagram
def traverse(self):
"""Iterate through all nodes in this BDD in DFS order."""
for node in _dfs(self.node, set()):
yield node
def equivalent(self, other):
"""Return whether this BDD is equivalent to another."""
other = self.box(other)
return self.node is other.node
def to_dot(self, name='BDD'):
"""Convert to DOT language representation."""
parts = ['graph', name, '{']
for node in self.traverse():
if node is BDDNODEZERO:
parts += ['n' + str(id(node)), '[label=0,shape=box];']
elif node is BDDNODEONE:
parts += ['n' + str(id(node)), '[label=1,shape=box];']
else:
v = BDDVARIABLES[node.root]
parts.append('n' + str(id(node)))
parts.append('[label="{}",shape=circle];'.format(v))
for node in self.traverse():
if node is not BDDNODEZERO and node is not BDDNODEONE:
parts += ['n' + str(id(node)), '--',
'n' + str(id(node.low)),
'[label=0,style=dashed];']
parts += ['n' + str(id(node)), '--',
'n' + str(id(node.high)),
'[label=1];']
parts.append('}')
return " ".join(parts)
class BDDConstant(BinaryDecisionDiagram):
"""Binary decision diagram constant"""
VALUE = NotImplemented
def __bool__(self):
return bool(self.VALUE)
def __int__(self):
return self.VALUE
def __str__(self):
return str(self.VALUE)
class _BDDZero(BDDConstant):
"""Binary decision diagram zero"""
VALUE = 0
def __init__(self):
super(_BDDZero, self).__init__(BDDNODEZERO)
class _BDDOne(BDDConstant):
"""Binary decision diagram one"""
VALUE = 1
def __init__(self):
super(_BDDOne, self).__init__(BDDNODEONE)
BDDZERO = BDDS[BDDNODEZERO] = _BDDZero()
BDDONE = BDDS[BDDNODEONE] = _BDDOne()
CONSTANTS = [BDDZERO, BDDONE]
class BDDVariable(boolfunc.Variable, BinaryDecisionDiagram):
"""Binary decision diagram variable"""
def __init__(self, bvar):
boolfunc.Variable.__init__(self, bvar.names, bvar.indices)
node = bddnode(bvar.uniqid, BDDNODEZERO, BDDNODEONE)
BinaryDecisionDiagram.__init__(self, node)
def _neg(node):
"""Return the node that is the inverse of the input node."""
if node is BDDNODEZERO:
return BDDNODEONE
elif node is BDDNODEONE:
return BDDNODEZERO
else:
return bddnode(node.root, _neg(node.low), _neg(node.high))
def _ite(f, g, h):
"""Return node that results from recursively applying ITE(f, g, h)."""
# ITE(f, 1, 0) = f
if g is BDDNODEONE and h is BDDNODEZERO:
return f
# ITE(f, 0, 1) = f'
elif g is BDDNODEZERO and h is BDDNODEONE:
return _neg(f)
# ITE(1, g, h) = g
elif f is BDDNODEONE:
return g
# ITE(0, g, h) = h
elif f is BDDNODEZERO:
return h
# ITE(f, g, g) = g
elif g is h:
return g
else:
# ITE(f, g, h) = ITE(x, ITE(fx', gx', hx'), ITE(fx, gx, hx))
root = min(node.root for node in (f, g, h) if node.root > 0)
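# x is the top-most variable, i.e. the smallest positive uniqid among f, g, h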
upoint0 = frozenset([root]), frozenset()
upoint1 = frozenset(), frozenset([root])
fv0, gv0, hv0 = [_urestrict(node, upoint0) for node in (f, g, h)]
fv1, gv1, hv1 = [_urestrict(node, upoint1) for node in (f, g, h)]
return bddnode(root, _ite(fv0, gv0, hv0), _ite(fv1, gv1, hv1))
def _urestrict(node, upoint):
"""Return node that results from untyped point restriction."""
if node is BDDNODEZERO or node is BDDNODEONE:
return node
key = (node, upoint)
try:
ret = _RESTRICT_CACHE[key]
except KeyError:
if node.root in upoint[0]:
ret = _urestrict(node.low, upoint)
elif node.root in upoint[1]:
ret = _urestrict(node.high, upoint)
else:
low = _urestrict(node.low, upoint)
high = _urestrict(node.high, upoint)
ret = bddnode(node.root, low, high)
_RESTRICT_CACHE[key] = ret
return ret
def _find_path(start, end, path=tuple()):
"""Return the path from start to end.
If no path exists, return None.
"""
path = path + (start, )
if start is end:
return path
else:
ret = None
if start.low is not None:
ret = _find_path(start.low, end, path)
if ret is None and start.high is not None:
ret = _find_path(start.high, end, path)
return ret
def _iter_all_paths(start, end, rand=False, path=tuple()):
"""Iterate through all paths from start to end."""
path = path + (start, )
if start is end:
yield path
else:
nodes = [start.low, start.high]
if rand:
random.shuffle(nodes)
for node in nodes:
if node is not None:
for _path in _iter_all_paths(node, end, rand, path):
yield _path
def _dfs(node, visited):
"""Iterate through a depth-first traveral starting at node."""
low, high = node.low, node.high
if low is not None:
for _node in _dfs(low, visited):
yield _node
if high is not None:
for _node in _dfs(high, visited):
yield _node
if node not in visited:
visited.add(node)
yield node
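# A minimal usage sketch of the interface functions above (illustration only
# -- nothing in this module calls it): build a BDD from an expression,
# query it, and convert it back to a two-level expression.
def _example_bdd_usage():
    a, b, c = map(exprvar, 'abc')
    f = expr2bdd(a & b | c)   # Shannon-expand the expression into a BDD
    point = f.satisfy_one()   # one satisfying point: {BDDVariable: 0 or 1}
    expr = bdd2expr(f)        # Or-of-And expression covering all 1-paths
    return f.support, point, expr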
|
|
###############################################################################
#
# Format - A class for writing the Excel XLSX Worksheet file.
#
# Copyright 2013-2016, John McNamara, [email protected]
#
# Package imports.
from . import xmlwriter
class Format(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Format file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, properties={}, xf_indices=None, dxf_indices=None):
"""
Constructor.
"""
super(Format, self).__init__()
self.xf_format_indices = xf_indices
self.dxf_format_indices = dxf_indices
self.xf_index = None
self.dxf_index = None
self.num_format = 0
self.num_format_index = 0
self.font_index = 0
self.has_font = 0
self.has_dxf_font = 0
self.bold = 0
self.underline = 0
self.italic = 0
self.font_name = 'Calibri'
self.font_size = 11
self.font_color = 0x0
self.font_strikeout = 0
self.font_outline = 0
self.font_shadow = 0
self.font_script = 0
self.font_family = 2
self.font_charset = 0
self.font_scheme = 'minor'
self.font_condense = 0
self.font_extend = 0
self.theme = 0
self.hyperlink = 0
self.hidden = 0
self.locked = 1
self.text_h_align = 0
self.text_wrap = 0
self.text_v_align = 0
self.text_justlast = 0
self.rotation = 0
self.center_across = 0
self.fg_color = 0
self.bg_color = 0
self.pattern = 0
self.has_fill = 0
self.has_dxf_fill = 0
self.fill_index = 0
self.fill_count = 0
self.border_index = 0
self.has_border = 0
self.has_dxf_border = 0
self.border_count = 0
self.bottom = 0
self.bottom_color = 0
self.diag_border = 0
self.diag_color = 0
self.diag_type = 0
self.left = 0
self.left_color = 0
self.right = 0
self.right_color = 0
self.top = 0
self.top_color = 0
self.indent = 0
self.shrink = 0
self.merge_range = 0
self.reading_order = 0
self.just_distrib = 0
self.color_indexed = 0
self.font_only = 0
# Convert properties in the constructor to method calls.
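# e.g. Format({'bold': True, 'font_color': 'red'}) ends up calling
# self.set_bold(True) and then self.set_font_color('red').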
for key, value in properties.items():
getattr(self, 'set_' + key)(value)
###########################################################################
#
# Format properties.
#
###########################################################################
def set_font_name(self, font_name):
"""
Set the Format font_name property such as 'Times New Roman'. The
default Excel font is 'Calibri'.
Args:
font_name: String with the font name. No default.
Returns:
Nothing.
"""
self.font_name = font_name
def set_font_size(self, font_size=11):
"""
Set the Format font_size property. The default Excel font size is 11.
Args:
font_size: Int with font size. No default.
Returns:
Nothing.
"""
self.font_size = font_size
def set_font_color(self, font_color):
"""
Set the Format font_color property. The Excel default is black.
Args:
font_color: String with the font color. No default.
Returns:
Nothing.
"""
self.font_color = self._get_color(font_color)
def set_bold(self, bold=1):
"""
Set the Format bold property.
Args:
bold: Default is 1, turns property on.
Returns:
Nothing.
"""
self.bold = bold
def set_italic(self, italic=1):
"""
Set the Format italic property.
Args:
italic: Default is 1, turns property on.
Returns:
Nothing.
"""
self.italic = italic
def set_underline(self, underline=1):
"""
Set the Format underline property.
Args:
underline: Default is 1, single underline.
Returns:
Nothing.
"""
self.underline = underline
def set_font_strikeout(self, font_strikeout=1):
"""
Set the Format font_strikeout property.
Args:
font_strikeout: Default is 1, turns property on.
Returns:
Nothing.
"""
self.font_strikeout = font_strikeout
def set_font_script(self, font_script=1):
"""
Set the Format font_script property.
Args:
font_script: Default is 1, superscript.
Returns:
Nothing.
"""
self.font_script = font_script
def set_font_outline(self, font_outline=1):
"""
Set the Format font_outline property.
Args:
font_outline: Default is 1, turns property on.
Returns:
Nothing.
"""
self.font_outline = font_outline
def set_font_shadow(self, font_shadow=1):
"""
Set the Format font_shadow property.
Args:
font_shadow: Default is 1, turns property on.
Returns:
Nothing.
"""
self.font_shadow = font_shadow
def set_num_format(self, num_format):
"""
Set the Format num_format property such as '#,##0'.
Args:
num_format: String representing the number format. No default.
Returns:
Nothing.
"""
self.num_format = num_format
def set_locked(self, locked=1):
"""
Set the Format locked property.
Args:
locked: Default is 1, turns property on.
Returns:
Nothing.
"""
self.locked = locked
def set_hidden(self, hidden=1):
"""
Set the Format hidden property.
Args:
hidden: Default is 1, turns property on.
Returns:
Nothing.
"""
self.hidden = hidden
def set_align(self, alignment):
"""
Set the Format cell alignment.
Args:
alignment: String representing alignment. No default.
Returns:
Nothing.
"""
alignment = alignment.lower()
# Set horizontal alignment properties.
if alignment == 'left':
self.set_text_h_align(1)
if alignment == 'centre':
self.set_text_h_align(2)
if alignment == 'center':
self.set_text_h_align(2)
if alignment == 'right':
self.set_text_h_align(3)
if alignment == 'fill':
self.set_text_h_align(4)
if alignment == 'justify':
self.set_text_h_align(5)
if alignment == 'center_across':
self.set_text_h_align(6)
if alignment == 'centre_across':
self.set_text_h_align(6)
if alignment == 'distributed':
self.set_text_h_align(7)
if alignment == 'justify_distributed':
self.set_text_h_align(7)
if alignment == 'justify_distributed':
self.just_distrib = 1
# Set vertical alignment properties.
if alignment == 'top':
self.set_text_v_align(1)
if alignment == 'vcentre':
self.set_text_v_align(2)
if alignment == 'vcenter':
self.set_text_v_align(2)
if alignment == 'bottom':
self.set_text_v_align(3)
if alignment == 'vjustify':
self.set_text_v_align(4)
if alignment == 'vdistributed':
self.set_text_v_align(5)
def set_center_across(self, center_across=1):
"""
Set the Format center_across property.
Args:
center_across: Default is 1, turns property on.
Returns:
Nothing.
"""
self.center_across = center_across
def set_text_wrap(self, text_wrap=1):
"""
Set the Format text_wrap property.
Args:
text_wrap: Default is 1, turns property on.
Returns:
Nothing.
"""
self.text_wrap = text_wrap
def set_rotation(self, rotation):
"""
Set the Format rotation property.
Args:
rotation: Rotation angle. No default.
Returns:
Nothing.
"""
rotation = int(rotation)
# Map user angle to Excel angle.
if rotation == 270:
rotation = 255
elif -90 <= rotation <= 90:
if rotation < 0:
rotation = -rotation + 90
else:
raise Exception(
"Rotation angle outside range: -90 <= angle <= 90")
self.rotation = rotation
def set_indent(self, indent=1):
"""
Set the Format indent property.
Args:
indent: Default is 1, turns property on.
Returns:
Nothing.
"""
self.indent = indent
def set_shrink(self, shrink=1):
"""
Set the Format shrink property.
Args:
shrink: Default is 1, turns property on.
Returns:
Nothing.
"""
self.shrink = shrink
def set_text_justlast(self, text_justlast=1):
"""
Set the Format text_justlast property.
Args:
text_justlast: Default is 1, turns property on.
Returns:
Nothing.
"""
self.text_justlast = text_justlast
def set_pattern(self, pattern=1):
"""
Set the Format pattern property.
Args:
pattern: Default is 1, solid fill.
Returns:
Nothing.
"""
self.pattern = pattern
def set_bg_color(self, bg_color):
"""
Set the Format bg_color property.
Args:
bg_color: Background color. No default.
Returns:
Nothing.
"""
self.bg_color = self._get_color(bg_color)
def set_fg_color(self, fg_color):
"""
Set the Format fg_color property.
Args:
fg_color: Foreground color. No default.
Returns:
Nothing.
"""
self.fg_color = self._get_color(fg_color)
# set_border(style) Set cells borders to the same style
def set_border(self, style=1):
"""
Set the Format border properties (top, bottom, left and right).
Args:
style: Default is 1, border type 1.
Returns:
Nothing.
"""
self.set_bottom(style)
self.set_top(style)
self.set_left(style)
self.set_right(style)
# set_border_color(color) Set cells border to the same color
def set_border_color(self, color):
"""
Set the Format border color properties (top, bottom, left and right).
Args:
color: Color string. No default.
Returns:
Nothing.
"""
self.set_bottom_color(color)
self.set_top_color(color)
self.set_left_color(color)
self.set_right_color(color)
def set_bottom(self, bottom=1):
"""
Set the Format bottom property.
Args:
bottom: Default is 1, border type 1.
Returns:
Nothing.
"""
self.bottom = bottom
def set_bottom_color(self, bottom_color):
"""
Set the Format bottom_color property.
Args:
bottom_color: Color string. No default.
Returns:
Nothing.
"""
self.bottom_color = self._get_color(bottom_color)
def set_diag_type(self, diag_type=1):
"""
Set the Format diag_type property.
Args:
diag_type: Default is 1, border type 1.
Returns:
Nothing.
"""
self.diag_type = diag_type
def set_left(self, left=1):
"""
Set the Format left property.
Args:
left: Default is 1, border type 1.
Returns:
Nothing.
"""
self.left = left
def set_left_color(self, left_color):
"""
Set the Format left_color property.
Args:
left_color: Color string. No default.
Returns:
Nothing.
"""
self.left_color = self._get_color(left_color)
def set_right(self, right=1):
"""
Set the Format right property.
Args:
right: Default is 1, border type 1.
Returns:
Nothing.
"""
self.right = right
def set_right_color(self, right_color):
"""
Set the Format right_color property.
Args:
right_color: Color string. No default.
Returns:
Nothing.
"""
self.right_color = self._get_color(right_color)
def set_top(self, top=1):
"""
Set the Format top property.
Args:
top: Default is 1, border type 1.
Returns:
Nothing.
"""
self.top = top
def set_top_color(self, top_color):
"""
Set the Format top_color property.
Args:
top_color: Color string. No default.
Returns:
Nothing.
"""
self.top_color = self._get_color(top_color)
def set_diag_color(self, diag_color):
"""
Set the Format diag_color property.
Args:
diag_color: Color string. No default.
Returns:
Nothing.
"""
self.diag_color = self._get_color(diag_color)
def set_diag_border(self, diag_border=1):
"""
Set the Format diag_border property.
Args:
diag_border: Default is 1, border type 1.
Returns:
Nothing.
"""
self.diag_border = diag_border
###########################################################################
#
# Internal Format properties. These aren't documented since they are
# either only used internally or else are unlikely to be set by the user.
#
###########################################################################
def set_has_font(self, has_font=1):
# Set the has_font property.
self.has_font = has_font
def set_has_fill(self, has_fill=1):
# Set the has_fill property.
self.has_fill = has_fill
def set_font_index(self, font_index):
# Set the font_index property.
self.font_index = font_index
def set_xf_index(self, xf_index):
# Set the xf_index property.
self.xf_index = xf_index
def set_dxf_index(self, dxf_index):
# Set the dxf_index property.
self.dxf_index = dxf_index
def set_num_format_index(self, num_format_index):
# Set the num_format_index property.
self.num_format_index = num_format_index
def set_text_h_align(self, text_h_align):
# Set the text_h_align property.
self.text_h_align = text_h_align
def set_text_v_align(self, text_v_align):
# Set the text_v_align property.
self.text_v_align = text_v_align
def set_reading_order(self, reading_order=1):
# Set the reading_order property.
self.reading_order = reading_order
def set_valign(self, align):
# Set vertical cell alignment. This is required by the constructor
# properties dict to differentiate between the vertical and horizontal
# properties.
self.set_align(align)
def set_font_family(self, font_family):
# Set the Format font_family property.
self.font_family = font_family
def set_font_charset(self, font_charset):
# Set the Format font_charset property.
self.font_charset = font_charset
def set_font_scheme(self, font_scheme):
# Set the Format font_scheme property.
self.font_scheme = font_scheme
def set_font_condense(self, font_condense):
# Set the Format font_condense property.
self.font_condense = font_condense
def set_font_extend(self, font_extend):
# Set the Format font_extend property.
self.font_extend = font_extend
def set_theme(self, theme):
# Set the Format theme property.
self.theme = theme
def set_hyperlink(self, hyperlink=1):
# Set the properties for the hyperlink style. This doesn't
# currently work. To be fixed when styles are supported.
self.set_underline(1)
self.set_theme(10)
self.set_align('top')
self.hyperlink = hyperlink
def set_color_indexed(self, color_index):
# Used in the cell comment format.
self.color_indexed = color_index
def set_font_only(self, font_only=True):
# Used in the cell comment format.
self.font_only = font_only
# Compatibility methods.
def set_font(self, font_name):
# For compatibility with Excel::Writer::XLSX.
self.font_name = font_name
def set_size(self, font_size):
# For compatibility with Excel::Writer::XLSX.
self.font_size = font_size
def set_color(self, font_color):
# For compatibility with Excel::Writer::XLSX.
self.font_color = self._get_color(font_color)
###########################################################################
#
# Private API.
#
###########################################################################
def _get_align_properties(self):
# Return properties for a Style xf <alignment> sub-element.
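# e.g. a center-aligned, wrapped cell returns
# (1, [('horizontal', 'center'), ('wrapText', 1)]).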
changed = 0
align = []
# Check if any alignment options in the format have been changed.
if (self.text_h_align or self.text_v_align or self.indent
or self.rotation or self.text_wrap or self.shrink
or self.reading_order):
changed = 1
else:
return changed, align
# Indent is only allowed for horizontal left, right and distributed.
# If it is defined for any other alignment or no alignment has
# been set then default to left alignment.
if (self.indent
and self.text_h_align != 1
and self.text_h_align != 3
and self.text_h_align != 7):
self.text_h_align = 1
# Check for properties that are mutually exclusive.
if self.text_wrap:
self.shrink = 0
if self.text_h_align == 4:
self.shrink = 0
if self.text_h_align == 5:
self.shrink = 0
if self.text_h_align == 7:
self.shrink = 0
if self.text_h_align != 7:
self.just_distrib = 0
if self.indent:
self.just_distrib = 0
continuous = 'centerContinuous'
if self.text_h_align == 1:
align.append(('horizontal', 'left'))
if self.text_h_align == 2:
align.append(('horizontal', 'center'))
if self.text_h_align == 3:
align.append(('horizontal', 'right'))
if self.text_h_align == 4:
align.append(('horizontal', 'fill'))
if self.text_h_align == 5:
align.append(('horizontal', 'justify'))
if self.text_h_align == 6:
align.append(('horizontal', continuous))
if self.text_h_align == 7:
align.append(('horizontal', 'distributed'))
if self.just_distrib:
align.append(('justifyLastLine', 1))
# Property 'vertical' => 'bottom' is a default. It sets applyAlignment
# without an alignment sub-element.
if self.text_v_align == 1:
align.append(('vertical', 'top'))
if self.text_v_align == 2:
align.append(('vertical', 'center'))
if self.text_v_align == 4:
align.append(('vertical', 'justify'))
if self.text_v_align == 5:
align.append(('vertical', 'distributed'))
if self.indent:
align.append(('indent', self.indent))
if self.rotation:
align.append(('textRotation', self.rotation))
if self.text_wrap:
align.append(('wrapText', 1))
if self.shrink:
align.append(('shrinkToFit', 1))
if self.reading_order == 1:
align.append(('readingOrder', 1))
if self.reading_order == 2:
align.append(('readingOrder', 2))
return changed, align
def _get_protection_properties(self):
# Return properties for an Excel XML <Protection> element.
attribs = []
if not self.locked:
attribs.append(('locked', 0))
if self.hidden:
attribs.append(('hidden', 1))
return attribs
def _get_format_key(self):
# Returns a unique hash key for a font. Used by Workbook.
key = ':'.join(self._to_string(x) for x in (
self._get_font_key(),
self._get_border_key(),
self._get_fill_key(),
self._get_alignment_key(),
self.num_format,
self.locked,
self.hidden))
return key
def _get_font_key(self):
# Returns a unique hash key for a font. Used by Workbook.
key = ':'.join(self._to_string(x) for x in (
self.bold,
self.font_color,
self.font_charset,
self.font_family,
self.font_outline,
self.font_script,
self.font_shadow,
self.font_strikeout,
self.font_name,
self.italic,
self.font_size,
self.underline))
return key
def _get_border_key(self):
# Returns a unique hash key for a border style. Used by Workbook.
key = ':'.join(self._to_string(x) for x in (
self.bottom,
self.bottom_color,
self.diag_border,
self.diag_color,
self.diag_type,
self.left,
self.left_color,
self.right,
self.right_color,
self.top,
self.top_color))
return key
def _get_fill_key(self):
# Returns a unique hash key for a fill style. Used by Workbook.
key = ':'.join(self._to_string(x) for x in (
self.pattern,
self.bg_color,
self.fg_color))
return key
def _get_alignment_key(self):
# Returns a unique hash key for alignment formats.
key = ':'.join(self._to_string(x) for x in (
self.text_h_align,
self.text_v_align,
self.indent,
self.rotation,
self.text_wrap,
self.shrink,
self.reading_order))
return key
def _get_xf_index(self):
# Returns the XF index number used by Excel to identify a format.
if self.xf_index is not None:
# Format already has an index number so return it.
return self.xf_index
else:
# Format doesn't have an index number so assign one.
key = self._get_format_key()
if key in self.xf_format_indices:
# Format matches existing format with an index.
return self.xf_format_indices[key]
else:
# New format requiring an index. Note. +1 since Excel
# has an implicit "General" format at index 0.
index = 1 + len(self.xf_format_indices)
self.xf_format_indices[key] = index
self.xf_index = index
return index
def _get_dxf_index(self):
# Returns the DXF index number used by Excel to identify a format.
if self.dxf_index is not None:
# Format already has an index number so return it.
return self.dxf_index
else:
# Format doesn't have an index number so assign one.
key = self._get_format_key()
if key in self.dxf_format_indices:
# Format matches existing format with an index.
return self.dxf_format_indices[key]
else:
# New format requiring an index.
index = len(self.dxf_format_indices)
self.dxf_format_indices[key] = index
self.dxf_index = index
return index
def _get_color(self, color):
# Used in conjunction with the set_xxx_color methods to convert a
# color name into an RGB formatted string. These colors are for
# backward compatibility with older versions of Excel.
named_colors = {
'black': '#000000',
'blue': '#0000FF',
'brown': '#800000',
'cyan': '#00FFFF',
'gray': '#808080',
'green': '#008000',
'lime': '#00FF00',
'magenta': '#FF00FF',
'navy': '#000080',
'orange': '#FF6600',
'pink': '#FF00FF',
'purple': '#800080',
'red': '#FF0000',
'silver': '#C0C0C0',
'white': '#FFFFFF',
'yellow': '#FFFF00',
}
if color in named_colors:
color = named_colors[color]
return color
def _to_string(self, value):
# Convert number to a string but allow for utf-8 strings in Python 2.
try:
return str(value)
except UnicodeEncodeError:
return value.encode('utf-8')
###########################################################################
#
# XML methods.
#
###########################################################################
|
|
"""Test data purging."""
from datetime import datetime, timedelta
import json
from sqlalchemy.orm.session import Session
from homeassistant.components import recorder
from homeassistant.components.recorder.models import Events, RecorderRuns, States
from homeassistant.components.recorder.purge import purge_old_data
from homeassistant.components.recorder.util import session_scope
from homeassistant.const import EVENT_STATE_CHANGED
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import dt as dt_util
from .common import (
async_recorder_block_till_done,
async_wait_purge_done,
async_wait_recording_done,
)
from .conftest import SetupRecorderInstanceT
async def test_purge_old_states(
hass: HomeAssistantType, async_setup_recorder_instance: SetupRecorderInstanceT
):
"""Test deleting old states."""
instance = await async_setup_recorder_instance(hass)
await _add_test_states(hass, instance)
# make sure we start with 6 states
with session_scope(hass=hass) as session:
states = session.query(States)
assert states.count() == 6
assert states[0].old_state_id is None
assert states[-1].old_state_id == states[-2].state_id
events = session.query(Events).filter(Events.event_type == "state_changed")
assert events.count() == 6
# run purge_old_data()
finished = purge_old_data(instance, 4, repack=False)
assert not finished
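# purge_old_data() deletes rows in batches and returns False while more work
# remains, so a second pass below is needed to finish the purge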
assert states.count() == 2
states_after_purge = session.query(States)
assert states_after_purge[1].old_state_id == states_after_purge[0].state_id
assert states_after_purge[0].old_state_id is None
finished = purge_old_data(instance, 4, repack=False)
assert finished
assert states.count() == 2
async def test_purge_old_events(
hass: HomeAssistantType, async_setup_recorder_instance: SetupRecorderInstanceT
):
"""Test deleting old events."""
instance = await async_setup_recorder_instance(hass)
await _add_test_events(hass, instance)
with session_scope(hass=hass) as session:
events = session.query(Events).filter(Events.event_type.like("EVENT_TEST%"))
assert events.count() == 6
# run purge_old_data()
finished = purge_old_data(instance, 4, repack=False)
assert not finished
assert events.count() == 2
# we should only have 2 events left
finished = purge_old_data(instance, 4, repack=False)
assert finished
assert events.count() == 2
async def test_purge_old_recorder_runs(
hass: HomeAssistantType, async_setup_recorder_instance: SetupRecorderInstanceT
):
"""Test deleting old recorder runs keeps current run."""
instance = await async_setup_recorder_instance(hass)
await _add_test_recorder_runs(hass, instance)
# make sure we start with 7 recorder runs
with session_scope(hass=hass) as session:
recorder_runs = session.query(RecorderRuns)
assert recorder_runs.count() == 7
# run purge_old_data()
finished = purge_old_data(instance, 0, repack=False)
assert not finished
finished = purge_old_data(instance, 0, repack=False)
assert finished
assert recorder_runs.count() == 1
async def test_purge_method(
hass: HomeAssistantType,
async_setup_recorder_instance: SetupRecorderInstanceT,
caplog,
):
"""Test purge method."""
instance = await async_setup_recorder_instance(hass)
service_data = {"keep_days": 4}
await _add_test_events(hass, instance)
await _add_test_states(hass, instance)
await _add_test_recorder_runs(hass, instance)
await hass.async_block_till_done()
await async_wait_recording_done(hass, instance)
# make sure we start with 6 states
with session_scope(hass=hass) as session:
states = session.query(States)
assert states.count() == 6
events = session.query(Events).filter(Events.event_type.like("EVENT_TEST%"))
assert events.count() == 6
recorder_runs = session.query(RecorderRuns)
assert recorder_runs.count() == 7
runs_before_purge = recorder_runs.all()
await hass.async_block_till_done()
await async_wait_purge_done(hass, instance)
# run purge method - no service data, use defaults
await hass.services.async_call("recorder", "purge")
await hass.async_block_till_done()
# Small wait for recorder thread
await async_wait_purge_done(hass, instance)
# only purged old events
assert states.count() == 4
assert events.count() == 4
# run purge method - correct service data
await hass.services.async_call("recorder", "purge", service_data=service_data)
await hass.async_block_till_done()
# Small wait for recorder thread
await async_wait_purge_done(hass, instance)
# we should only have 2 states left after purging
assert states.count() == 2
# now we should only have 2 events left
assert events.count() == 2
# now we should only have 3 recorder runs left
runs = recorder_runs.all()
assert runs[0] == runs_before_purge[0]
assert runs[1] == runs_before_purge[5]
assert runs[2] == runs_before_purge[6]
assert "EVENT_TEST_PURGE" not in (event.event_type for event in events.all())
# run purge method - correct service data, with repack
service_data["repack"] = True
await hass.services.async_call("recorder", "purge", service_data=service_data)
await hass.async_block_till_done()
await async_wait_purge_done(hass, instance)
assert "Vacuuming SQL DB to free space" in caplog.text
async def test_purge_edge_case(
hass: HomeAssistantType,
async_setup_recorder_instance: SetupRecorderInstanceT,
):
"""Test states and events are purged even if they occurred shortly before purge_before."""
async def _add_db_entries(hass: HomeAssistantType, timestamp: datetime) -> None:
with recorder.session_scope(hass=hass) as session:
session.add(
Events(
event_id=1001,
event_type="EVENT_TEST_PURGE",
event_data="{}",
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
)
session.add(
States(
entity_id="test.recorder2",
domain="sensor",
state="purgeme",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
event_id=1001,
)
)
instance = await async_setup_recorder_instance(hass, None)
await async_wait_purge_done(hass, instance)
service_data = {"keep_days": 2}
timestamp = dt_util.utcnow() - timedelta(days=2, minutes=1)
await _add_db_entries(hass, timestamp)
with session_scope(hass=hass) as session:
states = session.query(States)
assert states.count() == 1
events = session.query(Events).filter(Events.event_type == "EVENT_TEST_PURGE")
assert events.count() == 1
await hass.services.async_call(
recorder.DOMAIN, recorder.SERVICE_PURGE, service_data
)
await hass.async_block_till_done()
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
assert states.count() == 0
assert events.count() == 0
async def test_purge_filtered_states(
hass: HomeAssistantType,
async_setup_recorder_instance: SetupRecorderInstanceT,
):
"""Test filtered states are purged."""
config: ConfigType = {"exclude": {"entities": ["sensor.excluded"]}}
instance = await async_setup_recorder_instance(hass, config)
assert instance.entity_filter("sensor.excluded") is False
def _add_db_entries(hass: HomeAssistantType) -> None:
with recorder.session_scope(hass=hass) as session:
# Add states and state_changed events that should be purged
for days in range(1, 4):
timestamp = dt_util.utcnow() - timedelta(days=days)
for event_id in range(1000, 1020):
_add_state_and_state_changed_event(
session,
"sensor.excluded",
"purgeme",
timestamp,
event_id * days,
)
# Add state **without** state_changed event that should be purged
timestamp = dt_util.utcnow() - timedelta(days=1)
session.add(
States(
entity_id="sensor.excluded",
domain="sensor",
state="purgeme",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
)
)
            # Add states and state_changed events that should be kept
timestamp = dt_util.utcnow() - timedelta(days=2)
for event_id in range(200, 210):
_add_state_and_state_changed_event(
session,
"sensor.keep",
"keep",
timestamp,
event_id,
)
# Add states with linked old_state_ids that need to be handled
timestamp = dt_util.utcnow() - timedelta(days=0)
state_1 = States(
entity_id="sensor.linked_old_state_id",
domain="sensor",
state="keep",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
old_state_id=1,
)
timestamp = dt_util.utcnow() - timedelta(days=4)
state_2 = States(
entity_id="sensor.linked_old_state_id",
domain="sensor",
state="keep",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
old_state_id=2,
)
state_3 = States(
entity_id="sensor.linked_old_state_id",
domain="sensor",
state="keep",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
old_state_id=62, # keep
)
session.add_all((state_1, state_2, state_3))
            # Add an event that should be kept
session.add(
Events(
event_id=100,
event_type="EVENT_KEEP",
event_data="{}",
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
)
service_data = {"keep_days": 10}
_add_db_entries(hass)
with session_scope(hass=hass) as session:
states = session.query(States)
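        # Setup added 60 excluded states with events (3 days x 20), 1 excluded state
        # without an event, 10 'sensor.keep' states and 3 linked states: 74 in total.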
assert states.count() == 74
events_state_changed = session.query(Events).filter(
Events.event_type == EVENT_STATE_CHANGED
)
events_keep = session.query(Events).filter(Events.event_type == "EVENT_KEEP")
assert events_state_changed.count() == 70
assert events_keep.count() == 1
# Normal purge doesn't remove excluded entities
await hass.services.async_call(
recorder.DOMAIN, recorder.SERVICE_PURGE, service_data
)
await hass.async_block_till_done()
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
assert states.count() == 74
assert events_state_changed.count() == 70
assert events_keep.count() == 1
# Test with 'apply_filter' = True
service_data["apply_filter"] = True
await hass.services.async_call(
recorder.DOMAIN, recorder.SERVICE_PURGE, service_data
)
await hass.async_block_till_done()
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
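        # Only the 10 'sensor.keep' states and the 3 linked states should remain.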
assert states.count() == 13
assert events_state_changed.count() == 10
assert events_keep.count() == 1
states_sensor_excluded = session.query(States).filter(
States.entity_id == "sensor.excluded"
)
assert states_sensor_excluded.count() == 0
assert session.query(States).get(72).old_state_id is None
assert session.query(States).get(73).old_state_id is None
assert session.query(States).get(74).old_state_id == 62 # should have been kept
async def test_purge_filtered_events(
hass: HomeAssistantType,
async_setup_recorder_instance: SetupRecorderInstanceT,
):
"""Test filtered events are purged."""
config: ConfigType = {"exclude": {"event_types": ["EVENT_PURGE"]}}
instance = await async_setup_recorder_instance(hass, config)
def _add_db_entries(hass: HomeAssistantType) -> None:
with recorder.session_scope(hass=hass) as session:
# Add events that should be purged
for days in range(1, 4):
timestamp = dt_util.utcnow() - timedelta(days=days)
for event_id in range(1000, 1020):
session.add(
Events(
event_id=event_id * days,
event_type="EVENT_PURGE",
event_data="{}",
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
)
            # Add states and state_changed events that should be kept
timestamp = dt_util.utcnow() - timedelta(days=1)
for event_id in range(200, 210):
_add_state_and_state_changed_event(
session,
"sensor.keep",
"keep",
timestamp,
event_id,
)
service_data = {"keep_days": 10}
_add_db_entries(hass)
with session_scope(hass=hass) as session:
events_purge = session.query(Events).filter(Events.event_type == "EVENT_PURGE")
events_keep = session.query(Events).filter(
Events.event_type == EVENT_STATE_CHANGED
)
states = session.query(States)
assert events_purge.count() == 60
assert events_keep.count() == 10
assert states.count() == 10
# Normal purge doesn't remove excluded events
await hass.services.async_call(
recorder.DOMAIN, recorder.SERVICE_PURGE, service_data
)
await hass.async_block_till_done()
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
assert events_purge.count() == 60
assert events_keep.count() == 10
assert states.count() == 10
# Test with 'apply_filter' = True
service_data["apply_filter"] = True
await hass.services.async_call(
recorder.DOMAIN, recorder.SERVICE_PURGE, service_data
)
await hass.async_block_till_done()
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
assert events_purge.count() == 0
assert events_keep.count() == 10
assert states.count() == 10
async def test_purge_filtered_events_state_changed(
hass: HomeAssistantType,
async_setup_recorder_instance: SetupRecorderInstanceT,
):
"""Test filtered state_changed events are purged. This should also remove all states."""
config: ConfigType = {"exclude": {"event_types": [EVENT_STATE_CHANGED]}}
instance = await async_setup_recorder_instance(hass, config)
# Assert entity_id is NOT excluded
assert instance.entity_filter("sensor.excluded") is True
def _add_db_entries(hass: HomeAssistantType) -> None:
with recorder.session_scope(hass=hass) as session:
# Add states and state_changed events that should be purged
for days in range(1, 4):
timestamp = dt_util.utcnow() - timedelta(days=days)
for event_id in range(1000, 1020):
_add_state_and_state_changed_event(
session,
"sensor.excluded",
"purgeme",
timestamp,
event_id * days,
)
            # Add events that should be kept
timestamp = dt_util.utcnow() - timedelta(days=1)
for event_id in range(200, 210):
session.add(
Events(
event_id=event_id,
event_type="EVENT_KEEP",
event_data="{}",
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
)
# Add states with linked old_state_ids that need to be handled
timestamp = dt_util.utcnow() - timedelta(days=0)
state_1 = States(
entity_id="sensor.linked_old_state_id",
domain="sensor",
state="keep",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
old_state_id=1,
)
timestamp = dt_util.utcnow() - timedelta(days=4)
state_2 = States(
entity_id="sensor.linked_old_state_id",
domain="sensor",
state="keep",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
old_state_id=2,
)
state_3 = States(
entity_id="sensor.linked_old_state_id",
domain="sensor",
state="keep",
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
old_state_id=62, # keep
)
session.add_all((state_1, state_2, state_3))
service_data = {"keep_days": 10, "apply_filter": True}
_add_db_entries(hass)
with session_scope(hass=hass) as session:
events_keep = session.query(Events).filter(Events.event_type == "EVENT_KEEP")
events_purge = session.query(Events).filter(
Events.event_type == EVENT_STATE_CHANGED
)
states = session.query(States)
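        # Setup added 60 excluded states/state_changed events (3 days x 20),
        # 10 EVENT_KEEP events and 3 linked states (63 states in total).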
assert events_keep.count() == 10
assert events_purge.count() == 60
assert states.count() == 63
await hass.services.async_call(
recorder.DOMAIN, recorder.SERVICE_PURGE, service_data
)
await hass.async_block_till_done()
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
await async_recorder_block_till_done(hass, instance)
await async_wait_purge_done(hass, instance)
assert events_keep.count() == 10
assert events_purge.count() == 0
assert states.count() == 3
assert session.query(States).get(61).old_state_id is None
assert session.query(States).get(62).old_state_id is None
assert session.query(States).get(63).old_state_id == 62 # should have been kept
async def _add_test_states(hass: HomeAssistantType, instance: recorder.Recorder):
"""Add multiple states to the db for testing."""
utcnow = dt_util.utcnow()
five_days_ago = utcnow - timedelta(days=5)
eleven_days_ago = utcnow - timedelta(days=11)
attributes = {"test_attr": 5, "test_attr_10": "nice"}
await hass.async_block_till_done()
await async_wait_recording_done(hass, instance)
with recorder.session_scope(hass=hass) as session:
old_state_id = None
for event_id in range(6):
if event_id < 2:
timestamp = eleven_days_ago
state = "autopurgeme"
elif event_id < 4:
timestamp = five_days_ago
state = "purgeme"
else:
timestamp = utcnow
state = "dontpurgeme"
event = Events(
event_type="state_changed",
event_data="{}",
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
session.add(event)
session.flush()
state = States(
entity_id="test.recorder2",
domain="sensor",
state=state,
attributes=json.dumps(attributes),
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
event_id=event.event_id,
old_state_id=old_state_id,
)
session.add(state)
session.flush()
old_state_id = state.state_id
async def _add_test_events(hass: HomeAssistantType, instance: recorder.Recorder):
"""Add a few events for testing."""
utcnow = dt_util.utcnow()
five_days_ago = utcnow - timedelta(days=5)
eleven_days_ago = utcnow - timedelta(days=11)
event_data = {"test_attr": 5, "test_attr_10": "nice"}
await hass.async_block_till_done()
await async_wait_recording_done(hass, instance)
with recorder.session_scope(hass=hass) as session:
for event_id in range(6):
if event_id < 2:
timestamp = eleven_days_ago
event_type = "EVENT_TEST_AUTOPURGE"
elif event_id < 4:
timestamp = five_days_ago
event_type = "EVENT_TEST_PURGE"
else:
timestamp = utcnow
event_type = "EVENT_TEST"
session.add(
Events(
event_type=event_type,
event_data=json.dumps(event_data),
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
)
async def _add_test_recorder_runs(hass: HomeAssistantType, instance: recorder.Recorder):
"""Add a few recorder_runs for testing."""
utcnow = dt_util.utcnow()
five_days_ago = utcnow - timedelta(days=5)
eleven_days_ago = utcnow - timedelta(days=11)
await hass.async_block_till_done()
await async_wait_recording_done(hass, instance)
with recorder.session_scope(hass=hass) as session:
for rec_id in range(6):
if rec_id < 2:
timestamp = eleven_days_ago
elif rec_id < 4:
timestamp = five_days_ago
else:
timestamp = utcnow
session.add(
RecorderRuns(
start=timestamp,
created=dt_util.utcnow(),
end=timestamp + timedelta(days=1),
)
)
def _add_state_and_state_changed_event(
session: Session,
entity_id: str,
state: str,
timestamp: datetime,
event_id: int,
) -> None:
"""Add state and state_changed event to database for testing."""
session.add(
States(
entity_id=entity_id,
domain="sensor",
state=state,
attributes="{}",
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
event_id=event_id,
)
)
session.add(
Events(
event_id=event_id,
event_type=EVENT_STATE_CHANGED,
event_data="{}",
origin="LOCAL",
created=timestamp,
time_fired=timestamp,
)
)
|
|
#!/usr/bin/python
'''
Copyright (c) 2005 Hewlett-Packard Company
All rights reserved
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Hewlett-Packard Company nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Convert PNM files into icons suitable for display on a Clipboard
screen.
P1(PBM) - ascii bitmap (only two colors)
P2(PGM) - ascii greymap (only grey levels)
P3(PPM) - ascii truecolor
P4(PBM) - binary bitmap
P5(PGM) - binary greymap
P6(PPM) - binary truecolor
Icons are usually 24 pixels wide by 16 pixels tall. They are stored
in a uint8_t array in the following format:
0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
Byte #0 Byte #16 Byte #32
Byte #1 Byte #17 Byte #33
. . .
Byte #15 Byte #31 Byte #47
Note that the bit order is LSB first. Black pixels have the bit SET,
white pixels have the bit cleared.
However....to match the font drawing code, we actually need to order
the bytes WITHIN each icon to be arranged on rows. For example, say
we had three icons in a file, each with row stride 3 and height 4. The
bytes would be laid out:
0 1 2 9 10 11 18 19 20
3 4 5 12 13 14 21 22 23
6 7 8 15 16 17 24 25 26
We expect to find a comment line in the PNM file that specifies the
number of icons (and we expect each icon to have a width that is
a multiple of 8). The comment line should look like:
# IconCount <number>
Andrew Christian <[email protected]>
18 March 2005
'''
import sys
from os.path import basename
bitval = [ 1, 2, 4, 8, 16, 32, 64, 128 ]
def byte_to_ascii(x):
result = ""
for t in bitval:
result += x & t and '#' or ' '
return result
def reverse_byte(x):
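    # Mirrors the bit order within a byte, e.g. reverse_byte(0b00000001) == 0b10000000
    # and reverse_byte(0x0F) == 0xF0; P4 (binary PBM) packs pixels MSB-first while the
    # icon format described above expects LSB-first.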
result = 0
if x & 1: result += 128
if x & 2: result += 64
if x & 4: result += 32
if x & 8: result += 16
if x & 16: result += 8
if x & 32: result += 4
if x & 64: result += 2
if x & 128: result += 1
return result
def pack_bytes(values):
'Pack a sequence of 1 and 0 values into bytes'
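    # Bits are packed LSB-first (bitval runs 1..128), e.g.
    # pack_bytes([1, 0, 0, 0, 0, 0, 0, 0, 1, 1]) == [1, 3]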
result = []
while len(values):
c = sum([x[0] * x[1] for x in zip(values[:8],bitval)])
result.append(c)
values = values[8:]
return result
class Bitmap:
def __init__(self,filename):
self.filename = filename
fd = open(filename)
self.filetype = fd.readline().split()[0]
load_function = getattr(self,"load_" + self.filetype, None)
if not load_function:
print >>sys.stderr, 'Unrecognized file type', self.filetype
sys.exit(1)
load_function( fd )
fd.close()
def width_height(self,fd):
self.icon_count = 1
line = fd.readline()
while line.startswith('#'):
d = line.split()
if len(d) == 3 and d[1] == "IconCount":
self.icon_count = int(d[2])
line = fd.readline()
total_width, self.height = map(int, line.split())
self.width = total_width / self.icon_count
self.row_stride = self.width / 8
self.data = [ [] for x in range(self.icon_count) ] # One per icon
self.bytes = self.height * self.row_stride * self.icon_count
def max_gray(self,fd):
'Read the next text line for an integer value'
line = fd.readline()
while line.startswith('#'):
line = fd.readline()
return int(line)
def fill_data(self,values):
'''Take a list of byte values and pack them into the
individual icons in drawing order'''
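        # Incoming bytes alternate between icons row by row: with icon_count=3 and
        # row_stride=3, bytes 0-2 form icon 0's first row, bytes 3-5 icon 1's first
        # row, bytes 6-8 icon 2's first row, then bytes 9-11 wrap back to icon 0.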
i = 0
while len(values):
self.data[i] += values[:self.row_stride]
values = values[self.row_stride:]
i += 1
if i >= self.icon_count: i = 0
def load_P2(self,fd):
'Load an ASCII portable gray map'
self.width_height(fd)
maxgray = self.max_gray(fd)
# Read the file and convert to 1's and 0's
values = [int(x) < maxgray / 2 and 1 or 0 for x in fd.read().split()]
values = pack_bytes(values)
self.fill_data(values)
def load_P4(self,fd):
'Load a binary bitmap'
self.width_height(fd)
values = [reverse_byte(ord(x)) for x in fd.read()]
self.fill_data(values)
def ascii_art(self,index):
data = self.data[index]
result = ""
for i in range(0,len(data),self.row_stride):
result += "".join([byte_to_ascii(x) for x in data[i:i+self.row_stride]]) + "\n"
return result
def data_structure(self):
bname = basename(self.filename)
name = bname
if '.' in name:
name = name.split('.',1)[0]
result = """
const uint8_t %s_icon_data[%d] = {
""" % (name, self.bytes)
for d in self.data:
result += " " + "".join(["%d," % x for x in d]) + "\n"
result += "};\n";
result += """
// struct ICON { uint8_t width, uint8_t height, uint8_t row_stride, uint8_t count, uint8_t *data };
const struct ICON %s_icon = { %d, %d, %d, %d, %s_icon_data };
""" % (name, self.width, self.height, self.row_stride, self.icon_count,name)
return result
def hexdump(self):
print "/* %s X=%d Y=%d count=%d" % (basename(self.filename)
,self.width, self.height, self.icon_count)
for i in range(self.icon_count):
print self.ascii_art(i)
print "*/"
print self.data_structure()
print """
/* Autogenerated ICON file */
"""
for f in sys.argv[1:]:
b = Bitmap(f)
b.hexdump()
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)
class SaharaCluster(resource.Resource):
PROPERTIES = (
NAME, PLUGIN_NAME, HADOOP_VERSION, CLUSTER_TEMPLATE_ID,
KEY_NAME, IMAGE, MANAGEMENT_NETWORK, IMAGE_ID,
USE_AUTOCONFIG
) = (
'name', 'plugin_name', 'hadoop_version', 'cluster_template_id',
'key_name', 'image', 'neutron_management_network', 'default_image_id',
'use_autoconfig'
)
ATTRIBUTES = (
STATUS, INFO,
) = (
"status", "info",
)
CLUSTER_STATUSES = (
CLUSTER_ACTIVE, CLUSTER_ERROR
) = (
'Active', 'Error'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Hadoop cluster name.'),
),
PLUGIN_NAME: properties.Schema(
properties.Schema.STRING,
_('Plugin name.'),
required=True,
constraints=[
constraints.CustomConstraint('sahara.plugin')
]
),
HADOOP_VERSION: properties.Schema(
properties.Schema.STRING,
_('Version of Hadoop running on instances.'),
required=True,
),
CLUSTER_TEMPLATE_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the Cluster Template used for '
'Node Groups and configurations.'),
required=True,
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Keypair added to instances to make them accessible for user.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
],
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of the image used to boot Hadoop nodes.'),
support_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % IMAGE_ID,
version='2015.1',
previous_status=support.SupportStatus(version='2014.2')),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('Default name or UUID of the image used to boot Hadoop nodes.'),
constraints=[
constraints.CustomConstraint('sahara.image'),
],
support_status=support.SupportStatus(version='2015.1')
),
MANAGEMENT_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of network.'),
constraints=[
constraints.CustomConstraint('neutron.network')
],
),
USE_AUTOCONFIG: properties.Schema(
properties.Schema.BOOLEAN,
_("Configure most important configs automatically."),
support_status=support.SupportStatus(version='5.0.0')
)
}
attributes_schema = {
STATUS: attributes.Schema(
_("Cluster status."),
type=attributes.Schema.STRING
),
INFO: attributes.Schema(
_("Cluster information."),
type=attributes.Schema.MAP
),
}
default_client_name = 'sahara'
entity = 'clusters'
def _validate_depr_keys(self, properties, key, depr_key):
value = properties.get(key)
depr_value = properties.get(depr_key)
if value and depr_value:
            raise exception.ResourcePropertyConflict(key, depr_key)
def _cluster_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.physical_resource_name()
def handle_create(self):
plugin_name = self.properties[self.PLUGIN_NAME]
hadoop_version = self.properties[self.HADOOP_VERSION]
cluster_template_id = self.properties[self.CLUSTER_TEMPLATE_ID]
image_id = (self.properties[self.IMAGE_ID] or
self.properties[self.IMAGE])
if image_id:
image_id = self.client_plugin('glance').get_image_id(image_id)
# check that image is provided in case when
# cluster template is missing one
cluster_template = self.client().cluster_templates.get(
cluster_template_id)
if cluster_template.default_image_id is None and not image_id:
msg = _("%(img)s must be provided: Referenced cluster template "
"%(tmpl)s has no default_image_id defined.") % {
'img': self.IMAGE, 'tmpl': cluster_template_id}
raise exception.StackValidationFailed(message=msg)
key_name = self.properties[self.KEY_NAME]
net_id = self.properties[self.MANAGEMENT_NETWORK]
if net_id:
if self.is_using_neutron():
net_id = self.client_plugin('neutron').find_neutron_resource(
self.properties, self.MANAGEMENT_NETWORK, 'network')
else:
net_id = self.client_plugin('nova').get_nova_network_id(
net_id)
use_autoconfig = self.properties[self.USE_AUTOCONFIG]
cluster = self.client().clusters.create(
self._cluster_name(),
plugin_name, hadoop_version,
cluster_template_id=cluster_template_id,
user_keypair_id=key_name,
default_image_id=image_id,
net_id=net_id,
use_autoconfig=use_autoconfig)
LOG.info(_LI('Cluster "%s" is being started.'), cluster.name)
self.resource_id_set(cluster.id)
return self.resource_id
def check_create_complete(self, cluster_id):
cluster = self.client().clusters.get(cluster_id)
if cluster.status == self.CLUSTER_ERROR:
raise exception.ResourceInError(resource_status=cluster.status)
if cluster.status != self.CLUSTER_ACTIVE:
return False
LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
return True
def check_delete_complete(self, resource_id):
if not resource_id:
return True
try:
cluster = self.client().clusters.get(resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
LOG.info(_LI("Cluster '%s' has been deleted"),
self._cluster_name())
return True
else:
if cluster.status == self.CLUSTER_ERROR:
raise exception.ResourceInError(resource_status=cluster.status)
return False
def _resolve_attribute(self, name):
cluster = self.client().clusters.get(self.resource_id)
return getattr(cluster, name, None)
def validate(self):
res = super(SaharaCluster, self).validate()
if res:
return res
self._validate_depr_keys(self.properties, self.IMAGE_ID, self.IMAGE)
# check if running on neutron and MANAGEMENT_NETWORK missing
if (self.is_using_neutron() and
not self.properties[self.MANAGEMENT_NETWORK]):
msg = _("%s must be provided"
) % self.MANAGEMENT_NETWORK
raise exception.StackValidationFailed(message=msg)
self.client_plugin().validate_hadoop_version(
self.properties[self.PLUGIN_NAME],
self.properties[self.HADOOP_VERSION]
)
def resource_mapping():
return {
'OS::Sahara::Cluster': SaharaCluster,
}
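# A minimal HOT-template sketch (illustrative only, not taken from this module) of how
# the resource could be declared; the plugin name, version and template reference are
# assumptions about the target cloud:
#
#   resources:
#     hadoop_cluster:
#       type: OS::Sahara::Cluster
#       properties:
#         plugin_name: vanilla
#         hadoop_version: "2.7.1"
#         cluster_template_id: { get_param: cluster_template_id }
#         key_name: my_keypair
#         neutron_management_network: private-net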
|
|
# (c) 2014, Chris Church <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
connection: winrm
short_description: Run tasks over Microsoft's WinRM
description:
- Run commands or put/fetch on a target via WinRM
version_added: "2.0"
options:
remote_addr:
description:
- Address of the windows machine
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_winrm_host
remote_user:
description:
- The user to log in as to the Windows machine
vars:
- name: ansible_user
- name: ansible_winrm_user
"""
import base64
import inspect
import os
import re
import shlex
import traceback
import json
import tempfile
import subprocess
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlunsplit
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six import binary_type
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import leaf_exec
from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe
try:
import winrm
from winrm import Response
from winrm.protocol import Protocol
HAS_WINRM = True
except ImportError as e:
HAS_WINRM = False
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError as e:
HAS_XMLTODICT = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
become_methods = ['runas']
allow_executable = False
def __init__(self, *args, **kwargs):
self.has_pipelining = True
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
# FUTURE: Add runas support
super(Connection, self).__init__(*args, **kwargs)
def set_host_overrides(self, host, variables, templar):
'''
Override WinRM-specific options from host variables.
'''
if not HAS_WINRM:
return
hostvars = {}
for k in variables:
if k.startswith('ansible_winrm'):
hostvars[k] = templar.template(variables[k])
self._winrm_host = self._play_context.remote_addr
self._winrm_port = int(self._play_context.port or 5986)
self._winrm_scheme = hostvars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman')
self._winrm_user = self._play_context.remote_user
self._winrm_pass = self._play_context.password
self._become_method = self._play_context.become_method
self._become_user = self._play_context.become_user
self._become_pass = self._play_context.become_pass
self._kinit_cmd = hostvars.get('ansible_winrm_kinit_cmd', 'kinit')
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'
if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
self._winrm_transport = 'kerberos,%s' % transport_selector
else:
self._winrm_transport = transport_selector
self._winrm_transport = hostvars.get('ansible_winrm_transport', self._winrm_transport)
if isinstance(self._winrm_transport, string_types):
self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports))
# if kerberos is among our transports and there's a password specified, we're managing the tickets
kinit_mode = to_text(hostvars.get('ansible_winrm_kinit_mode', '')).strip()
if kinit_mode == "":
# HACK: ideally, remove multi-transport stuff
self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass
elif kinit_mode == "managed":
self._kerb_managed = True
elif kinit_mode == "manual":
self._kerb_managed = False
else:
raise AnsibleError('Unknown ansible_winrm_kinit_mode value: "%s" (must be "managed" or "manual")' % kinit_mode)
        # arg names we're going to pass through directly
internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = inspect.getargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in hostvars if v.startswith('ansible_winrm_')])
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching kwargs, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg]
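        # For example (illustrative, assuming a pywinrm build that accepts the kwarg): an
        # inventory variable ansible_winrm_server_cert_validation=ignore arrives at
        # Protocol() as server_cert_validation='ignore', while ansible_winrm_transport and
        # the other masked names above are consumed by this plugin and never forwarded.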
# Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection
# auth itself with a private CCACHE.
def _kerb_auth(self, principal, password):
if password is None:
password = ""
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
krbenv = dict(KRB5CCNAME=krb5ccname)
os.environ["KRB5CCNAME"] = krb5ccname
kinit_cmdline = [self._kinit_cmd, principal]
display.vvvvv("calling kinit for principal %s" % principal)
p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=krbenv)
# TODO: unicode/py3
stdout, stderr = p.communicate(password + b'\n')
if p.returncode != 0:
raise AnsibleConnectionFailure("Kerberos auth failure: %s" % stderr.strip())
display.vvvvv("kinit succeeded for principal %s" % principal)
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos':
if not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
if self._kerb_managed:
self._kerb_auth(self._winrm_user, self._winrm_pass)
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
# open the shell from connect so we know we're able to talk to the server
if not self.shell_id:
self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
return protocol
except Exception as e:
err_msg = to_text(e).strip()
if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the specified credentials were rejected by the server'
elif code == 411:
return protocol
errors.append(u'%s: %s' % (transport, err_msg))
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
if errors:
raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
else:
raise AnsibleError('No transport found for WinRM connection')
def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
# TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that
# comes from this)
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except Exception as ex:
from traceback import format_exc
display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc(ex))
stdin_push_failed = True
if stdin_push_failed:
raise AnsibleError('winrm send_input failed')
# NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
# FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
resptuple = self.protocol.get_command_output(self.shell_id, command_id)
# ensure stdout/stderr are text for py3
# FUTURE: this should probably be done internally by pywinrm
response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))
# TODO: check result from response and set stdin_push_failed if we have nonzero
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
if stdin_push_failed:
raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % to_text(e))
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % to_text(e))
super(Connection, self)._connect()
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
return self
def _reset(self): # used by win_reboot (and any other action that might need to bounce the state)
self.protocol = None
self.shell_id = None
self._connect()
def _create_raw_wrapper_payload(self, cmd, environment=dict()):
payload = {
'module_entry': to_text(base64.b64encode(to_bytes(cmd))),
'powershell_modules': {},
'actions': ['exec'],
'exec': to_text(base64.b64encode(to_bytes(leaf_exec))),
'environment': environment
}
return json.dumps(payload)
def _wrapper_payload_stream(self, payload, buffer_size=200000):
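        # Yields (chunk, is_last) tuples so the wrapper payload can be streamed as stdin;
        # e.g. a 450000-byte payload with the default 200000-byte buffer yields three
        # chunks, and only the final tuple carries is_last=True.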
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def exec_command_old(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = shlex.split(to_bytes(cmd), posix=False)
cmd_parts = map(to_text, cmd_parts)
script = None
cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
# Support running .ps1 files (via script/raw).
if cmd_ext == '.ps1':
script = '& %s' % cmd
# Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
elif cmd_ext in ('.bat', '.cmd'):
script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
# Encode the command if not already encoded; supports running simple PowerShell commands via raw.
elif '-EncodedCommand' not in cmd_parts:
script = cmd
if script:
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = to_text(base64.b64decode(encoded_cmd).decode('utf-16-le'))
display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
else:
display.vvv("EXEC %s" % cmd, host=self._winrm_host)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception:
traceback.print_exc()
raise AnsibleConnectionFailure("failed to exec cmd %s" % cmd)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def is_clixml(self, value):
return value.startswith(b"#< CLIXML")
# hacky way to get just stdout- not always sure of doc framing here, so use with care
def parse_clixml_stream(self, clixml_doc, stream_name='Error'):
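        # Expects a document shaped roughly like
        # <Objs ...><S S="Error">first line_x000D__x000A_</S><S S="Error">...</S></Objs>,
        # i.e. one <S> element per output line with the stream name in its S attribute
        # and CR/LF encoded as _x000D__x000A_.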
clear_xml = clixml_doc.replace(b'#< CLIXML\r\n', b'')
doc = xmltodict.parse(clear_xml)
lines = [l.get('#text', '').replace('_x000D__x000A_', '') for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name]
return '\r\n'.join(lines)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), b''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + b'\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield "", True
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
script_template = u'''
begin {{
$path = '{0}'
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
$fd = [System.IO.File]::Create($path)
$sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
$bytes = @() #initialize for empty file case
}}
process {{
$bytes = [System.Convert]::FromBase64String($input)
$sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
$fd.Write($bytes, 0, $bytes.Length)
}}
end {{
$sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
$hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
$fd.Close()
Write-Output "{{""sha1"":""$hash""}}"
}}
'''
script = script_template.format(self._shell._escape(out_path))
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
# TODO: improve error handling
if result.status_code != 0:
raise AnsibleError(to_native(result.std_err))
put_output = json.loads(result.std_out)
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned")
local_sha1 = secure_hash(in_path)
if not remote_sha1 == local_sha1:
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
makedirs_safe(os.path.dirname(out_path))
out_file = None
try:
offset = 0
while True:
try:
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$stream = New-Object IO.FileStream("%(path)s", [System.IO.FileMode]::Open, [System.IO.FileAccess]::Read, [IO.FileShare]::ReadWrite);
$stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
$buffer = New-Object Byte[] %(buffer_size)d;
$bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
$bytes = $buffer[0..($bytesRead-1)];
[System.Convert]::ToBase64String($bytes);
$stream.Close() | Out-Null;
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "[DIR]";
}
Else
{
Write-Error "%(path)s does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(to_native(result.std_err))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
makedirs_safe(out_path)
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
break
out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % out_path)
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
|
|
#!/usr/bin/env python3
'''
Generates SIPS by calling various microservices and functions.
TODO: add a check at the start to verify that the AF number is actually in the CSV.
'''
import os
import argparse
import sys
import shutil
import datetime
import time
import copyit
import ififuncs
import package_update
import accession
import manifest
import makezip
from masscopy import analyze_log
try:
from clairmeta.utils.xml import prettyprint_xml
from clairmeta import DCP
import dicttoxml
except ImportError:
print('Clairmeta is not installed. DCP options will not function!')
def make_folder_path(path, args, object_entry):
'''
Generates objects/logs/metadata/UUID folder structure in output directory.
Asks user for UUID if it's not supplied in an arg.
Adds a workaround for special collections workflows.
Returns the path.
UNITTEST - does path exist
'''
if not args.u:
representation_uuid = ififuncs.create_uuid()
else:
representation_uuid = args.u
if args.sc:
oe_path = args.o
else:
oe_path = os.path.join(path, object_entry)
path = os.path.join(oe_path, representation_uuid)
ififuncs.make_folder_structure(path)
return path
def consolidate_manifests(path, directory, new_log_textfile):
'''
Consolidates all manifests in the objects folder
moves old manifests into logs
renames manifest with uuid and updates paths in manifest textfile.
'''
uuid = os.path.basename(path)
objects_dir = os.path.join(path, directory)
new_manifest_textfile = os.path.join(
os.path.dirname(path), uuid + '_manifest.md5'
)
collective_manifest = []
for manifest in os.listdir(objects_dir):
if ififuncs.check_if_manifest(manifest):
ififuncs.generate_log(
new_log_textfile,
'EVENT = Manifest consolidation - Checksums from %s merged into %s' % (os.path.join(objects_dir, manifest), new_manifest_textfile)
)
with open(os.path.join(objects_dir, manifest), 'r', encoding='utf-8') as fo:
manifest_lines = fo.readlines()
for i in manifest_lines:
# This is what appends the new path to existing paths.
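                    # An md5 manifest line is '<32-char hex digest>  <relative path>',
                    # so i[:32] is the digest and i[34:] is the original path.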
new_manifest_path = uuid + '/%s/' % directory + i[34:]
collective_manifest.append(
i[:32] + ' ' + new_manifest_path
)
# Cut and paste old manifests into the log directory
shutil.move(
objects_dir + '/' + manifest, os.path.join(path, 'logs')
)
ififuncs.generate_log(
new_log_textfile,
'EVENT = Manifest movement - Manifest from %s to %s' % (objects_dir + '/' + manifest, os.path.join(path, 'logs'))
)
with open(new_manifest_textfile, 'a', encoding='utf-8') as manifest_object:
for checksums in collective_manifest:
manifest_object.write(checksums)
return new_manifest_textfile
def consolidate_logs(lognames, path):
'''
Finds moveit.py logs on the desktop
Copies all text into a single log file
Saves it in the SIP
'''
uuid = os.path.basename(path)
new_log_textfile = os.path.join(path, 'logs' + '/' + uuid + '_sip_log.log')
for log in lognames:
with open(log, 'r') as fo:
log_lines = fo.readlines()
with open(new_log_textfile, 'a') as log_object:
for lines in log_lines:
log_object.write(lines)
def move_files(inputs, sip_path, args, user):
'''
Runs moveit.py on all inputs
'''
log_names = []
for item in inputs:
cmd = [item, os.path.join(sip_path, 'objects')]
if args.move:
cmd.append('-move')
if args.l:
cmd.append('-l')
log_name = copyit.main(cmd)
log_names.append(log_name)
if args.rename_uuid:
if os.path.isfile(item):
objects_dir = os.path.join(sip_path, 'objects')
uuid = os.path.basename(sip_path)
old_basename, ext = os.path.splitext(item)
new_path = os.path.join(objects_dir, uuid + ext)
os.rename(os.path.join(objects_dir, os.path.basename(item)), new_path)
manifest = os.path.join(os.path.dirname(new_path), os.path.basename(item)) + '_manifest.md5'
updated_lines = []
ififuncs.generate_log(
log_name,
'EVENT = Filename change - eventDetail=original filename replaced with uuid, eventOutcomeDetailNote=%s replaced with %s, agentName=%s, agentName=sipcreator.py))' % (os.path.basename(item), uuid + ext, user))
with open(manifest, 'r') as file_object:
checksums = file_object.readlines()
for line in checksums:
if os.path.basename(item) in line:
line = line.replace(os.path.basename(item), os.path.basename(new_path))
updated_lines.append(line)
with open(manifest, 'w') as fo:
for lines in updated_lines:
fo.write(lines)
consolidate_logs(log_names, sip_path)
return log_names
def log_report(log_names):
'''
    Analyzes all the moveit.py logs on the desktop and prints a report.
'''
desktop_logs_dir = ififuncs.make_desktop_logs_dir()
for i in log_names:
if os.path.isfile(i):
print(("%-*s : copyit job was a %s" % (50, os.path.basename(i)[:-24], analyze_log(i))))
else:
print((i, 'can\'t find log file, trying again...'))
log_names.remove(i)
for logs in os.listdir(desktop_logs_dir):
# look at log filename minus the seconds and '.log'
if os.path.basename(i)[:-7] in logs:
# make sure that the alternate log filename is more recent
if int(
os.path.basename(logs)[-12:-4].replace('_', '')
) > int(
os.path.basename(i)[-12:-4].replace('_', '')):
print(('trying to analyze %s' % logs))
print(("%-*s : %s" % (
50, os.path.basename(logs)[:-24], analyze_log(
os.path.join(desktop_logs_dir, logs))
)))
log_names.append(os.path.join(desktop_logs_dir, logs))
def parse_args(args_):
'''
Parse command line arguments.
'''
parser = argparse.ArgumentParser(
        description='Wraps objects into an Irish Film Institute SIP.'
' Written by Kieran O\'Leary.'
)
parser.add_argument(
'-i', nargs='+',
help='full path of input directory', required=True
)
parser.add_argument(
'-o', '-output',
help='full path of output directory', required=True
)
parser.add_argument(
'-u', '-uuid',
help='Use a pre-existing UUID instead of a newly generated UUID.'
)
parser.add_argument(
'-rename_uuid', action='store_true',
        help='Use with caution! This will rename an object with a randomly generated UUID'
)
parser.add_argument(
'-user',
help='Declare who you are. If this is not set, you will be prompted.'
)
parser.add_argument(
'-d', '-dcp', action='store_true',
help='Adds DCP specific processing, like creating objects subfolder with text extracted from <ContentTitleText> in the CPL.'
)
parser.add_argument(
'-quiet', action='store_true',
help='Quiet mode, suppresses the analyze_logs() report'
)
parser.add_argument(
'-move', action='store_true',
help='invokes the -move argument in copyit.py - moves instead of copy.'
)
parser.add_argument(
'-l', action='store_true',
help='invokes the -lto argument in copyit.py - uses gcp instead of rsync.'
)
parser.add_argument(
'-sc', action='store_true',
help='special collections workflow'
)
parser.add_argument(
'-zip', action='store_true',
help='Uses makezip.py to store the objects in an uncompressed ZIP'
)
parser.add_argument(
'-accession', action='store_true',
help='Launches accession.py immediately after sipcreator.py finishes. This is only useful if the SIP has already passed QC and will definitely be accessioned and ingested.'
)
parser.add_argument(
'-filmo_csv',
help='Enter the path to the Filmographic CSV so that the metadata will be stored within the package.'
)
parser.add_argument(
'-oe',
        help='Enter the Object Entry number for the representation. The SIP will be placed in a folder with this name.'
)
parser.add_argument(
'-manifest',
help='Enter the full path to a manifest for the files within a ZIP. This will be stored in the supplemental folder.'
)
parser.add_argument(
'-supplement', nargs='+',
help='Enter the full path of files or folders that are to be added to the supplemental subfolder within the metadata folder. Use this for information that supplements your preservation objects but is not to be included in the objects folder.'
)
parsed_args = parser.parse_args(args_)
return parsed_args
def create_content_title_text(sip_path, args):
'''
DCPs are often delivered with inconsistent foldernames.
This will rename the parent folder with the value recorded in <ContentTitleText>
For example:
Original name: CHARBON-SMPTE-24
New name: CHARBON-SMPTE-24-INTEROP-SUBS_TST_S_XX-EN_FR_XX_2K_CHA-20120613_CHA_OV
Rename will only occur if user agrees.
'''
cpl = ififuncs.find_cpl(args.i[0])
objects_dir = os.path.join(sip_path, 'objects')
dcp_dirname = os.path.dirname(cpl)
content_title = ififuncs.get_contenttitletext(cpl)
dci_foldername = os.path.join(objects_dir, content_title)
rename_dcp = ififuncs.ask_yes_no(
'Do you want to rename %s with %s ?' % (os.path.basename(dcp_dirname), dci_foldername)
)
if rename_dcp == 'N':
print('Exiting')
sys.exit()
return content_title
def normalise_objects_manifest(sip_path):
'''
For a root copy workflow, the objects manifest is in the
uuid directory, not the objects directory. This will move it
into the objects directory.
'''
objects_manifest = os.path.join(sip_path, 'objects_manifest.md5')
if os.path.isfile(objects_manifest):
updated_manifest_lines = []
with open(objects_manifest, 'r') as fo:
manifest_lines = fo.readlines()
for i in manifest_lines:
                # Strip the 'objects/' prefix so the paths are relative to the objects directory.
replacement = i.replace(' objects/', ' ')
updated_manifest_lines.append(replacement)
with open(objects_manifest, 'w') as fo:
for x in updated_manifest_lines:
fo.write(x)
# Cut and paste old manifests into the log directory
shutil.move(
objects_manifest, os.path.join(sip_path, 'objects')
)
def get_object_entry(args):
'''
Figures out which OE number to use and performs some basic validation.
UNITTEST - use the existing ifs to perform some True/False tests.
'''
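    # A valid entry looks like 'oe1234': an 'oe' prefix followed by digits
    # (the length check below accepts four or five digits).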
if not args.sc:
if args.oe:
if args.oe[:2] != 'oe':
print('First two characters must be \'oe\' and last four characters must be four digits')
object_entry = ififuncs.get_object_entry()
elif len(args.oe[2:]) not in list(range(4, 6)):
print('First two characters must be \'oe\' and last four characters must be four digits')
object_entry = ififuncs.get_object_entry()
elif not args.oe[2:].isdigit():
object_entry = ififuncs.get_object_entry()
print('First two characters must be \'oe\' and last four characters must be four digits')
else:
object_entry = args.oe
else:
object_entry = ififuncs.get_object_entry()
else:
object_entry = 'not_applicable'
return object_entry
def determine_uuid(args, sip_path):
'''
Validates a UUID to use as the SIP identifier.
UNITTEST = validate the existing validations.
'''
if args.u:
if ififuncs.validate_uuid4(args.u) is None:
uuid = args.u
uuid_event = (
'EVENT = eventType=Identifier assignement,'
' eventIdentifierType=UUID, value=%s, module=uuid.uuid4'
) % uuid
else:
print('exiting due to invalid UUID')
sys.exit()
else:
uuid = os.path.basename(sip_path)
uuid_event = (
'EVENT = eventType=Identifier assignement,'
' eventIdentifierType=UUID, value=%s, module=uuid.uuid4'
) % uuid
return uuid, uuid_event
def process_dcp(sip_path, content_title, args, new_manifest_textfile, new_log_textfile, metadata_dir, clairmeta_version):
'''
Runs DCP specific functions.
'''
objects_dir = os.path.join(sip_path, 'objects')
cpl = ififuncs.find_cpl(objects_dir)
dcp_dirname = os.path.dirname(cpl)
os.chdir(os.path.dirname(dcp_dirname))
os.rename(os.path.basename(dcp_dirname), content_title)
new_dcp_path = os.path.join('objects', content_title).replace("\\", "/")
absolute_dcp_path = os.path.join(sip_path, new_dcp_path)
ififuncs.manifest_replace(
new_manifest_textfile,
os.path.join('objects', os.path.basename(args.i[0])).replace("\\", "/"),
new_dcp_path
)
'''
a = subprocess.check_output(['python', '-m', 'clairmeta.cli', 'check', '-type', 'dcp', absolute_dcp_path], stderr=subprocess.STDOUT)
b = subprocess.check_output(['python', '-m', 'clairmeta.cli', 'probe', '-type', 'dcp', '-format', 'xml', absolute_dcp_path], stderr=subprocess.STDOUT)
'''
dcp = DCP(absolute_dcp_path)
dcp_dict = dcp.parse()
# json_str = json.dumps(dcp_dict , sort_keys=True, indent=2, separators=(',', ': '))
xml_str = dicttoxml.dicttoxml(dcp_dict, custom_root='ClairmetaProbe', ids=False, attr_type=False)
xml_pretty = prettyprint_xml(xml_str)
status, report = dcp.check()
ififuncs.generate_log(
new_log_textfile,
'EVENT = eventType=validation, eventOutcome=%s, eventDetail=%s, agentName=Clairmeta version %s' % (
status, report, clairmeta_version
)
)
clairmeta_xml = os.path.join(metadata_dir, '%s_clairmeta.xml' % content_title)
ififuncs.generate_log(
new_log_textfile,
'EVENT = Metadata extraction - eventDetail=Clairmeta DCP metadata extraction, eventOutcome=%s, agentName=Clairmeta version %s' % (clairmeta_xml, clairmeta_version)
)
with open(clairmeta_xml, 'w') as fo:
fo.write(xml_pretty)
ififuncs.checksum_replace(new_manifest_textfile, new_log_textfile, 'md5')
ififuncs.manifest_update(new_manifest_textfile, clairmeta_xml)
print(status)
print(report)
def make_oe_register():
'''
This sends a placeholder oe register to the desktop logs directory.
This should get rid of some of the more painful, repetitive identifier matching.
'''
desktop_logs_dir = ififuncs.make_desktop_logs_dir()
oe_register = os.path.join(
desktop_logs_dir,
'oe_helper_register_' + time.strftime("%Y-%m-%dT%H_%M_%S.csv")
)
ififuncs.create_csv(oe_register, (
'OE No.',
'Date Received',
'Quantity',
'Format',
'Description',
'Contact Name',
'Type of Acquisition',
'Accession No.',
'Additional Information',
'Habitat',
'Vinegar No'
))
return oe_register
def main(args_):
'''
Launch all the functions for creating an IFI SIP.
'''
args = parse_args(args_)
start = datetime.datetime.now()
inputs = args.i
for input in inputs:
if ififuncs.check_av_or_doc(input) == 'av':
ififuncs.check_existence(['mediainfo'])
elif ififuncs.check_av_or_doc(input) == 'doc':
ififuncs.check_existence(['sf', 'exiftool'])
if args.d:
try:
import clairmeta
clairmeta_version = clairmeta.__version__
except ImportError:
print('Exiting as Clairmeta is not installed. If there is a case for not using Clairmeta, please let me know and I can make a workaround')
sys.exit()
if args.zip:
ififuncs.check_existence(['7za'])
print(args)
user = ififuncs.determine_user(args)
object_entry = get_object_entry(args)
sip_path = make_folder_path(os.path.join(args.o), args, object_entry)
uuid, uuid_event = determine_uuid(args, sip_path)
new_log_textfile = os.path.join(sip_path, 'logs' + '/' + uuid + '_sip_log.log')
if args.d:
content_title = create_content_title_text(sip_path, args)
ififuncs.generate_log(
new_log_textfile,
'EVENT = sipcreator.py started'
)
ififuncs.generate_log(
new_log_textfile,
'eventDetail=sipcreator.py %s' % ififuncs.get_script_version('sipcreator.py')
)
ififuncs.generate_log(
new_log_textfile,
'Command line arguments: %s' % args
)
ififuncs.generate_log(
new_log_textfile,
'EVENT = agentName=%s' % user
)
ififuncs.generate_log(
new_log_textfile,
uuid_event
)
if not args.sc:
ififuncs.generate_log(
new_log_textfile,
'EVENT = eventType=Identifier assignment,'
' eventIdentifierType=object entry, value=%s'
% object_entry
)
metadata_dir = os.path.join(sip_path, 'metadata')
supplemental_dir = os.path.join(metadata_dir, 'supplemental')
logs_dir = os.path.join(sip_path, 'logs')
if args.accession:
accession_number = ififuncs.get_accession_number()
reference_number = ififuncs.get_reference_number()
parent = ififuncs.ask_question('What is the parent record? eg MV 1234. Enter n/a if this is a born digital acquisition with no parent.')
donor = ififuncs.ask_question('Who is the source of acquisition, as appears on the donor agreement? This will not affect Reproductions.')
reproduction_creator = ififuncs.ask_question('Who is the reproduction creator? This will not affect acquisitions. Enter n/a if not applicable')
depositor_reference = ififuncs.ask_question('What is the donor/depositor number? This will not affect Reproductions.')
acquisition_type = ififuncs.get_acquisition_type('')
donation_date = ififuncs.ask_question('When was the donation date in DD/MM/YYYY format? Eg. 31/12/1999 - Unfortunately this is NOT using ISO 8601.')
if args.zip:
inputxml, inputtracexml, dfxml = ififuncs.generate_mediainfo_xmls(inputs[0], args.o, uuid, new_log_textfile)
if args.manifest:
shutil.copy(args.manifest, args.manifest.replace('_manifest.md5', '_manifest-md5.txt'))
source_manifest = args.manifest.replace('_manifest.md5', '_manifest-md5.txt')
else:
source_manifest = os.path.join(
args.o,
os.path.basename(args.i[0]) + '_manifest-md5.txt'
)
ififuncs.generate_log(
new_log_textfile,
'EVENT = message digest calculation, status=started, eventType=messageDigestCalculation, agentName=hashlib, eventDetail=MD5 checksum of source files within ZIP'
)
ififuncs.hashlib_manifest(args.i[0], source_manifest, os.path.dirname(args.i[0]))
ififuncs.generate_log(
new_log_textfile,
'EVENT = message digest calculation, status=finished, eventType=messageDigestCalculation, agentName=hashlib, eventDetail=MD5 checksum of source files within ZIP'
)
ififuncs.generate_log(
new_log_textfile,
'EVENT = packing, status=started, eventType=packing, agentName=makezip.py, eventDetail=Source object to be packed=%s' % inputs[0]
)
makezip_judgement, zip_file = makezip.main(['-i', inputs[0], '-o', os.path.join(sip_path, 'objects'), '-basename', uuid + '.zip'])
ififuncs.generate_log(
new_log_textfile,
'EVENT = packing, status=finished, eventType=packing, agentName=makezip.py, eventDetail=Source object packed into=%s' % zip_file
)
if makezip_judgement is None:
judgement = 'lossless'
else:
judgement = makezip_judgement
ififuncs.generate_log(
new_log_textfile,
'EVENT = losslessness verification, status=finished, eventType=messageDigestCalculation, agentName=makezip.py, eventDetail=embedded crc32 checksum validation, eventOutcome=%s' % judgement
)
else:
log_names = move_files(inputs, sip_path, args, user)
ififuncs.get_technical_metadata(sip_path, new_log_textfile)
ififuncs.hashlib_manifest(
metadata_dir, metadata_dir + '/metadata_manifest.md5', metadata_dir
)
if args.sc:
normalise_objects_manifest(sip_path)
new_manifest_textfile = consolidate_manifests(sip_path, 'objects', new_log_textfile)
if args.zip:
if zip_file.endswith('.001'):
for split_archive in os.listdir(os.path.dirname(zip_file)):
ififuncs.generate_log(
new_log_textfile, 'EVENT = Message Digest Calculation, status=started, eventType=message digest calculation, eventDetail=%s module=hashlib' % split_archive
)
ififuncs.manifest_update(new_manifest_textfile, os.path.join(os.path.dirname(zip_file), split_archive))
ififuncs.generate_log(
new_log_textfile, 'EVENT = Message Digest Calculation, status=finished, eventType=message digest calculation, eventDetail=%s module=hashlib' % split_archive
)
else:
ififuncs.generate_log(
new_log_textfile, 'EVENT = Message Digest Calculation, status=started, eventType=message digest calculation, eventDetail=%s module=hashlib' % zip_file
)
ififuncs.manifest_update(new_manifest_textfile, zip_file)
ififuncs.generate_log(
new_log_textfile, 'EVENT = Message Digest Calculation, status=finished, eventType=message digest calculation, eventDetail=%s module=hashlib' % zip_file
)
consolidate_manifests(sip_path, 'metadata', new_log_textfile)
ififuncs.hashlib_append(
logs_dir, new_manifest_textfile,
os.path.dirname(os.path.dirname(logs_dir))
)
if args.supplement:
os.makedirs(supplemental_dir)
supplement_cmd = ['-i', args.supplement, '-user', user, '-new_folder', supplemental_dir, os.path.dirname(sip_path), '-copy']
package_update.main(supplement_cmd)
if args.zip:
os.makedirs(supplemental_dir)
supplement_cmd = ['-i', [inputxml, inputtracexml, dfxml, source_manifest], '-user', user, '-new_folder', supplemental_dir, os.path.dirname(sip_path), '-copy']
package_update.main(supplement_cmd)
if args.sc:
print('Generating Digital Forensics XML')
dfxml = accession.make_dfxml(args, sip_path, uuid)
ififuncs.generate_log(
new_log_textfile,
'EVENT = Metadata extraction - eventDetail=File system metadata extraction using Digital Forensics XML, eventOutcome=%s, agentName=makedfxml' % (dfxml)
)
ififuncs.manifest_update(new_manifest_textfile, dfxml)
sha512_log = manifest.main([sip_path, '-sha512', '-s'])
sha512_manifest = os.path.join(
os.path.dirname(sip_path), uuid + '_manifest-sha512.txt'
)
ififuncs.merge_logs_append(sha512_log, new_log_textfile, new_manifest_textfile)
ififuncs.checksum_replace(sha512_manifest, new_log_textfile, 'sha512')
os.remove(sha512_log)
ififuncs.sort_manifest(new_manifest_textfile)
if not args.quiet:
if 'log_names' in locals():
log_report(log_names)
finish = datetime.datetime.now()
print('\n- %s ran this script at %s and it finished at %s' % (user, start, finish))
if args.d:
process_dcp(sip_path, content_title, args, new_manifest_textfile, new_log_textfile, metadata_dir, clairmeta_version)
if args.accession:
register = accession.make_register()
filmographic_dict = ififuncs.extract_metadata(args.filmo_csv)[0]
for filmographic_record in filmographic_dict:
if filmographic_record['Reference Number'].lower() == reference_number.lower():
if filmographic_record['Title'] == '':
title = filmographic_record['TitleSeries'] + '; ' + filmographic_record['EpisodeNo']
else:
title = filmographic_record['Title']
oe_register = make_oe_register()
ififuncs.append_csv(oe_register, (object_entry.upper()[:2] + '-' + object_entry[2:], donation_date, '1','',title,donor,acquisition_type[1], accession_number, 'Representation of %s|Reproduction of %s' % (reference_number, parent), ''))
accession_cmd = [
os.path.dirname(sip_path), '-user', user,
'-force',
'-number', accession_number,
'-reference', reference_number,
'-register', register,
'-filmo_csv', args.filmo_csv,
'-pbcore'
]
if not parent.lower() == 'n/a':
accession_cmd.extend(['-parent', parent])
accession_cmd.extend(['-donor', donor])
accession_cmd.extend(['-depositor_reference', depositor_reference])
accession_cmd.extend(['-acquisition_type', acquisition_type[2]])
accession_cmd.extend(['-donation_date', donation_date])
accession_cmd.extend(['-reproduction_creator', reproduction_creator])
print(accession_cmd)
accession.main(accession_cmd)
return new_log_textfile, new_manifest_textfile
if __name__ == '__main__':
main(sys.argv[1:])
|
|
# coding: utf-8
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used quantities such as energy,
length, temperature, time and charge. FloatWithUnit supports conversion from
one unit to another, and additions and subtractions perform automatic
conversion if units are detected. An ArrayWithUnit is also implemented, which
is a subclass of numpy's ndarray with similar unit features.
"""
from __future__ import division, unicode_literals
from six.moves import filter, zip
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
import numpy as np
import six
import collections
from numbers import Number
import numbers
from functools import partial
from pymatgen.core.physical_constants import N_a, e
import re
"""
Some conversion factors
"""
Ha_to_eV = 27.21138386
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = 1.660538921e-27
mile_to_meters = 1609.347219
bohr_to_angstrom = 0.5291772083
bohr_to_ang = bohr_to_angstrom
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
},
"current": {
"A": 1
},
"temperature": {
"K": 1,
},
"amount": {
"mol": 1,
"atom": 1 / N_a
},
"intensity": {
"cd": 1
},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024**2,
"Gb": 1024**3,
"Tb": 1024**4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v for k, v in BASE_UNITS["memory"].items()})
# These are the supported derived units, defined in terms of powers of
# SI base units and constants.
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, e: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1}
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2}
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1}
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3}
},
"emf": {
"V": {"m": 2, "kg": 1, "s": -3, "A": -1}
},
"capacitance": {
"F": {"m": -2, "kg": -1, "s": 4, "A": 2}
},
"resistance": {
"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}
},
"conductance": {
"S": {"m": -2, "kg": -1, "s": 3, "A": 2}
},
"magnetic_flux": {
"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}
}
}
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items()))
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {}
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
unit_type = _UNAME2UTYPE[unit]
si_unit = filter(lambda k: BASE_UNITS[unit_type][k] == 1,
BASE_UNITS[unit_type].keys())
return list(si_unit)[0], BASE_UNITS[unit_type][unit]
class UnitError(BaseException):
"""
Exception class for unit errors.
"""
def check_mappings(u):
for v in DERIVED_UNITS.values():
for k2, v2 in v.items():
if all([v2.get(ku, 0) == vu for ku, vu in u.items()]) and \
all([u.get(kv2, 0) == vv2 for kv2, vv2 in v2.items()]):
return {k2: 1}
return u
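# For example, check_mappings({"kg": 1, "m": 2, "s": -2}) collapses to {"J": 1},
# since the joule entry in DERIVED_UNITS has exactly those powers and no
# numerical constants. (Illustrative note; not part of the original module.)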
class Unit(collections.Mapping):
"""
Represents a unit, e.g., "m" for meters, etc. Supports compound units.
Only integer powers are supported for units.
"""
Error = UnitError
def __init__(self, unit_def):
"""
Constructs a unit.
Args:
unit_def: A definition for the unit. Either a mapping of unit to
powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
or simply as a string "kg m^2 s^-1". Note that the supported
format uses "^" as the power operator and all units must be
space-separated.
"""
if isinstance(unit_def, six.string_types):
unit = collections.defaultdict(int)
for m in re.finditer("([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
p = m.group(2)
p = 1 if not p else int(p)
k = m.group(1)
unit[k] += p
else:
unit = {k: v for k, v in dict(unit_def).items() if v != 0}
self._unit = check_mappings(unit)
def __mul__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] += v
return Unit(new_units)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] -= v
return Unit(new_units)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, i):
return Unit({k: v * i for k, v in self.items()})
def __iter__(self):
return self._unit.__iter__()
def __getitem__(self, i):
return self._unit[i]
def __len__(self):
return len(self._unit)
def __repr__(self):
sorted_keys = sorted(self._unit.keys(),
key=lambda k: (-self._unit[k], k))
return " ".join(["{}^{}".format(k, self._unit[k])
if self._unit[k] != 1 else k
for k in sorted_keys if self._unit[k] != 0])
def __str__(self):
return self.__repr__()
@property
def as_base_units(self):
"""
Converts all units to base SI units, including derived units.
Returns:
(base_units_dict, scaling factor). base_units_dict will not
contain any constants, which are gathered in the scaling factor.
"""
b = collections.defaultdict(int)
factor = 1
for k, v in self.items():
derived = False
for d in DERIVED_UNITS.values():
if k in d:
for k2, v2 in d[k].items():
if isinstance(k2, Number):
factor *= k2 ** (v2 * v)
else:
b[k2] += v2 * v
derived = True
break
if not derived:
si, f = _get_si_unit(k)
b[si] += v
factor *= f ** v
return {k: v for k, v in b.items() if v != 0}, factor
def get_conversion_factor(self, new_unit):
"""
Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit.
"""
uo_base, ofactor = self.as_base_units
un_base, nfactor = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
units_old = sorted(uo_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
factor = ofactor / nfactor
for uo, un in zip(units_old, units_new):
if uo[1] != un[1]:
raise UnitError("Units %s and %s are not compatible!" % (uo, un))
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
return factor
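# A small, hypothetical sanity check (not part of the original module): since
# check_mappings collapses {"kg": 1, "m": 2, "s": -2} to the joule, converting
# J -> eV amounts to dividing by the electron charge `e` imported above,
# roughly 6.24e18 eV per joule.
def _example_unit_conversion():
    joule = Unit("kg m^2 s^-2")
    # get_conversion_factor returns how many of the new unit correspond to one
    # of this unit.
    ev_per_joule = joule.get_conversion_factor("eV")
    return ev_per_joule  # ~6.241e18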
class FloatWithUnit(float):
"""
Subclasses float to attach a unit type. Typically, you should use the
pre-defined unit type subclasses such as Energy, Length, etc. instead of
using FloatWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity). Note that FloatWithUnit does not override the eq
method for float, i.e., units are not checked when testing for equality.
The reason is to allow this class to be used transparently wherever floats
are expected.
>>> e = Energy(1.1, "Ha")
>>> a = Energy(1.1, "Ha")
>>> b = Energy(3, "eV")
>>> c = a + b
>>> print(c)
1.2102479761938871 Ha
>>> c.to("eV")
32.932522246000005 eV
"""
Error = UnitError
@classmethod
def from_string(cls, s):
"""
Initialize a FloatWithUnit from a string, e.g. Memory.from_string("1. Mb").
"""
# Extract num and unit string.
s = s.strip()
for i, char in enumerate(s):
if char.isalpha() or char.isspace():
break
else:
raise Exception("Unit is missing in string %s" % s)
num, unit = float(s[:i]), s[i:]
# Find unit type (set it to None if it cannot be detected)
for unit_type, d in BASE_UNITS.items():
if unit in d:
break
else:
unit_type = None
return cls(num, unit, unit_type=unit_type)
def __new__(cls, val, unit, unit_type=None):
new = float.__new__(cls, val)
new._unit = Unit(unit)
new._unit_type = unit_type
return new
def __init__(self, val, unit, unit_type=None):
"""
Initializes a float with unit.
Args:
val (float): Value
unit (Unit): A unit. E.g., "C".
unit_type (str): A type of unit. E.g., "charge"
"""
if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
raise UnitError(
"{} is not a supported unit for {}".format(unit, unit_type))
self._unit = Unit(unit)
self._unit_type = unit_type
def __repr__(self):
return super(FloatWithUnit, self).__repr__()
def __str__(self):
s = super(FloatWithUnit, self).__str__()
return "{} {}".format(s, self._unit)
def __add__(self, other):
if not hasattr(other, "unit_type"):
return super(FloatWithUnit, self).__add__(other)
if other.unit_type != self._unit_type:
raise UnitError("Adding different types of units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) + val, unit_type=self._unit_type,
unit=self._unit)
def __sub__(self, other):
if not hasattr(other, "unit_type"):
return super(FloatWithUnit, self).__sub__(other)
if other.unit_type != self._unit_type:
raise UnitError("Subtracting different units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) - val, unit_type=self._unit_type,
unit=self._unit)
def __mul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other,
unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None,
unit=self._unit * other._unit)
def __rmul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other,
unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None,
unit=self._unit * other._unit)
def __pow__(self, i):
return FloatWithUnit(float(self) ** i, unit_type=None,
unit=self._unit ** i)
def __div__(self, other):
val = super(FloatWithUnit, self).__div__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(val, unit_type=None,
unit=self._unit / other._unit)
def __truediv__(self, other):
val = super(FloatWithUnit, self).__truediv__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(val, unit_type=None,
unit=self._unit / other._unit)
def __neg__(self):
return FloatWithUnit(super(FloatWithUnit, self).__neg__(),
unit_type=self._unit_type,
unit=self._unit)
def __getnewargs__(self):
"""Function used by pickle to recreate object."""
#print(self.__dict__)
# FIXME
# There's a problem with _unit_type if we try to unpickle objects from file,
# since self._unit_type might not be defined. I think this is due to
# the use of decorators (property and unitized). In particular there are
# problems with "amu", likely due to the weight property in core.composition.
if hasattr(self, "_unit_type"):
args = float(self), self._unit, self._unit_type
else:
args = float(self), self._unit, None
return args
def __getstate__(self):
state = self.__dict__.copy()
state["val"] = float(self)
#print("in getstate %s" % state)
return state
def __setstate__(self, state):
#print("in setstate %s" % state)
self._unit = state["_unit"]
@property
def unit_type(self):
return self._unit_type
@property
def unit(self):
return self._unit
def to(self, new_unit):
"""
Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
units of each type.
Args:
new_unit: New unit type.
Returns:
A FloatWithUnit object in the new units.
Example usage:
>>> e = Energy(1.1, "eV")
>>> e = Energy(1.1, "Ha")
>>> e.to("eV")
29.932522246 eV
"""
return FloatWithUnit(
self * self.unit.get_conversion_factor(new_unit),
unit_type=self._unit_type,
unit=new_unit)
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
"""
Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
use the pre-defined unit type subclasses such as EnergyArray,
LengthArray, etc. instead of using ArrayWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity).
>>> a = EnergyArray([1, 2], "Ha")
>>> b = EnergyArray([1, 2], "eV")
>>> c = a + b
>>> print(c)
[ 1.03674933 2.07349865] Ha
>>> c.to("eV")
array([ 28.21138386, 56.42276772]) eV
"""
Error = UnitError
def __new__(cls, input_array, unit, unit_type=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attributes to the created instance
obj._unit = Unit(unit)
obj._unit_type = unit_type
return obj
def __array_finalize__(self, obj):
"""
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
comments.
"""
if obj is None:
return
self._unit = getattr(obj, "_unit", None)
self._unit_type = getattr(obj, "_unit_type", None)
#TODO abstract base class property?
@property
def unit_type(self):
return self._unit_type
#TODO abstract base class property?
@property
def unit(self):
return self._unit
def __reduce__(self):
#print("in reduce")
reduce = list(super(ArrayWithUnit, self).__reduce__())
#print("unit",self._unit)
#print(reduce[2])
reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
return tuple(reduce)
def __setstate__(self, state):
#print("in setstate %s" % str(state))
super(ArrayWithUnit, self).__setstate__(state["np_state"])
self._unit = state["_unit"]
def __repr__(self):
return "{} {}".format(np.array(self).__repr__(), self.unit)
def __str__(self):
return "{} {}".format(np.array(self).__str__(), self.unit)
def __add__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Adding different types of units is"
" not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) + np.array(other),
unit_type=self.unit_type, unit=self.unit)
def __sub__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Subtracting different units is not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) - np.array(other),
unit_type=self.unit_type, unit=self.unit)
def __mul__(self, other):
# FIXME
# Here we have the most important difference between FloatWithUnit and
# ArrayWithUnit:
# If other does not have units, return an object with the same units
# as self.
# If other *has* units, return an object *without* units, since
# taking into account all the possible derived quantities would be
# too difficult.
# Moreover, Energy(1.0, "Ha") * Time(1.0, "s") would return 1.0 Ha, which
# is a bit misleading.
# Same protocol for __div__
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__mul__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
# Cannot use super since it returns an instance of self.__class__
# while here we want a bare numpy array.
return self.__class__(
np.array(self).__mul__(np.array(other)),
unit=self.unit * other.unit)
def __rmul__(self, other):
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__rmul__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
return self.__class__(
np.array(self).__rmul__(np.array(other)),
unit=self.unit * other.unit)
def __div__(self, other):
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__div__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
return self.__class__(
np.array(self).__div__(np.array(other)),
unit=self.unit/other.unit)
def __truediv__(self, other):
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__truediv__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
return self.__class__(
np.array(self).__truediv__(np.array(other)),
unit=self.unit / other.unit)
def __neg__(self):
return self.__class__(np.array(self).__neg__(),
unit_type=self.unit_type, unit=self.unit)
def to(self, new_unit):
"""
Conversion to a new_unit.
Args:
new_unit:
New unit type.
Returns:
An ArrayWithUnit object in the new units.
Example usage:
>>> e = EnergyArray([1, 1.1], "Ha")
>>> e.to("eV")
array([ 27.21138386, 29.93252225]) eV
"""
return self.__class__(
np.array(self) * self.unit.get_conversion_factor(new_unit),
unit_type=self.unit_type, unit=new_unit)
#TODO abstract base class property?
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return ALL_UNITS[self.unit_type]
#TODO abstract base class method?
def conversions(self):
"""
Returns a string showing the available conversions.
Useful tool in interactive mode.
"""
return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
"""
Partial returns a partial object and therefore we cannot inherit class methods defined in FloatWithUnit.
This function calls partial and patches the new class before returning.
"""
newobj = partial(func, *args, **kwargs)
# monkey patch
newobj.from_string = FloatWithUnit.from_string
return newobj
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
"""
Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
objects with units if obj is a dict, else an instance of
`ArrayWithUnit`.
Args:
unit: Specific units (eV, Ha, m, ang, etc.).
"""
unit_type = _UNAME2UTYPE[unit]
if isinstance(obj, numbers.Number):
return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
elif isinstance(obj, collections.Mapping):
return {k: obj_with_unit(v, unit) for k,v in obj.items()}
else:
return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
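# A brief, hypothetical illustration of obj_with_unit (not in the original
# module): scalars become FloatWithUnit, mappings are converted value by value
# and any other object is wrapped in an ArrayWithUnit.
def _example_obj_with_unit():
    scalar = obj_with_unit(1.0, "eV")                 # FloatWithUnit, unit_type "energy"
    mapping = obj_with_unit({"band_gap": 1.1}, "eV")  # dict of FloatWithUnit
    array = obj_with_unit([1.0, 2.0], "ang")          # ArrayWithUnit, unit_type "length"
    return scalar, mapping, array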
def unitized(unit):
"""
Useful decorator to assign units to the output of a function. For
sequences, all values in the sequences are assigned the same unit. It
works with Python sequences only. The creation of numpy arrays loses all
unit information. For mapping types, the values are assigned units.
Args:
unit: Specific unit (eV, Ha, m, ang, etc.).
Example usage::
@unitized(unit="kg")
def get_mass():
return 123.45
"""
def wrap(f):
def wrapped_f(*args, **kwargs):
val = f(*args, **kwargs)
#print(val)
unit_type = _UNAME2UTYPE[unit]
if isinstance(val, collections.Sequence):
# TODO: why don't we return a ArrayWithUnit?
# This complicated way is to ensure the sequence type is
# preserved (list or tuple).
return val.__class__([FloatWithUnit(i, unit_type=unit_type,
unit=unit) for i in val])
elif isinstance(val, collections.Mapping):
for k, v in val.items():
val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)
elif isinstance(val, numbers.Number):
return FloatWithUnit(val, unit_type=unit_type, unit=unit)
elif val is None:
pass
else:
raise TypeError("Don't know how to assign units to %s" % str(val))
return val
return wrapped_f
return wrap
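# A minimal usage sketch of the unitized decorator (hypothetical, mirroring the
# docstring example above): the plain float returned by the wrapped function
# comes back as a FloatWithUnit in kg.
def _example_unitized():
    @unitized("kg")
    def get_mass():
        return 123.45
    return get_mass()  # FloatWithUnit(123.45, "kg")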
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
'''
Created on Oct 7, 2015
@author: Lucas Lehnert ([email protected])
'''
import numpy as np
from mdp import mspbeStateActionValues, mstdeStateActionValues
def experimentSampleTransitions( iterations, mdp, pi, agent, errorMeasures=[] ):
'''
Sample transitions using the state distribution of the MDP, its transition model and the current policy (the
action value function is computed using the agent).
@param iterations: Maximum length of a trajectory.
@param mdp: MDP used to simulate trajectories.
@param pi: Policy used to select actions. The action-value function is computed using the agent.getTheta() and
agent.getBasisFunction() methods.
@param agent: Control agent used for an experiment, must be an instance of qlearning.LinearQLearning
@param errorMeasures: List of error measures computed during the experiment. This is passed as a list of function pointers
with the agent being the only argument.
@return: errorBenchmarks, completed
errorBenchmarks contains a list of return values for each errorMeasure. completed is a boolean indicating if the
trajectory was completed or if a nan-error or value error was detected (indicating divergence).
'''
errorBenchmarks = []
for err in errorMeasures:
errorBenchmarks.append( [err( agent )] )
completed = False
try:
for _ in range( iterations ):
def q( s, a ):
return np.dot( agent.getTheta(), agent.getBasisFunction()( s, a ) )
s = mdp.sampleStartState()
a = pi.selectAction( s, q )
snext = mdp.sampleNextState( s, a )
reward = mdp.getReward( s, a, snext )
agent.updateTransition( s, a, reward, snext )
for i in range( len( errorBenchmarks ) ):
errorBenchmarks[i].append( errorMeasures[i]( agent ) )
completed = True
except Exception as e:
print e
completed = False
return errorBenchmarks, completed
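def _exampleSampleTransitionsUsage( mdp, pi, agent ):
    '''
    Hypothetical usage sketch (not part of the original module): run 1000
    sampled transitions and record the MSPBE of the agent's weights after every
    update. mdp, pi and agent are assumed to be concrete instances of the MDP,
    policy and qlearning.LinearQLearning classes referred to above.
    '''
    mspbe = lambda a : mspbeStateActionValues( a.getTheta(), mdp, a.getBasisFunction(), pi, parametricPolicy=True )
    return experimentSampleTransitions( 1000, mdp, pi, agent, errorMeasures=[mspbe] )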
def experimentSimulateTransitions( iterations, mdp, pi, agent, errorMeasures=[], transitionListener=[], actionFromAgent=False ):
'''
Simulate transitions through an MDP with the given policy and agent.
@param iterations: Maximum length of a trajectory.
@param mdp: MDP used to simulate trajectories.
@param pi: Policy used to select actions. The action-value function is computed using the agent.getTheta() and
agent.getBasisFunction() methods.
@param agent: Control agent used for an experiment, must be an instance of qlearning.LinearQLearning
@param errorMeasures: List of error measures computed during the experiment. This is passed as a list of function pointers
with the agent being the only argument.
@param transitionListener: List of function pointers listening to MDP transitions. Each listener is called as fn(s, a, sn, r), matching the call order used below.
@param actionFromAgent: Boolean, default is false. If true, obtain the next action by calling agent.getNextAction().
@return: errorBenchmarks, transitionBenchmark, completed
errorBenchmarks and transitionBenchmark contain each a list of return values for the errorMeasure and
transitionListener. completed is a boolean indicating if the trajectory was completed or if a nan-error
or value error was detected (indicating divergence).
'''
errorBenchmarks = []
for err in errorMeasures:
errorBenchmarks.append( [err( agent )] )
transitionBenchmark = []
for _ in transitionListener:
transitionBenchmark.append( [] )
completed = False
try:
s = mdp.sampleStartState()
for i in range( iterations ):
if actionFromAgent and i > 0:
a = agent.getNextAction()
else:
def q( s, a ):
return np.dot( agent.getTheta(), agent.getBasisFunction()( s, a ) )
a = pi.selectAction( s, q )
snext = mdp.sampleNextState( s, a )
reward = mdp.getReward( s, a, snext )
agent.updateTransition( s, a, reward, snext )
for i in range( len( errorBenchmarks ) ):
errorBenchmarks[i].append( errorMeasures[i]( agent ) )
for i in range( len( transitionBenchmark ) ):
transitionBenchmark[i].append( transitionListener[i]( s, a, snext, reward ) )
s = snext
if mdp.isGoalState( s ):
break
completed = True
except Exception as e:
print e
completed = False
return errorBenchmarks, transitionBenchmark, completed
def experimentDynamicProgrammingSweeps( sweeps, mdp, pi, agent, errorMeasures=[], d_s=None ):
'''
@deprecated: Delete this.
'''
if d_s is None:
d_s = np.ones( len( mdp.getStateSpace() ) ) / float( len( mdp.getStateSpace() ) )
errorBenchmarks = []
for err in errorMeasures:
errorBenchmarks.append( [err( agent )] )
for _ in range( sweeps ):
def q( s, a ):
return np.dot( agent.getTheta(), agent.getBasisFunction()( s, a ) )
est = np.zeros( agent.getEstimateDim() )
for s, a in mdp.getStateActionPairIterable():
ind_s = mdp.indexOfState( s )
nextStateDistr = mdp.getNextStateDistribution( s, a )
for snext in mdp.getStateSpace():
ind_snext = mdp.indexOfState( snext )
prob = d_s[ind_s] * pi.selectionProbability( s, a, q ) * nextStateDistr[ind_snext]
if prob == 0.0:
continue
reward = mdp.getReward( s, a, snext )
est += prob * agent.getUpdate( s, a, reward, snext )
agent.updateEstimates( est )
for i in range( len( errorBenchmarks ) ):
try:
errorBenchmarks[i].append( errorMeasures[i]( agent ) )
except:
errorBenchmarks[i].append( np.NaN )
return errorBenchmarks
def evaluateQlearningDP( numberOfSweeps, agent, mdp, phi, piBehavior, piTarget, stationary_distribution=None ):
'''
@deprecated: Delete this.
'''
if stationary_distribution is None:
stationary_distribution = np.ones( len( mdp.getStateSpace() ) * len( mdp.getActionSpace() ) )
mspbe = lambda t : mspbeStateActionValues( t, mdp, phi, piTarget, parametricPolicy=True, d_sa=stationary_distribution )
mspbeList = [ mspbe( agent.getTheta() ) ]
mstde = lambda t : mstdeStateActionValues( t, mdp, phi, piTarget, parametricPolicy=True, d_sa=stationary_distribution )
mstdeList = [ mstde( agent.getTheta() ) ]
thetaList = [ agent.getTheta() ]
# dsa = np.ones( len( mdp.getStateSpace() ) * len( mdp.getActionSpace() ) )
# dsa /= np.sum( dsa )
dsa = stationary_distribution
for _ in range( numberOfSweeps ):
try:
for s, a in mdp.getStateActionPairIterable():
sai = mdp.indexOfStateActionPair( ( s, a ) )
prob = dsa[sai]
def q( s, a ):
return np.dot( agent.getTheta(), agent.phi( s, a ) )
grad_theta = np.zeros( agent.getEstimateDim() )
nextStateDistr = mdp.getNextStateDistribution( s, a )
for snext in mdp.getStateSpace():
sj = mdp.indexOfState( snext )
prob_transition = nextStateDistr[sj]
if prob_transition == 0.0:
continue
reward = mdp.getReward( s, a, snext )
grad_theta = grad_theta + prob_transition * agent.getUpdate( s, a, reward, snext )
agent.updateEstimates( prob * grad_theta )
# print 's,a: ' + str( s ) + ',' + str( a ) + ', theta=' + str( agent.getTheta() )
except ValueError, e:
print e
continue
mspbeList.append( mspbe( agent.getTheta() ) )
mstdeList.append( mstde( agent.getTheta() ) )
thetaList.append( agent.getTheta() )
return mspbeList, mstdeList, thetaList
def evaluateQlearningSimulated( numberOfEpisodes, agent, mdp, phi, piBehavior, piTarget, max_episode_length=10000 ):
'''
@deprecated: Delete this.
'''
mspbe = lambda t : mspbeStateActionValues( t, mdp, phi, piTarget, parametricPolicy=True )
mstde = lambda t : mstdeStateActionValues( t, mdp, phi, piTarget, parametricPolicy=True )
mspbeListEp = [ mspbe( agent.getTheta() ) ]
mstdeListEp = [ mstde( agent.getTheta() ) ]
thetaListEp = [agent.getTheta()]
episodeLength = []
for epInd in range( numberOfEpisodes ):
print 'Running episode ' + str( epInd )
try:
s = mdp.sampleStartState()
t = 0
while not mdp.isGoalState( s ):
def q( s, a ):
return np.dot( agent.getTheta(), agent.getBasisFunction()( s, a ) )
a = piBehavior.selectAction( s, actionValueFunction=q )
snext = mdp.sampleNextState( s, a )
reward = mdp.getReward( s, a, snext )
agent.updateTransition( s, a, reward, snext )
s = snext
t += 1
if t >= max_episode_length:
break
print 'Episode length: ' + str( t + 1 )
mspbeListEp.append( mspbe( agent.getTheta() ) )
mstdeListEp.append( mstde( agent.getTheta() ) )
thetaListEp.append( agent.getTheta() )
episodeLength.append( t + 1 )
except ValueError, e:
print e
continue
resultDict = { 'episodeLength' : episodeLength, 'mspbeEp' : mspbeListEp, 'mstdeEp' : mstdeListEp, 'thetaEp' : thetaListEp }
return resultDict
|
|
from collections import defaultdict
from datetime import date, datetime, timedelta
import json
import logging
import os
import re
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from consts.award_type import AwardType
from consts.client_type import ClientType
from consts.district_type import DistrictType
from consts.event_type import EventType
from consts.playoff_type import PlayoffType
from controllers.base_controller import LoggedInHandler
from database import match_query
from database.event_query import DistrictEventsQuery, EventListQuery
from database.district_query import DistrictChampsInYearQuery
from helpers.award_manipulator import AwardManipulator
from helpers.district_manipulator import DistrictManipulator
from helpers.district_team_manipulator import DistrictTeamManipulator
from helpers.event_manipulator import EventManipulator
from helpers.event_team_manipulator import EventTeamManipulator
from helpers.match_helper import MatchHelper
from helpers.notification_sender import NotificationSender
from helpers.playoff_advancement_helper import PlayoffAdvancementHelper
from helpers.search_helper import SearchHelper
from helpers.team_manipulator import TeamManipulator
from models.award import Award
from models.district import District
from models.district_team import DistrictTeam
from models.event import Event
from models.event_team import EventTeam
from models.mobile_client import MobileClient
from models.sitevar import Sitevar
from models.subscription import Subscription
from models.team import Team
class AdminMobileClearEnqueue(LoggedInHandler):
"""
Clears mobile clients with duplicate client_ids
Will leave the most recently updated one
"""
def get(self):
self._require_admin()
taskqueue.add(
queue_name='admin',
url='/tasks/admin/clear_mobile_duplicates',
method='GET')
path = os.path.join(os.path.dirname(__file__), '../../templates/admin/mobile_clear_enqueue.html')
self.response.out.write(template.render(path, self.template_values))
class AdminMobileClear(LoggedInHandler):
"""
Fetch all mobile clients, order by messaging ID, then update time (desc).
If the current client has the same ID as the last one (which is always going to be newer), mark the current one to be removed
"""
def get(self):
clients = MobileClient.query().fetch()
clients = sorted(clients, key=lambda x: (x.messaging_id, x.updated))
last = None
to_remove = []
for client in clients:
if last is not None and client.messaging_id == last.messaging_id:
logging.info("Removing")
to_remove.append(client.key)
last = client
count = len(to_remove)
if to_remove:
ndb.delete_multi(to_remove)
logging.info("Removed {} duplicate mobile clients".format(count))
template_values = {'count': count}
path = os.path.join(os.path.dirname(__file__), '../../templates/admin/mobile_clear_do.html')
self.response.out.write(template.render(path, template_values))
class AdminSubsClearEnqueue(LoggedInHandler):
"""
Removes subscriptions to past years' things
"""
def get(self):
self._require_admin()
taskqueue.add(
queue_name='admin',
url='/tasks/admin/clear_old_subs',
method='GET')
path = os.path.join(os.path.dirname(__file__), '../../templates/admin/subs_clear_enqueue.html')
self.response.out.write(template.render(path, self.template_values))
class AdminSubsClear(LoggedInHandler):
def get(self):
year = date.today().year - 1
# Compile key regex
# Matches event (2014ctgro), team@event (frc2014_2014ctgro), firehose (2014*)
ps = "^{}[a-z]+|_{}[a-z]+|{}\*$".format(year, year, year)
logging.info("Pattern: {}".format(ps))
p = re.compile(ps)
subs = Subscription.query().fetch()
to_delete = []
for sub in subs:
if p.match(sub.model_key):
to_delete.append(sub.key)
count = len(to_delete)
if to_delete:
ndb.delete_multi(to_delete)
logging.info("Removed {} old subscriptions".format(count))
template_values = {'count': count}
path = os.path.join(os.path.dirname(__file__), '../../templates/admin/subs_clear_do.html')
self.response.out.write(template.render(path, template_values))
class AdminWebhooksClearEnqueue(LoggedInHandler):
"""
Tries to ping every webhook and removes ones that don't respond
"""
def get(self):
self._require_admin()
taskqueue.add(
queue_name='admin',
url='/tasks/admin/clear_old_webhooks',
method='GET')
path = os.path.join(os.path.dirname(__file__), '../../templates/admin/webhooks_clear_enqueue.html')
self.response.out.write(template.render(path, self.template_values))
class AdminWebhooksClear(LoggedInHandler):
def get(self):
webhooks = MobileClient.query(MobileClient.client_type == ClientType.WEBHOOK).fetch()
failures = []
from helpers.tbans_helper import TBANSHelper
for client in webhooks:
if not TBANSHelper.ping(client):
failures.append(client.key)
count = len(failures)
if failures:
ndb.delete_multi(failures)
logging.info("Deleted {} broken webhooks".format(count))
template_values = {'count': count}
path = os.path.join(os.path.dirname(__file__), '../../templates/admin/webhooks_clear_do.html')
self.response.out.write(template.render(path, template_values))
class AdminCreateDistrictTeamsEnqueue(LoggedInHandler):
"""
Enqueues a task to rebuild old district teams from event teams.
"""
def get(self, year):
self._require_admin()
taskqueue.add(
queue_name='admin',
target='backend-tasks',
url='/backend-tasks/do/rebuild_district_teams/{}'.format(year),
method='GET')
self.response.out.write("Enqueued district teams for {}".format(year))
class AdminRebuildDivisionsEnqueue(LoggedInHandler):
"""
Enqueue a task to build past event parent/child relationships
"""
def get(self, year):
self._require_admin()
taskqueue.add(
queue_name='admin',
target='backend-tasks',
url='/backend-tasks/do/rebuild_divisions/{}'.format(year),
method='GET')
class AdminRebuildDivisionsDo(LoggedInHandler):
"""
Add in event parent/child relationships
Map CMP_DIVISION -> CMP_FINALS and DCMP_DIVISION -> DCMP (in the same district)
Ensure all events end on the same day, to account for #2champz
"""
TYPE_MAP = {
EventType.CMP_DIVISION: EventType.CMP_FINALS,
EventType.DISTRICT_CMP_DIVISION: EventType.DISTRICT_CMP,
}
def get(self, year):
self._require_admin()
year = int(year)
events = EventListQuery(year).fetch()
events_by_type = defaultdict(list)
for event in events:
if event.event_type_enum in self.TYPE_MAP.keys() or event.event_type_enum in self.TYPE_MAP.values():
events_by_type[event.event_type_enum].append(event)
output = ""
for from_type, to_type in self.TYPE_MAP.iteritems():
for event in events_by_type[to_type]:
divisions = []
for candidate_division in events_by_type[from_type]:
if candidate_division.end_date.date() == event.end_date.date() and candidate_division.district_key == event.district_key:
candidate_division.parent_event = event.key
divisions.append(candidate_division.key)
output += "Event {} is the parent of {}<br/>".format(event.key_name, candidate_division.key_name)
EventManipulator.createOrUpdate(candidate_division)
event.divisions = divisions
if divisions:
output += "Divisions {} added to {}<br/>".format(event.division_keys_json, event.key_name)
EventManipulator.createOrUpdate(event)
self.response.out.write(output)
class AdminBackfillPlayoffTypeEnqueue(LoggedInHandler):
"""
Enqueue a task to backfill playoff types for past events
"""
def get(self, year):
self._require_admin()
taskqueue.add(
queue_name='admin',
target='backend-tasks',
url='/backend-tasks/do/backfill_playoff_type/{}'.format(year),
method='GET')
class AdminBackfillPlayoffTypeDo(LoggedInHandler):
"""
Set playoff types
"""
# These offseasons played the 2014 game
EXCEPTIONS_2015 = ['2015cc', '2015cacc', '2015mttd']
def get(self, year):
self._require_admin()
year = int(year)
events = EventListQuery(year).fetch()
for event in events:
if not event.playoff_type:
if event.year == 2015 and event.key_name not in self.EXCEPTIONS_2015:
event.playoff_type = PlayoffType.AVG_SCORE_8_TEAM
else:
event.playoff_type = PlayoffType.BRACKET_8_TEAM
EventManipulator.createOrUpdate(event)
self.response.out.write("Update {} events".format(len(events)))
class AdminClearEventTeamsDo(LoggedInHandler):
"""
Remove all eventteams from an event
"""
def get(self, event_key):
self._require_admin()
event = Event.get_by_id(event_key)
if not event:
self.abort(404)
return
existing_event_team_keys = set(EventTeam.query(EventTeam.event == event.key).fetch(1000, keys_only=True))
EventTeamManipulator.delete_keys(existing_event_team_keys)
self.response.out.write("Deleted {} EventTeams from {}".format(len(existing_event_team_keys), event_key))
class AdminCreateDistrictTeamsDo(LoggedInHandler):
def get(self, year):
year = int(year)
team_districts = defaultdict(list)
logging.info("Fetching events in {}".format(year))
year_events = Event.query(year == Event.year, Event.district_key == None, Event.event_district_enum != None).fetch()
for event in year_events:
logging.info("Fetching EventTeams for {}".format(event.key_name))
event_teams = EventTeam.query(EventTeam.event == event.key).fetch()
for event_team in event_teams:
team_districts[event_team.team.id()].append(event.district_key.id())
new_district_teams = []
for team_key, districts in team_districts.iteritems():
most_frequent_district_key = max(set(districts), key=districts.count)
logging.info("Assuming team {} belongs to {}".format(team_key, most_frequent_district_key))
dt_key = DistrictTeam.renderKeyName(most_frequent_district_key, team_key)
new_district_teams.append(DistrictTeam(id=dt_key, year=year, team=ndb.Key(Team, team_key), district_key=ndb.Key(District, most_frequent_district_key)))
logging.info("Finishing updating old district teams from event teams")
DistrictTeamManipulator.createOrUpdate(new_district_teams)
self.response.out.write("Finished creating district teams for {}".format(year))
class AdminCreateDistrictsEnqueue(LoggedInHandler):
"""
Create District models from old DCMPs
"""
def get(self, year):
self._require_admin()
taskqueue.add(
queue_name='admin',
target='backend-tasks',
url='/backend-tasks-b2/do/rebuild_districts/{}'.format(year),
method='GET'
)
self.response.out.write("Enqueued district creation for {}".format(year))
class AdminCreateDistrictsDo(LoggedInHandler):
def get(self, year):
year = int(year)
year_dcmps = DistrictChampsInYearQuery(year).fetch()
districts_to_write = []
for dcmp in year_dcmps:
district_abbrev = DistrictType.type_abbrevs[dcmp.event_district_enum]
district_key = District.renderKeyName(year, district_abbrev)
logging.info("Creating {}".format(district_key))
district = District(
id=district_key,
year=year,
abbreviation=district_abbrev,
display_name=DistrictType.type_names[dcmp.event_district_enum],
elasticsearch_name=next((k for k, v in DistrictType.elasticsearch_names.iteritems() if v == dcmp.event_district_enum), None)
)
districts_to_write.append(district)
logging.info("Writing {} new districts".format(len(districts_to_write)))
DistrictManipulator.createOrUpdate(districts_to_write, run_post_update_hook=False)
for dcmp in year_dcmps:
district_abbrev = DistrictType.type_abbrevs[dcmp.event_district_enum]
district_key = District.renderKeyName(year, district_abbrev)
district_events_future = DistrictEventsQuery(district_key).fetch_async()
district_events = district_events_future.get_result()
logging.info("Found {} events to update".format(len(district_events)))
events_to_write = []
for event in district_events:
event.district_key = ndb.Key(District, district_key)
events_to_write.append(event)
EventManipulator.createOrUpdate(events_to_write)
for dcmp in year_dcmps:
district_abbrev = DistrictType.type_abbrevs[dcmp.event_district_enum]
district_key = District.renderKeyName(year, district_abbrev)
districtteams_future = DistrictTeam.query(DistrictTeam.year == year, DistrictTeam.district == DistrictType.abbrevs.get(district_abbrev, None)).fetch_async()
districtteams = districtteams_future.get_result()
logging.info("Found {} DistrictTeams to update".format(len(districtteams)))
districtteams_to_write = []
for districtteam in districtteams:
districtteam.district_key = ndb.Key(District, district_key)
districtteams_to_write.append(districtteam)
DistrictTeamManipulator.createOrUpdate(districtteams_to_write)
class AdminPostEventTasksDo(LoggedInHandler):
"""
Runs cleanup tasks after an event is over if necessary
"""
def get(self, event_key):
# Fetch for later
event_future = Event.get_by_id_async(event_key)
matches_future = match_query.EventMatchesQuery(event_key).fetch_async()
# Rebuild event teams
taskqueue.add(
url='/tasks/math/do/eventteam_update/' + event_key,
method='GET')
# Create Winner/Finalist awards for offseason events
awards = []
event = event_future.get_result()
if event.event_type_enum in {EventType.OFFSEASON, EventType.FOC}:
matches = MatchHelper.organizeMatches(matches_future.get_result())
bracket = PlayoffAdvancementHelper.generateBracket(matches, event, event.alliance_selections)
if 'f' in bracket:
winning_alliance = '{}_alliance'.format(bracket['f']['f1']['winning_alliance'])
if winning_alliance == 'red_alliance':
losing_alliance = 'blue_alliance'
else:
losing_alliance = 'red_alliance'
awards.append(Award(
id=Award.render_key_name(event.key_name, AwardType.WINNER),
name_str="Winner",
award_type_enum=AwardType.WINNER,
year=event.year,
event=event.key,
event_type_enum=event.event_type_enum,
team_list=[ndb.Key(Team, 'frc{}'.format(team)) for team in bracket['f']['f1'][winning_alliance] if team.isdigit()],
recipient_json_list=[json.dumps({'team_number': team, 'awardee': None}) for team in bracket['f']['f1'][winning_alliance]],
))
awards.append(Award(
id=Award.render_key_name(event.key_name, AwardType.FINALIST),
name_str="Finalist",
award_type_enum=AwardType.FINALIST,
year=event.year,
event=event.key,
event_type_enum=event.event_type_enum,
team_list=[ndb.Key(Team, 'frc{}'.format(team)) for team in bracket['f']['f1'][losing_alliance] if team.isdigit()],
recipient_json_list=[json.dumps({'team_number': team, 'awardee': None}) for team in bracket['f']['f1'][losing_alliance]],
))
AwardManipulator.createOrUpdate(awards)
self.response.out.write("Finished post-event tasks for {}. Created awards: {}".format(event_key, awards))
class AdminRegistrationDayEnqueue(LoggedInHandler):
def post(self):
"""
Configures scheduling a registration day in advance
This will enqueue the requested year's event details task every X minutes
Also updates the "short cache" sitevar to reduce timeouts for that day
:param date_string: YYYY-mm-dd formatted day on which we poll faster
:param event_year: The year of events to fetch
:param interval: How many minutes between fetches
"""
self._require_admin()
date_string = self.request.get("date_string")
event_year = self.request.get("event_year")
interval = self.request.get("interval")
start = datetime.strptime(date_string, "%Y-%m-%d")
event_year = int(event_year)
interval = int(interval)
# Enqueue the tasks
now = datetime.now()
for i in xrange(0, 24*60, interval):
# 24*60 is number of minutes per day
task_eta = start + timedelta(minutes=i)
if task_eta < now:
# Don't enqueue tasks in the past
continue
taskqueue.add(
queue_name='datafeed',
target='backend-tasks',
url='/backend-tasks/get/event_list/{}'.format(event_year),
eta=task_eta,
method='GET'
)
# Set the cache timeout sitevar
end_timestamp = (start + timedelta(days=1) - datetime(1970, 1, 1)).total_seconds()
cache_key_regex = ".*{}.*".format(event_year)
turbo_mode_json = {
'regex': cache_key_regex,
'valid_until': int(end_timestamp),
'cache_length': 61
}
turbo_sitevar = Sitevar.get_or_insert('turbo_mode', description="Temporarily shorten cache expiration")
turbo_sitevar.contents = turbo_mode_json
turbo_sitevar.put()
self.response.out.write("Enqueued {} tasks to update {} events starting at {}".format((24*60/interval), event_year, start))
class AdminRunPostUpdateHooksEnqueue(LoggedInHandler):
def get(self, model_type):
if model_type == 'events':
taskqueue.add(
queue_name='admin',
url='/tasks/admin/do/run_post_update_hooks/events',
method='GET')
self.response.out.write("Enqueued run post update hooks for events")
elif model_type == 'teams':
taskqueue.add(
queue_name='admin',
url='/tasks/admin/do/run_post_update_hooks/teams',
method='GET')
self.response.out.write("Enqueued run post update hooks for teams")
else:
self.response.out.write("Unknown model type: {}".format(model_type))
class AdminRunPostUpdateHooksDo(LoggedInHandler):
def get(self, model_type):
if model_type == 'events':
event_keys = Event.query().fetch(keys_only=True)
for event_key in event_keys:
taskqueue.add(
queue_name='admin',
url='/tasks/admin/do/run_event_post_update_hook/' + event_key.id(),
method='GET')
elif model_type == 'teams':
team_keys = Team.query().fetch(keys_only=True)
for team_key in team_keys:
taskqueue.add(
queue_name='admin',
url='/tasks/admin/do/run_team_post_update_hook/' + team_key.id(),
method='GET')
class AdminRunEventPostUpdateHookDo(LoggedInHandler):
def get(self, event_key):
event = Event.get_by_id(event_key)
EventManipulator.runPostUpdateHook([event])
class AdminRunTeamPostUpdateHookDo(LoggedInHandler):
def get(self, team_key):
team = Team.get_by_id(team_key)
TeamManipulator.runPostUpdateHook([team])
class AdminUpdateAllTeamSearchIndexEnqueue(LoggedInHandler):
def get(self):
taskqueue.add(
queue_name='search-index-update',
url='/tasks/do/update_all_team_search_index',
method='GET')
self.response.out.write("Enqueued update all team search index")
class AdminUpdateAllTeamSearchIndexDo(LoggedInHandler):
def get(self):
team_keys = Team.query().fetch(keys_only=True)
for team_key in team_keys:
taskqueue.add(
queue_name='search-index-update',
url='/tasks/do/update_team_search_index/' + team_key.id(),
method='GET')
class AdminUpdateTeamSearchIndexDo(LoggedInHandler):
def get(self, team_key):
team = Team.get_by_id(team_key)
SearchHelper.update_team_awards_index(team)
SearchHelper.update_team_location_index(team)
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for changing internal_only property of a Bot."""
import logging
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
from dashboard import add_point_queue
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.common import datastore_hooks
from dashboard.common import stored_object
from dashboard.models import anomaly
from dashboard.models import graph_data
# Number of Row entities to process at once.
_MAX_ROWS_TO_PUT = 25
# Number of TestMetadata entities to process at once.
_MAX_TESTS_TO_PUT = 25
# Which queue to use for tasks started by this handler. Must be in queue.yaml.
_QUEUE_NAME = 'migrate-queue'
class ChangeInternalOnlyHandler(request_handler.RequestHandler):
"""Changes internal_only property of Bot, TestMetadata, and Row."""
def get(self):
"""Renders the UI for selecting bots."""
masters = {}
bots = graph_data.Bot.query().fetch()
for bot in bots:
master_name = bot.key.parent().string_id()
bot_name = bot.key.string_id()
bot_list = masters.setdefault(master_name, [])
bot_list.append({
'name': bot_name,
'internal_only': bot.internal_only,
})
logging.info('MASTERS: %s', masters)
self.RenderHtml('change_internal_only.html', {
'masters': masters,
})
def post(self):
"""Updates the selected bots internal_only property.
POST requests will be made by the task queue; tasks are added to the task
queue either by a kick-off POST from the front-end form, or by this handler
itself.
Request parameters:
internal_only: "true" if turning on internal_only, else "false".
bots: Bots to update. Multiple bots parameters are possible; the value
of each should be a string like "MasterName/platform-name".
test: An urlsafe Key for a TestMetadata entity.
cursor: An urlsafe Cursor; this parameter is only given if we're part-way
through processing a Bot or a TestMetadata.
Outputs:
A message to the user if this request was started by the web form,
or an error message if something went wrong, or nothing.
"""
# /change_internal_only should be only accessible if one has administrator
# privileges, so requests are guaranteed to be authorized.
datastore_hooks.SetPrivilegedRequest()
internal_only_string = self.request.get('internal_only')
if internal_only_string == 'true':
internal_only = True
elif internal_only_string == 'false':
internal_only = False
else:
self.ReportError('No internal_only field')
return
bot_names = self.request.get_all('bots')
test_key_urlsafe = self.request.get('test')
cursor = self.request.get('cursor', None)
if bot_names and len(bot_names) > 1:
self._UpdateMultipleBots(bot_names, internal_only)
self.RenderHtml('result.html', {
'headline': ('Updating internal_only. This may take some time '
'depending on the data to update. Check the task queue '
'to determine whether the job is still in progress.'),
})
elif bot_names and len(bot_names) == 1:
self._UpdateBot(bot_names[0], internal_only, cursor=cursor)
elif test_key_urlsafe:
self._UpdateTest(test_key_urlsafe, internal_only, cursor=cursor)
def _UpdateBotWhitelist(self, bot_master_names, internal_only):
"""Updates the global bot_whitelist object, otherwise subsequent add_point
calls will overwrite our work."""
bot_whitelist = stored_object.Get(add_point_queue.BOT_WHITELIST_KEY)
bot_names = [b.split('/')[1] for b in bot_master_names]
if internal_only:
bot_whitelist = [b for b in bot_whitelist if b not in bot_names]
else:
bot_whitelist.extend(bot_names)
bot_whitelist = list(set(bot_whitelist))
bot_whitelist.sort()
stored_object.Set(add_point_queue.BOT_WHITELIST_KEY, bot_whitelist)
def _UpdateMultipleBots(self, bot_names, internal_only):
"""Kicks off update tasks for individual bots and their tests."""
self._UpdateBotWhitelist(bot_names, internal_only)
for bot_name in bot_names:
taskqueue.add(
url='/change_internal_only',
params={
'bots': bot_name,
'internal_only': 'true' if internal_only else 'false'
},
queue_name=_QUEUE_NAME)
def _UpdateBot(self, bot_name, internal_only, cursor=None):
"""Starts updating internal_only for the given bot and associated data."""
master, bot = bot_name.split('/')
bot_key = ndb.Key('Master', master, 'Bot', bot)
if not cursor:
# First time updating for this Bot.
bot_entity = bot_key.get()
if bot_entity.internal_only != internal_only:
bot_entity.internal_only = internal_only
bot_entity.put()
else:
cursor = datastore_query.Cursor(urlsafe=cursor)
# Fetch a certain number of TestMetadata entities starting from cursor. See:
# https://developers.google.com/appengine/docs/python/ndb/queryclass
# Start update tasks for each existing subordinate TestMetadata.
test_query = graph_data.TestMetadata.query(
graph_data.TestMetadata.master_name == master,
graph_data.TestMetadata.bot_name == bot)
test_keys, next_cursor, more = test_query.fetch_page(
_MAX_TESTS_TO_PUT, start_cursor=cursor, keys_only=True)
for test_key in test_keys:
taskqueue.add(
url='/change_internal_only',
params={
'test': test_key.urlsafe(),
'internal_only': 'true' if internal_only else 'false',
},
queue_name=_QUEUE_NAME)
if more:
taskqueue.add(
url='/change_internal_only',
params={
'bots': bot_name,
'cursor': next_cursor.urlsafe(),
'internal_only': 'true' if internal_only else 'false',
},
queue_name=_QUEUE_NAME)
def _UpdateTest(self, test_key_urlsafe, internal_only, cursor=None):
"""Updates the given TestMetadata and associated Row entities."""
test_key = ndb.Key(urlsafe=test_key_urlsafe)
if not cursor:
# First time updating for this TestMetadata.
test_entity = test_key.get()
if test_entity.internal_only != internal_only:
test_entity.internal_only = internal_only
test_entity.put()
# Update all of the Anomaly entities for this test.
# Assuming that this should be fast enough to do in one request
# for any one test.
anomalies = anomaly.Anomaly.GetAlertsForTest(test_key)
for anomaly_entity in anomalies:
if anomaly_entity.internal_only != internal_only:
anomaly_entity.internal_only = internal_only
ndb.put_multi(anomalies)
else:
cursor = datastore_query.Cursor(urlsafe=cursor)
# Fetch a certain number of Row entities starting from cursor.
rows_query = graph_data.Row.query(
graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
rows, next_cursor, more = rows_query.fetch_page(
_MAX_ROWS_TO_PUT, start_cursor=cursor)
for row in rows:
if row.internal_only != internal_only:
row.internal_only = internal_only
ndb.put_multi(rows)
if more:
taskqueue.add(
url='/change_internal_only',
params={
'test': test_key_urlsafe,
'cursor': next_cursor.urlsafe(),
'internal_only': 'true' if internal_only else 'false',
},
queue_name=_QUEUE_NAME)
|
|
import sys
import os
import json
from meta.MetaProcessor import MetaProcessor
from meta.utils import Utils
import re
import copy
import traceback
class Platform(MetaProcessor):
"""docstring for Platform"""
def __init__(self,config,stringUtils):
super(Platform, self).__init__(config, stringUtils)
self.entityPattern = re.compile("entity", re.IGNORECASE)
self.servicePattern = re.compile("service", re.IGNORECASE)
def findEntities(self,hash):
"""docstring for findEntities"""
return self.findDictsWithKey(hash,"entityName")
def preprocessType(self,dic):
"""docstring for preprocessType"""
if dic!=None and 'type' in dic:
typeValue = dic['type']
dic['type_' + typeValue] = True
def preprocessProperty(self,property,hash,hashes):
"""docstring for preprocessProperty"""
print("preprocess property " + str(property))
return self.globalPlatform.processProperty(property,hash,hashes)
# print("preprocess property " + str(property))
#
# type = property['type']
# property['type_relationship'] = False
# property['type_' + type] = True
#
# if type=='string' or type=='url':
# property['type'] = 'NSString'
# property['object'] = True
# elif type=='integer':
# property['type'] = 'NSInteger'
# property['object'] = False
# elif type=='float':
# property['type'] = 'CGFloat'
# property['object'] = False
# elif type=='double':
# property['type'] = 'double'
# property['object'] = False
# elif type=='bool':
# property['type'] = 'BOOL'
# property['object'] = False
# elif type=='date':
# property['type'] = 'NSDate'
# property['object'] = True
# elif type=='relationship':
# if property['relationshipType']=='toMany':
# property['_toMany_'] = True
# else:
# property['_toMany_'] = False
#
# property.update(self.preprocessRelationship(property,hashes))
# else:
# Utils.printError("Error: unknown property type: " + type)
# sys.exit()
def preprocessHash(self,key,hashName,hash,hashes):
"""Preprocess entity defined in a different hash"""
hashFileName = hashName + ".json"
matching = [s for s in hashes if hashFileName==os.path.basename(s)]
if len(matching)>0:
with open (matching[0], "r") as f:
hashString = f.read()
hashObject = json.loads(hashString)
return self.preprocessModel(hashObject,hashes)
else:
Utils.printError("Error: hash: " + hashName + " not found in " + str(hashes))
sys.exit()
def preprocessPrimaryKeys(self,primaryKeys,model,hashes):
"""docstring for preprocessPrimaryKeys"""
self.preprocessList(primaryKeys)
if 'properties' in model:
properties = model['properties']
for primaryKey in primaryKeys:
for property in properties:
if primaryKey['name']==property['name']:
primaryKey['type'] = property['type']
self.preprocessType(primaryKey)
break
def preprocessDateFormats(self,key):
"""docstring for preprocessDateFormats"""
formats = []
for property in key['properties']:
if 'format' in property:
format = property['format']
formatIsUnique = True
for f in formats:
if f['format']==format:
property['_format_name_'] = f['name']
formatIsUnique = False
break
if formatIsUnique:
formatObject = { "name" : property['name'], "format" : format }
property['_format_name_'] = property['name']
formats.append(formatObject)
key['_formats_'] = formats
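# Illustrative example (values made up): given properties
#   [{'name': 'createdAt', 'format': 'yyyy-MM-dd'},
#    {'name': 'updatedAt', 'format': 'yyyy-MM-dd'}]
# only the first format is kept in key['_formats_'], and both properties end
# up with '_format_name_' set to 'createdAt'.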
def preprocessModel(self,model,hashes):
"""Preprocess entity"""
# print("preprocess model " + str(model))
print("processing " + model['entityName'])
if 'primaryKeys' in model:
self.preprocessPrimaryKeys(model['primaryKeys'],model,hashes)
if not '_entity_imports_' in model:
model['_entity_imports_'] = []
model['_entity_imports_'].append({ "name" : model['entityName'] })# self.finalEntityFileName(model['entityName'],model) + '.h' })
for property in model['properties']:
self.preprocessProperty(property,model,hashes)
self.preprocessDateFormats(model)
self.preprocessRelationships(model,hashes)
return model
def preprocessRelationships(self,model,hashes):
"""docstring for preprocessRelationships"""
if 'relationships' in model:
relationships = model['relationships']
self.preprocessList(relationships)
for relationship in relationships:
relationship['entityName'] = self.finalEntityFileName(relationship['entityName'],model)
if (relationship['type']=='toMany'):
relationship['_toMany_'] = True
else:
relationship['_toMany_'] = False
self.preprocessModel(relationship,hashes)
def preprocessResultValue(self,resultValue):
"""docstring for preprocessResultValue"""
if 'type' in resultValue:
type = resultValue['type']
if type=='string':
resultValue['okValue'] = '@"' + resultValue['okValue'] + '"'
resultValue['errorValue'] = '@"' + resultValue['errorValue'] + '"'
elif type=='integer':
resultValue['okValue'] = "@" + resultValue['okValue']
resultValue['errorValue'] = "@" + resultValue['errorValue']
else:
print("Error: unknown result value type: " + type)
sys.exit()
self.preprocessType(resultValue)
def preprocess(self,hash,hashes):
# hash['_entity_imports_'] = []
if hash!=None and 'resultValue' in hash:
resultValue = hash['resultValue']
self.preprocessResultValue(resultValue)
entity = hash
if hash!=None and 'content' in hash:
contents = hash['content']
self.preprocessList(contents)
for content in contents:
if content!=None and 'model' in content:
entity = content['model']
self.preprocessModel(entity,hashes)
def process(self,hashes,templates,product,platform,platformDir):
assert hashes
assert templates
assert product
assert platform
assert platformDir
self.globalPlatform = self.globalProcessor(platform)
hashes = self.sortHashes(hashes)
for hashFile in hashes:
hash = self.readHash(hashFile)
entities = self.findEntities(hash)
for templateFile in templates:
if re.search('entity', templateFile, re.IGNORECASE):
for entity in entities:
entityCopy = copy.deepcopy(entity)
self.continueProcess(entityCopy,entity['entityName'],hashes,templateFile,product,platform,platformDir)
else:
hashCopy = copy.deepcopy(hash)
self.continueProcess(hashCopy,hashFile,hashes,templateFile,product,platform,platformDir)
def continueProcess(self,hash,hashFile,hashes,templateFile,product,platform,platformDir):
# Global Platform preprocess
if self.globalPlatform!=None:
if self.config.verbose:
print('Global Preprocessing')
self.globalPlatform.preprocess(hash,hashes)
if self.config.verbose:
print("Hash after global preprocess: " + str(hash))
self.preprocess(hash,hashes)
if self.config.verbose:
print("Hash after product preprocess: " + str(hash))
if self.config.verbose:
with open("/tmp/final_hash" + os.path.basename(hashFile) + "_" + product + "_" + platform, "w") as f:
f.write(str(hash))
renderer = self.renderer(platformDir)
self.renderTemplate(renderer,templateFile,hash,hashes,product,platform,platformDir)
# def renderTemplate(self,renderer,templateFile,hash,hashes,product,platform,platformDir):
# """docstring for renderTemplate"""
# assert renderer
# assert templateFile
# assert hash
# assert hashes
# assert product
# assert platform
# assert platformDir
#
#
# entities = self.findEntities(hash)
#
# if re.search('entity', templateFile, re.IGNORECASE):
# print("entity in template name")
#
# for entity in entities:
# entity['_globals_'] = hash['_globals_']
#
# newKey = self.preprocessModel(entity,hashes)
# entity.update(newKey)
#
# fileName = self.finalFileName(os.path.basename(templateFile),entity['entityName'],entity)
# print("final file name: " + fileName)
# # hash['_current_model_'] = entity
# self.performRenderTemplate(renderer,templateFile,fileName,entity,hashes,product,platform,platformDir)
# else:
# fileName = self.finalFileName(os.path.basename(templateFile),None,hash)
# self.performRenderTemplate(renderer,templateFile,fileName,hash,hashes,product,platform,platformDir)
# def performRenderTemplate(self,renderer,templateFile,fileName,hash,hashes,product,platform,platformDir):
# """docstring for renderTemplate"""
# assert renderer
# assert templateFile
# assert hash
# assert hashes
# assert product
# assert platform
# assert platformDir
#
# template = self.readTemplate(templateFile)
#
# if hash!=None and '_globals_' in hash:
# # Remove .template
# realFileName, extension = os.path.splitext(fileName)
#
# # Split final file name into components
# baseName, extension = os.path.splitext(realFileName)
#
# hash['_globals_']['fileName'] = realFileName
# hash['_globals_']['fileBaseName'] = baseName
# hash['_globals_']['fileExtension'] = extension
#
# if self.config.verbose:
# print('Hash: ' + str(hash))
#
# rendered = renderer.render_path(templateFile,hash)
#
# outputPath = self.outputDir(product,platform,fileName)
#
# Utils.printOutput("Rendering to file: " + outputPath)
#
# with open(outputPath, "w") as f:
# f.write(rendered)
#
def finalEntityFileName(self,fileName,hash):
"""docstring for finalFileName"""
# print("Hash " + str(hash))
prefix = ""
if hash!=None and '_globals_' in hash:
globals = hash['_globals_']
if 'prefix' in globals:
prefix = globals['prefix']
fileName = prefix + fileName
return fileName
def finalFileName(self,fileName,hash):
"""docstring for finalFileName"""
prefix = ""
if hash!=None and '_globals_' in hash:
globals = hash['_globals_']
if 'prefix' in globals:
prefix = globals['prefix']
if self.config.verbose:
print("PREFIX " + prefix)
# print("Hash " + str(hash))
serviceName = ""
if hash!=None and 'serviceName' in hash:
serviceName = self.stringUtils.capitalize(hash['serviceName'])
print("filename " + fileName)
if 'entityName' in hash:
entityName = hash['entityName']
print("entityname " + entityName)
fileName = self.entityPattern.sub(entityName, fileName)
print("filename " + fileName)
fileName = self.servicePattern.sub(serviceName, fileName)
fileName = prefix + fileName
return fileName
|
|
def prototype_state():
state = {}
# Random seed
state['seed'] = 1234
# Logging level
state['level'] = 'DEBUG'
# ----- DATA -----
# (all Nones in this section are placeholders for required values)
# Source sequences (must be a singleton list for backward compatibility)
state['source'] = [None]
# Target sequences (must be a singleton list for backward compatibility)
state['target'] = [None]
# index -> word dict for the source language
state['indx_word'] = None
# index -> word dict for the target language
state['indx_word_target'] = None
# word -> index dict for the source language
state['word_indx'] = None
# word -> index dict for the target language
state['word_indx_trgt'] = None
# ----- VOCABULARIES -----
# (all Nones in this section are placeholders for required values)
# A string representation for the unknown word placeholder for both languages
state['oov'] = 'UNK'
# These are unknown word placeholders
state['unk_sym_source'] = 1
state['unk_sym_target'] = 1
# These are end-of-sequence marks
state['null_sym_source'] = None
state['null_sym_target'] = None
# These are vocabulary sizes for the source and target languages
state['n_sym_source'] = None
state['n_sym_target'] = None
# ----- MODEL STRUCTURE -----
# The components of the annotations produced by the Encoder
state['last_forward'] = True
state['last_backward'] = False
state['forward'] = False
state['backward'] = False
# Turns on "search" mechanism
state['search'] = False
# Turns on using the shortcut from the previous word to the current one
state['bigram'] = True
# Turns on initialization of the first hidden state from the annotations
state['bias_code'] = True
# Turns on using the context to compute the next Decoder state
state['decoding_inputs'] = True
# Turns on an intermediate maxout layer in the output
state['deep_out'] = True
# Heights of hidden layers' stacks in encoder and decoder
# WARNING: has not been used for quite a while and most probably
# does not work...
state['encoder_stack'] = 1
state['decoder_stack'] = 1
# Use the top-most recurrent layer states as annotations
# WARNING: makes sense only for hierarchical RNNs, which
# are in fact currently not supported
state['take_top'] = True
# Activates an age-old bug fix - should always be true
state['check_first_word'] = True
state['eps'] = 1e-10
# ----- MODEL COMPONENTS -----
# Low-rank approximation activation function
state['rank_n_activ'] = 'lambda x: x'
# Hidden-to-hidden activation function
state['activ'] = 'lambda x: TT.tanh(x)'
# Nonlinearity for the output
state['unary_activ'] = 'Maxout(2)'
# Hidden layer configuration for the forward encoder
state['enc_rec_layer'] = 'RecurrentLayer'
state['enc_rec_gating'] = True
state['enc_rec_reseting'] = True
state['enc_rec_gater'] = 'lambda x: TT.nnet.sigmoid(x)'
state['enc_rec_reseter'] = 'lambda x: TT.nnet.sigmoid(x)'
# Hidden layer configuration for the decoder
state['dec_rec_layer'] = 'RecurrentLayer'
state['dec_rec_gating'] = True
state['dec_rec_reseting'] = True
state['dec_rec_gater'] = 'lambda x: TT.nnet.sigmoid(x)'
state['dec_rec_reseter'] = 'lambda x: TT.nnet.sigmoid(x)'
# Default hidden layer configuration, which is effectively used for
# the backward RNN
# TODO: separate back_enc_ configuration and convert the old states
# to have it
state['rec_layer'] = 'RecurrentLayer'
state['rec_gating'] = True
state['rec_reseting'] = True
state['rec_gater'] = 'lambda x: TT.nnet.sigmoid(x)'
state['rec_reseter'] = 'lambda x: TT.nnet.sigmoid(x)'
# ----- SIZES ----
# Dimensionality of hidden layers
state['dim'] = 1000
# Dimensionality of low-rank approximation
state['rank_n_approx'] = 100
# k for the maxout stage of output generation
state['maxout_part'] = 2.
# ----- WEIGHTS, INITIALIZATION -----
# This is the bias applied in the recurrent layer. It is likely
# to be zero as MultiLayer already has a bias.
state['bias'] = 0.
# Weights initializer for the recurrent net matrices
state['rec_weight_init_fn'] = 'sample_weights_orth'
state['rec_weight_scale'] = 1.
# Weights initializer for other matrices
state['weight_init_fn'] = 'sample_weights_classic'
state['weight_scale'] = 0.01
# ---- REGULARIZATION -----
# WARNING: dropout is not tested and probably does not work.
# Dropout in output layer
state['dropout'] = 1.
# Dropout in recurrent layers
state['dropout_rec'] = 1.
# WARNING: weight noise regularization is not tested
# and most probably does not work.
# Random weight noise regularization settings
state['weight_noise'] = False
state['weight_noise_rec'] = False
state['weight_noise_amount'] = 0.01
# Threshold to clip the gradient
state['cutoff'] = 1.
# A magic gradient clipping option that you should never change...
state['cutoff_rescale_length'] = 0.
# ----- TRAINING METHOD -----
# Turns on noise contrastive estimation instead of maximum likelihood
state['use_nce'] = False
# Choose optimization algorithm
state['algo'] = 'SGD_adadelta'
# Adadelta hyperparameters
state['adarho'] = 0.95
state['adaeps'] = 1e-6
# Early stopping configuration
# WARNING: was never changed during machine translation experiments,
# as early stopping was not used.
state['patience'] = 1
state['lr'] = 0.1
state['minlr'] = 0
# Batch size
state['bs'] = 64
# We take this many minibatches, merge them,
# sort the sentences according to their length and create
# this many new batches with less padding.
state['sort_k_batches'] = 10
# Maximum sequence length
state['seqlen'] = 30
# Turns on trimming the trailing paddings from batches
# consisting of short sentences.
state['trim_batches'] = True
# Loop through the data
state['use_infinite_loop'] = True
# Start from a random entry
state['shuffle'] = False
# ----- TRAINING PROCESS -----
# Prefix for the model, state and timing files
state['prefix'] = 'phrase_'
# Specifies whether old model should be reloaded first
state['reload'] = True
# When set to 0 each new model dump will be saved in a new file
state['overwrite'] = 1
# Number of batches to process
state['loopIters'] = 2000000
# Maximum number of minutes to run
state['timeStop'] = 24*60*31
# Error level to stop at
state['minerr'] = -1
# Reset data iteration every this many epochs
state['reset'] = -1
# Frequency of training error reports (in number of batches)
state['trainFreq'] = 1
# Frequency of running hooks
state['hookFreq'] = 13
# Validation frequency
state['validFreq'] = 500
# Model saving frequency (in minutes)
state['saveFreq'] = 10
# Sampling hook settings
state['n_samples'] = 3
state['n_examples'] = 3
# Raise exception if nan
state['on_nan'] = 'raise'
return state
def prototype_phrase_state():
"""This prototype is the configuration used in the paper
'Learning Phrase Representations using RNN Encoder-Decoder
for Statistical Machine Translation' """
state = prototype_state()
# state['source'] = ["/data/lisatmp3/bahdanau/shuffled/phrase-table.en.h5"]
# state['target'] = ["/data/lisatmp3/bahdanau/shuffled/phrase-table.fr.h5"]
# state['indx_word'] = "/data/lisatmp3/chokyun/mt/ivocab_source.pkl"
# state['indx_word_target'] = "/data/lisatmp3/chokyun/mt/ivocab_target.pkl"
# state['word_indx'] = "/data/lisatmp3/chokyun/mt/vocab.en.pkl"
# state['word_indx_trgt'] = "/data/lisatmp3/bahdanau/vocab.fr.pkl"
data_basic = "./dataPreprocess/used/train"
state['target'] = [data_basic + "/binarized_text.shuffled.en.h5"]
state['source'] = [data_basic + "/binarized_text.shuffled.cn.h5"]
state['indx_word'] = data_basic + "/ivocab.cn.pkl"
state['indx_word_target'] = data_basic + "/ivocab.en.pkl"
state['word_indx'] = data_basic + "/vocab.cn.pkl"
state['word_indx_trgt'] = data_basic + "/vocab.en.pkl"
state['null_sym_source'] = 5946
state['null_sym_target'] = 5369
state['n_sym_source'] = state['null_sym_source'] + 1
state['n_sym_target'] = state['null_sym_target'] + 1
return state
def prototype_encdec_state():
"""This prototype is the configuration used to train the RNNenc-30 model from the paper
'Neural Machine Translation by Jointly Learning to Align and Translate' """
state = prototype_state()
data_basic = "./dataPreprocess/used/train"
state['target'] = [data_basic + "/binarized_text.shuffled.en.h5"]
state['source'] = [data_basic + "/binarized_text.shuffled.cn.h5"]
state['indx_word'] = data_basic + "/ivocab.cn.pkl"
state['indx_word_target'] = data_basic + "/ivocab.en.pkl"
state['word_indx'] = data_basic + "/vocab.cn.pkl"
state['word_indx_trgt'] = data_basic + "/vocab.en.pkl"
state['null_sym_source'] = 5946
state['null_sym_target'] = 5369
state['n_sym_source'] = state['null_sym_source'] + 1
state['n_sym_target'] = state['null_sym_target'] + 1
state['seqlen'] = 50
state['bs'] = 100
state['dim'] = 1000
state['rank_n_approx'] = 620
state['prefix'] = 'encdec_'
return state
def prototype_search_state():
"""This prototype is the configuration used to train the RNNsearch-50 model from the paper
'Neural Machine Translation by Jointly Learning to Align and Translate' """
state = prototype_encdec_state()
state['dec_rec_layer'] = 'RecurrentLayerWithSearch'
state['search'] = True
state['last_forward'] = False
state['forward'] = True
state['backward'] = True
state['seqlen'] = 50
state['sort_k_batches'] = 20
state['prefix'] = 'search_'
return state
def prototype_phrase_lstm_state():
state = prototype_phrase_state()
state['enc_rec_layer'] = 'LSTMLayer'
state['enc_rec_gating'] = False
state['enc_rec_reseting'] = False
state['dec_rec_layer'] = 'LSTMLayer'
state['dec_rec_gating'] = False
state['dec_rec_reseting'] = False
state['dim_mult'] = 4
state['prefix'] = 'phrase_lstm_'
return state
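# Illustrative usage (not part of the original configurations): a new
# experiment is typically defined by taking one of the prototypes above and
# overriding a few entries; the values below are arbitrary examples.
def prototype_search_state_small():
    """Example derived configuration built on top of prototype_search_state."""
    state = prototype_search_state()
    state['bs'] = 32                  # smaller batches
    state['seqlen'] = 30              # shorter sequences
    state['prefix'] = 'search_small_'
    return state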
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for saving host usernames. """
import csv
import os
import sys
from datetime import datetime
from query import run_query
def get_pr_reviewers(name, owner):
""" Gets the reviewer usernames for a particular repository.
This method processes the results from the Github API query. It looks
for review requested and pull request review items in the pull request
timeline, since those are generally associated with hosts.
Args:
name: A string containing the repository name.
owner: A string containing the repository owner.
Returns:
A JSON object with the pull request reviewer information.
"""
query = f"""{{
repository(name: "{name}", owner: "{owner}") {{
pullRequests(first: 5) {{
nodes {{
createdAt
resourcePath
timelineItems(first: 100) {{
nodes {{
... on ReviewRequestedEvent {{
requestedReviewer {{
... on User {{
login
}}
}}
}}
... on PullRequestReview {{
author {{
login
}}
}}
}}
}}
}}
}}
}}
}}"""
return run_query(query)
def process_reviewer_query_results(result, host_dict, intern_usernames):
""" Processes the query results for the reviewer usernames.
Updates host_dict if new host usernames are found. Assumes that reviewers
in starter projects are hosts.
Args:
result: The results from the query.
host_dict: Dictionary containing host information.
intern_usernames: Set of known intern usernames.
Returns:
None.
"""
try:
pull_requests = (result["data"]["repository"]["pullRequests"]["nodes"])
except (KeyError, TypeError):
raise Exception(
"Query results for reviewers do not have a structure that is"
" currently supported by RISR.")
start_dates = [
"2020-05-18",
"2020-06-15",
"2020-07-06"
]
start_dates = [datetime.fromisoformat(date) for date in start_dates]
for pull_request in pull_requests:
timeline_items = pull_request["timelineItems"]["nodes"]
for review_item in timeline_items:
if not review_item:
continue
if "requestedReviewer" in review_item:
host = review_item["requestedReviewer"]
if "author" in review_item:
host = review_item["author"]
# Special case in which host account has been deleted
try:
host_username = host["login"]
except (TypeError, KeyError):
continue
# Check if host is already in dictionary and if intern reviewed
# their own pull request.
if host_username not in host_dict and \
host_username not in intern_usernames:
date_created = datetime.fromisoformat(pull_request["createdAt"][:-1])
start_date = start_dates[0]
for date in start_dates:
if date <= date_created:
start_date = date
else:
break
start_date = start_date.strftime("%-m/%-d/%Y")
# Team number is unknown in this case.
host_dict[host_username] = [start_date, "unknown"]
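# Illustrative example of the bucketing above, using the start dates listed
# in this function: a pull request created on 2020-06-20 falls after
# 2020-06-15 but before 2020-07-06, so a newly discovered host on it is
# recorded with the "6/15/2020" start date and an "unknown" team number.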
def get_hosts_from_teams_csv(teams_file, host_dict):
""" Gets host information from the STEP teams CSV file.
Args:
teams_file: File name for STEP teams CSV.
host_dict: Dictionary to be updated with host information.
"""
with open(teams_file, newline="") as in_csv:
reader = csv.DictReader(in_csv)
for row in reader:
start_date = row["Start Date"]
team = row["Team Number"]
host1 = row["Github username 1"]
host2 = row["Github username 2"]
if host1:
host_dict[host1] = [start_date, team]
if host2:
host_dict[host2] = [start_date, team]
def get_interns_from_repos_csv(repos_file, intern_usernames):
""" Gets intern usernames from owners of starter project repositories.
Args:
repos_file: File name for repository CSV.
intern_usernames: Set to be updated with intern usernames.
"""
with open(repos_file, newline="") as in_csv:
reader = csv.DictReader(in_csv)
for row in reader:
if row["repo_type"] == "capstone":
continue
intern_usernames.add(row["owner"])
def get_hosts_from_pr_reviews(repos_file, host_dict, intern_usernames):
""" Gets host username based on pull request reviewers.
Only checks starter project repositories because the capstone projects
had a peer review component. Capstone repositories, which are made in the
googleinterns organization, are ignored.
The test repo_type is used for testing and refers to the RISR repository.
Although RISR is owned by the googleinterns organization, it should not be
skipped.
Args:
repos_file: File name for the repository CSV.
host_dict: Dictionary to be updated with host information.
intern_usernames: Set containing intern usernames.
"""
with open(repos_file, newline="") as in_csv:
reader = csv.DictReader(in_csv)
for row in reader:
if row["repo_type"] == "capstone":
continue
# Ignore repositories made in the googleinterns organization.
if row["owner"] == "googleinterns" and row["repo_type"] != "test":
continue
query_results = get_pr_reviewers(row["name"], row["owner"])
process_reviewer_query_results(
query_results,
host_dict,
intern_usernames
)
def write_host_information(hosts_file, host_dict):
""" Writes host information to CSV file.
Args:
hosts_file: File name for host usernames CSV.
host_dict: Dictionary containing host usernames, start dates,
and team number.
"""
with open(hosts_file, "w", newline="") as out_csv:
writer = csv.writer(out_csv)
writer.writerow(["username", "start_date", "team"])
for host in host_dict:
writer.writerow([host, host_dict[host][0], host_dict[host][1]])
def main():
""" Saves host information from STEP teams CSV and pull request reviews.
Expects file path to STEP teams CSV to be provided in the command
line arguments.
host_info.csv is created to store the host usernames, intern start
date, and team number from the STEP teams CSV.
"""
try:
teams_file = sys.argv[1]
except IndexError:
raise Exception("Usage: host.py <STEP teams CSV>")
if not os.path.isfile(teams_file):
raise Exception("The CSV for the Github usernames does not exist.")
repos_file = "data/repos.csv"
hosts_file = "data/host_info.csv"
host_dict = dict()
get_hosts_from_teams_csv(teams_file, host_dict)
intern_usernames = set()
get_interns_from_repos_csv(repos_file, intern_usernames)
get_hosts_from_pr_reviews(repos_file, host_dict, intern_usernames)
write_host_information(hosts_file, host_dict)
if __name__ == "__main__":
main()
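# Example invocation (the teams CSV path is illustrative):
#   python host.py data/step_teams.csv
# This reads data/repos.csv and writes the results to data/host_info.csv.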
|
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Runs kubernetes e2e test with specified config"""
import argparse
import hashlib
import os
import re
import shutil
import signal
import subprocess
import sys
import traceback
ORIG_CWD = os.getcwd() # Checkout changes cwd
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def check_output(*cmd):
"""Log and run the command, raising on errors, return output"""
print >>sys.stderr, 'Run:', cmd
return subprocess.check_output(cmd)
def check_env(env, *cmd):
"""Log and run the command with a specific env, raising on errors."""
print >>sys.stderr, 'Environment:'
for key, value in env.items():
print >>sys.stderr, '%s=%s' % (key, value)
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd, env=env)
def kubekins(tag):
"""Return full path to kubekins-e2e:tag."""
return 'gcr.io/k8s-testimages/kubekins-e2e:%s' % tag
def parse_env(env):
"""Returns (FOO, BAR=MORE) for FOO=BAR=MORE."""
return env.split('=', 1)
def kubeadm_version(mode):
"""Return string to use for kubeadm version, given the job's mode (ci/pull/periodic)."""
version = ''
if mode in ['ci', 'periodic']:
# This job only runs against the kubernetes repo, and bootstrap.py leaves the
# current working directory at the repository root. Grab the SCM_REVISION so we
# can use the .debs built during the bazel-build job that should have already
# succeeded.
status = re.search(
r'STABLE_BUILD_SCM_REVISION ([^\n]+)',
check_output('hack/print-workspace-status.sh')
)
if not status:
raise ValueError('STABLE_BUILD_SCM_REVISION not found')
version = status.group(1)
# Work-around for release-1.6 jobs, which still upload debs to an older
# location (without os/arch prefixes).
# TODO(pipejakob): remove this when we no longer support 1.6.x.
if version.startswith("v1.6."):
return 'gs://kubernetes-release-dev/bazel/%s/build/debs/' % version
elif mode == 'pull':
version = '%s/%s' % (os.environ['PULL_NUMBER'], os.getenv('PULL_REFS'))
else:
raise ValueError("Unknown kubeadm mode given: %s" % mode)
# The path given here should match jobs/ci-kubernetes-bazel-build.sh
return 'gs://kubernetes-release-dev/bazel/%s/bin/linux/amd64/' % version
class LocalMode(object):
"""Runs e2e tests by calling kubetest."""
def __init__(self, workspace, artifacts):
self.workspace = workspace
self.artifacts = artifacts
self.env = []
self.os_env = []
self.env_files = []
self.add_environment(
'HOME=%s' % workspace,
'WORKSPACE=%s' % workspace,
'PATH=%s' % os.getenv('PATH'),
)
def add_environment(self, *envs):
"""Adds FOO=BAR to the list of environment overrides."""
self.env.extend(parse_env(e) for e in envs)
def add_os_environment(self, *envs):
"""Adds FOO=BAR to the list of os environment overrides."""
self.os_env.extend(parse_env(e) for e in envs)
def add_file(self, env_file):
"""Reads all FOO=BAR lines from env_file."""
with open(env_file) as fp:
for line in fp:
line = line.rstrip()
if not line or line.startswith('#'):
continue
self.env_files.append(parse_env(line))
def add_aws_cred(self, priv, pub, cred):
"""Sets aws keys and credentials."""
self.add_environment('JENKINS_AWS_SSH_PRIVATE_KEY_FILE=%s' % priv)
self.add_environment('JENKINS_AWS_SSH_PUBLIC_KEY_FILE=%s' % pub)
self.add_environment('JENKINS_AWS_CREDENTIALS_FILE=%s' % cred)
def add_gce_ssh(self, priv, pub):
"""Copies priv, pub keys to $WORKSPACE/.ssh."""
ssh_dir = '%s/.ssh' % self.workspace
if not os.path.isdir(ssh_dir):
os.makedirs(ssh_dir)
gce_ssh = '%s/google_compute_engine' % ssh_dir
gce_pub = '%s/google_compute_engine.pub' % ssh_dir
shutil.copy(priv, gce_ssh)
shutil.copy(pub, gce_pub)
self.add_environment(
'JENKINS_GCE_SSH_PRIVATE_KEY_FILE=%s' % gce_ssh,
'JENKINS_GCE_SSH_PUBLIC_KEY_FILE=%s' % gce_pub,
)
def add_service_account(self, path):
"""Sets GOOGLE_APPLICATION_CREDENTIALS to path."""
self.add_environment('GOOGLE_APPLICATION_CREDENTIALS=%s' % path)
def add_k8s(self, *a, **kw):
"""Add specified k8s.io repos (noop)."""
pass
def use_latest_image(self, image_family, image_project):
"""Gets the latest image from the image_family in the image_project."""
out = check_output(
'gcloud', 'compute', 'images', 'describe-from-family',
image_family, '--project=%s' % image_project)
latest_image = next(
(line[6:].strip() for line in out.split('\n') if (
line.startswith('name: '))),
None)
if not latest_image:
raise ValueError(
'Failed to get the latest image from family %s in project %s' % (
image_family, image_project))
# TODO(yguo0905): Support this in GKE.
self.add_environment(
'KUBE_GCE_NODE_IMAGE=%s' % latest_image,
'KUBE_GCE_NODE_PROJECT=%s' % image_project)
print >>sys.stderr, 'Set KUBE_GCE_NODE_IMAGE=%s' % latest_image
print >>sys.stderr, 'Set KUBE_GCE_NODE_PROJECT=%s' % image_project
def start(self, args):
"""Starts kubetest."""
print >>sys.stderr, 'starts with local mode'
env = {}
env.update(self.os_env)
env.update(self.env_files)
env.update(self.env)
# Do not interfere with the local project
project = env.get('PROJECT')
if project:
try:
check('gcloud', 'config', 'set', 'project', env['PROJECT'])
except subprocess.CalledProcessError:
print >>sys.stderr, 'Failed to set project %r' % project
else:
print >>sys.stderr, 'PROJECT not set in job, will use local project'
check_env(env, 'kubetest', *args)
class DockerMode(object):
"""Runs e2e tests via docker run kubekins-e2e."""
def __init__(self, container, artifacts, sudo, tag, mount_paths):
self.tag = tag
try: # Pull a newer version if one exists
check('docker', 'pull', kubekins(tag))
except subprocess.CalledProcessError:
pass
print 'Starting %s...' % container
self.container = container
self.local_artifacts = artifacts
self.artifacts = '/workspace/_artifacts'
self.cmd = [
'docker', 'run', '--rm',
'--name=%s' % container,
'-v', '%s:%s' % (artifacts, self.artifacts),
'-v', '/etc/localtime:/etc/localtime:ro',
]
for path in mount_paths or []:
self.cmd.extend(['-v', path])
if sudo:
self.cmd.extend(['-v', '/var/run/docker.sock:/var/run/docker.sock'])
self._add_env_var('HOME=/workspace')
self._add_env_var('WORKSPACE=/workspace')
def add_environment(self, *envs):
"""Adds FOO=BAR to the -e list for docker.
Host-specific environment variables are ignored."""
# TODO(krzyzacy) change this to a whitelist?
docker_env_ignore = [
'GOOGLE_APPLICATION_CREDENTIALS',
'GOPATH',
'GOROOT',
'HOME',
'PATH',
'PWD',
'WORKSPACE'
]
for env in envs:
key, _value = parse_env(env)
if key in docker_env_ignore:
print >>sys.stderr, 'Skipping environment variable %s' % env
else:
self._add_env_var(env)
def add_os_environment(self, *envs):
"""Adds os envs as FOO=BAR to the -e list for docker."""
self.add_environment(*envs)
def _add_env_var(self, env):
"""Adds a single environment variable to the -e list for docker.
Does not check against any blacklists."""
self.cmd.extend(['-e', env])
def add_file(self, env_file):
"""Adds the file to the --env-file list."""
self.cmd.extend(['--env-file', env_file])
def add_k8s(self, k8s, *repos):
"""Add the specified k8s.io repos into container."""
for repo in repos:
self.cmd.extend([
'-v', '%s/%s:/go/src/k8s.io/%s' % (k8s, repo, repo)])
def add_aws_cred(self, priv, pub, cred):
"""Mounts aws keys/creds inside the container."""
aws_ssh = '/workspace/.ssh/kube_aws_rsa'
aws_pub = '%s.pub' % aws_ssh
aws_cred = '/workspace/.aws/credentials'
self.cmd.extend([
'-v', '%s:%s:ro' % (priv, aws_ssh),
'-v', '%s:%s:ro' % (pub, aws_pub),
'-v', '%s:%s:ro' % (cred, aws_cred),
])
def add_gce_ssh(self, priv, pub):
"""Mounts priv and pub inside the container."""
gce_ssh = '/workspace/.ssh/google_compute_engine'
gce_pub = '%s.pub' % gce_ssh
self.cmd.extend([
'-v', '%s:%s:ro' % (priv, gce_ssh),
'-v', '%s:%s:ro' % (pub, gce_pub),
'-e', 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE=%s' % gce_ssh,
'-e', 'JENKINS_GCE_SSH_PUBLIC_KEY_FILE=%s' % gce_pub])
def add_service_account(self, path):
"""Mounts GOOGLE_APPLICATION_CREDENTIALS inside the container."""
service = '/service-account.json'
self.cmd.extend([
'-v', '%s:%s:ro' % (path, service),
'-e', 'GOOGLE_APPLICATION_CREDENTIALS=%s' % service])
def start(self, args):
"""Runs kubetest inside a docker container."""
print >>sys.stderr, 'starts with docker mode'
cmd = list(self.cmd)
cmd.append(kubekins(self.tag))
cmd.extend(args)
signal.signal(signal.SIGTERM, self.sig_handler)
signal.signal(signal.SIGINT, self.sig_handler)
try:
check(*cmd)
finally: # Ensure docker files are readable by bootstrap
if not os.path.isdir(self.local_artifacts): # May not exist
pass
try:
check('sudo', 'chmod', '-R', 'o+r', self.local_artifacts)
except subprocess.CalledProcessError: # fails outside CI
traceback.print_exc()
def sig_handler(self, _signo, _frame):
"""Stops container upon receive signal.SIGTERM and signal.SIGINT."""
print >>sys.stderr, 'docker stop (signo=%s, frame=%s)' % (_signo, _frame)
check('docker', 'stop', self.container)
def cluster_name(cluster, build):
"""Return or select a cluster name."""
if cluster:
return cluster
if len(build) < 20:
return 'e2e-%s' % build
return 'e2e-%s' % hashlib.md5(build).hexdigest()[:10]
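# Illustrative behaviour of cluster_name (build ids are made up):
#   cluster_name('my-cluster', '42') -> 'my-cluster' (explicit name wins)
#   cluster_name('', '42') -> 'e2e-42' (short build ids are kept as-is)
#   cluster_name('', '20170714-0123456789abcdef') -> 'e2e-' + md5(build)[:10]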
def main(args):
"""Set up env, start kubekins-e2e, handle termination. """
# pylint: disable=too-many-branches,too-many-statements
# Rules for env var priority here in docker:
# -e FOO=a -e FOO=b -> FOO=b
# --env-file FOO=a --env-file FOO=b -> FOO=b
# -e FOO=a --env-file FOO=b -> FOO=a(!!!!)
# --env-file FOO=a -e FOO=b -> FOO=b
#
# So if you overwrite FOO=c for a local run it will take precedence.
#
# dockerized-e2e-runner goodies setup
workspace = os.environ.get('WORKSPACE', os.getcwd())
artifacts = '%s/_artifacts' % workspace
if not os.path.isdir(artifacts):
os.makedirs(artifacts)
container = '%s-%s' % (os.environ.get('JOB_NAME'), os.environ.get('BUILD_NUMBER'))
if args.mode == 'docker':
sudo = args.docker_in_docker or args.build is not None
mode = DockerMode(container, artifacts, sudo, args.tag, args.mount_paths)
elif args.mode == 'local':
mode = LocalMode(workspace, artifacts) # pylint: disable=bad-option-value
else:
raise ValueError(args.mode)
if args.env_file:
for env_file in args.env_file:
mode.add_file(test_infra(env_file))
if args.aws:
# Enforce aws credential/keys exists
for path in [args.aws_ssh, args.aws_pub, args.aws_cred]:
if not os.path.isfile(os.path.expandvars(path)):
raise IOError(path, os.path.expandvars(path))
mode.add_aws_cred(args.aws_ssh, args.aws_pub, args.aws_cred)
if args.gce_ssh:
mode.add_gce_ssh(args.gce_ssh, args.gce_pub)
if args.service_account:
mode.add_service_account(args.service_account)
# TODO(fejta): remove after next image push
mode.add_environment('KUBETEST_MANUAL_DUMP=y')
runner_args = [
'-v',
'--dump=%s' % mode.artifacts,
]
if args.build is not None:
if args.build == '':
# Empty string means --build was passed without any arguments;
# if --build wasn't passed, args.build would be None
runner_args.append('--build')
else:
runner_args.append('--build=%s' % args.build)
k8s = os.getcwd()
if not os.path.basename(k8s) == 'kubernetes':
raise ValueError(k8s)
mode.add_k8s(os.path.dirname(k8s), 'kubernetes', 'release')
# TODO(fejta): move these out of this file
if args.up == 'true':
runner_args.append('--up')
if args.down == 'true':
runner_args.append('--down')
if args.test == 'true':
runner_args.append('--test')
cluster = cluster_name(args.cluster, os.getenv('BUILD_NUMBER', 0))
runner_args.extend(args.kubetest_args)
if args.kubeadm:
version = kubeadm_version(args.kubeadm)
runner_args.extend([
'--kubernetes-anywhere-path=/workspace/kubernetes-anywhere',
'--kubernetes-anywhere-phase2-provider=kubeadm',
'--kubernetes-anywhere-cluster=%s' % cluster,
'--kubernetes-anywhere-kubeadm-version=%s' % version,
])
# TODO(fejta): delete this?
mode.add_os_environment(*(
'%s=%s' % (k, v) for (k, v) in os.environ.items()))
mode.add_environment(
# Boilerplate envs
# Skip gcloud update checking
'CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true',
# Use default component update behavior
'CLOUDSDK_EXPERIMENTAL_FAST_COMPONENT_UPDATE=false',
# E2E
'E2E_NAME=%s' % cluster,
# AWS
'KUBE_AWS_INSTANCE_PREFIX=%s' % cluster,
# GCE
'INSTANCE_PREFIX=%s' % cluster,
'KUBE_GCE_NETWORK=%s' % cluster,
'KUBE_GCE_INSTANCE_PREFIX=%s' % cluster,
# GKE
'CLUSTER_NAME=%s' % cluster,
'KUBE_GKE_NETWORK=%s' % cluster,
)
if args and args.image_family and args.image_project:
mode.use_latest_image(args.image_family, args.image_project)
mode.start(runner_args)
def create_parser():
"""Create argparser."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--mode', default='local', choices=['local', 'docker'])
parser.add_argument(
'--env-file', action="append", help='Job specific environment file')
parser.add_argument(
'--image-family',
help='The image family from which to fetch the latest image')
parser.add_argument(
'--image-project',
help='The image project from which to fetch the test images')
parser.add_argument(
'--aws', action='store_true', help='E2E job runs in aws')
parser.add_argument(
'--aws-ssh',
default=os.environ.get('JENKINS_AWS_SSH_PRIVATE_KEY_FILE'),
help='Path to private aws ssh keys')
parser.add_argument(
'--aws-pub',
default=os.environ.get('JENKINS_AWS_SSH_PUBLIC_KEY_FILE'),
help='Path to pub aws ssh key')
parser.add_argument(
'--aws-cred',
default=os.environ.get('JENKINS_AWS_CREDENTIALS_FILE'),
help='Path to aws credential file')
parser.add_argument(
'--gce-ssh',
default=os.environ.get('JENKINS_GCE_SSH_PRIVATE_KEY_FILE'),
help='Path to .ssh/google_compute_engine keys')
parser.add_argument(
'--gce-pub',
default=os.environ.get('JENKINS_GCE_SSH_PUBLIC_KEY_FILE'),
help='Path to pub gce ssh key')
parser.add_argument(
'--service-account',
default=os.environ.get('GOOGLE_APPLICATION_CREDENTIALS'),
help='Path to service-account.json')
parser.add_argument(
'--mount-paths',
action='append',
help='Paths that should be mounted within the docker container in the form local:remote')
parser.add_argument(
'--build', nargs='?', default=None, const='',
help='Build kubernetes binaries if set, optionally specifying strategy')
parser.add_argument(
'--cluster', default='bootstrap-e2e', help='Name of the cluster')
parser.add_argument(
'--docker-in-docker', action='store_true', help='Enable running docker within docker')
parser.add_argument(
'--kubeadm', choices=['ci', 'periodic', 'pull'])
parser.add_argument(
'--tag', default='v20170714-94e76415', help='Use a specific kubekins-e2e tag if set')
parser.add_argument(
'--test', default='true', help='If we need to run any actual test within kubetest')
parser.add_argument(
'--down', default='true', help='If we need to tear down the e2e cluster')
parser.add_argument(
'--up', default='true', help='If we need to bring up an e2e cluster')
parser.add_argument(
'--kubetest_args',
action='append',
default=[],
help='Send unrecognized args directly to kubetest')
return parser
def parse_args(args=None):
"""Return args, adding unrecognized args to kubetest_args."""
parser = create_parser()
args, extra = parser.parse_known_args(args)
args.kubetest_args += extra
if (args.image_family or args.image_project) and args.mode == 'docker':
raise ValueError(
'--image-family / --image-project is not supported in docker mode')
if bool(args.image_family) != bool(args.image_project):
raise ValueError(
'--image-family and --image-project must be both set or unset')
return args
if __name__ == '__main__':
main(parse_args())
|
|
#!/usr/bin/env python
from __future__ import division, print_function
import logging
FORMAT = '[%(asctime)s] %(name)-15s %(message)s'
DATEFMT = "%H:%M:%S"
logging.basicConfig(format=FORMAT, datefmt=DATEFMT, level=logging.INFO)
import pickle
import theano
import theano.tensor as T
import ipdb
import fuel
from argparse import ArgumentParser
from collections import OrderedDict
from theano import tensor
from fuel.streams import DataStream, ForceFloatX
from fuel.schemes import SequentialScheme
from fuel.transformers import Flatten
from fuel.datasets.binarized_mnist import BinarizedMNIST
from blocks.algorithms import GradientDescent, CompositeRule, StepClipping, RMSProp, Adam, RemoveNotFinite
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import WEIGHTS, BIASES, PARAMETER
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Timing, Printing, ProgressBar
from blocks.extensions.plot import Plot
from blocks.extensions.saveload import SerializeMainLoop
from blocks.extensions.monitoring import DataStreamMonitoring, TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.bricks import Identity, MLP, Tanh
from blocks.bricks.cost import BinaryCrossEntropy
from blocks.bricks.recurrent import SimpleRecurrent, LSTM
from draw import *
from attention import ZoomableAttentionWindow
import datasets  # assumed local helper module providing get_data(dataset)
fuel.config.floatX = theano.config.floatX
#----------------------------------------------------------------------------
def main(name, dataset, epochs, batch_size, learning_rate, oldmodel=None):
if name is None:
name = "att-rw"
print("\nRunning experiment %s" % name)
print(" learning rate: %5.3f" % learning_rate)
print()
image_size, channels, data_train, data_valid, data_test = datasets.get_data(dataset)
train_stream = Flatten(DataStream.default_stream(data_train, iteration_scheme=SequentialScheme(data_train.num_examples, batch_size)))
valid_stream = Flatten(DataStream.default_stream(data_valid, iteration_scheme=SequentialScheme(data_valid.num_examples, batch_size)))
test_stream = Flatten(DataStream.default_stream(data_test, iteration_scheme=SequentialScheme(data_test.num_examples, batch_size)))
#------------------------------------------------------------------------
img_height, img_width = 28, 28
read_N = 12
write_N = 14
inits = {
#'weights_init': Orthogonal(),
'weights_init': IsotropicGaussian(0.001),
'biases_init': Constant(0.),
}
x_dim = img_height * img_width
reader = ZoomableAttentionWindow(img_height, img_width, read_N)
writer = ZoomableAttentionWindow(img_height, img_width, write_N)
# Parameterize the attention reader and writer
mlpr = MLP(activations=[Tanh(), Identity()],
dims=[x_dim, 50, 5],
name="RMLP",
**inits)
mlpw = MLP(activations=[Tanh(), Identity()],
dims=[x_dim, 50, 5],
name="WMLP",
**inits)
# MLP between the reader and writer
mlp = MLP(activations=[Tanh(), Identity()],
dims=[read_N**2, 300, write_N**2],
name="MLP",
**inits)
for brick in [mlpr, mlpw, mlp]:
brick.allocate()
brick.initialize()
#------------------------------------------------------------------------
x = tensor.matrix('features')
hr = mlpr.apply(x)
hw = mlpw.apply(x)
center_y, center_x, delta, sigma, gamma = reader.nn2att(hr)
r = reader.read(x, center_y, center_x, delta, sigma)
h = mlp.apply(r)
center_y, center_x, delta, sigma, gamma = writer.nn2att(hw)
c = writer.write(h, center_y, center_x, delta, sigma) / gamma
x_recons = T.nnet.sigmoid(c)
cost = BinaryCrossEntropy().apply(x, x_recons)
cost.name = "cost"
#------------------------------------------------------------
cg = ComputationGraph([cost])
params = VariableFilter(roles=[PARAMETER])(cg.variables)
algorithm = GradientDescent(
cost=cost,
parameters=params,
step_rule=CompositeRule([
RemoveNotFinite(),
Adam(learning_rate),
StepClipping(3.),
])
#step_rule=RMSProp(learning_rate),
#step_rule=Momentum(learning_rate=learning_rate, momentum=0.95)
)
#------------------------------------------------------------------------
# Setup monitors
monitors = [cost]
for v in [center_y, center_x, delta, sigma, gamma]:
v_mean = v.mean()
v_mean.name = v.name
monitors += [v_mean]
monitors += [aggregation.mean(v)]
train_monitors = monitors[:]
train_monitors += [aggregation.mean(algorithm.total_gradient_norm)]
train_monitors += [aggregation.mean(algorithm.total_step_norm)]
# Live plotting...
plot_channels = [
["cost"],
]
#------------------------------------------------------------
#mnist_train = BinarizedMNIST("train", sources=['features'])
#mnist_test = BinarizedMNIST("test", sources=['features'])
#mnist_train = MNIST("train", binary=True, sources=['features'])
#mnist_test = MNIST("test", binary=True, sources=['features'])
main_loop = MainLoop(
model=Model(cost),
data_stream=test_stream,
algorithm=algorithm,
extensions=[
Timing(),
FinishAfter(after_n_epochs=epochs),
DataStreamMonitoring(
monitors,
test_stream,
prefix="test"),
SerializeMainLoop(name+".pkl"),
#Plot(name, channels=plot_channels),
ProgressBar(),
Printing()])
if oldmodel is not None:
print("Initializing parameters with old model %s"%oldmodel)
with open(oldmodel, "rb") as f:
oldmodel = pickle.load(f)
main_loop.model.set_parameter_values(oldmodel.get_param_values())
del oldmodel
main_loop.run()
#-----------------------------------------------------------------------------
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--name", type=str, dest="name",
default=None, help="Name for this experiment")
parser.add_argument("--epochs", type=int, dest="epochs",
default=25, help="Number of training epochs to do")
parser.add_argument("--bs", "--batch-size", type=int, dest="batch_size",
default=100, help="Size of each mini-batch")
parser.add_argument("--lr", "--learning-rate", type=float, dest="learning_rate",
default=1e-3, help="Learning rate")
args = parser.parse_args()
main(**vars(args))
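# Example invocation (script name and values are illustrative):
#   python attention-rw.py --name att-rw-test --dataset bmnist --epochs 10 --bs 100 --lr 1e-3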
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pexpect
from tempfile import NamedTemporaryFile
from trove.common import cfg
from trove.common import utils
from trove.common.exception import GuestError
from trove.common.exception import ProcessExecutionError
from trove.openstack.common import log as logging
from trove.common.i18n import _
TMP_MOUNT_POINT = "/mnt/volume"
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VolumeDevice(object):
def __init__(self, device_path):
self.device_path = device_path
def migrate_data(self, source_dir):
"""Synchronize the data from the source directory to the new
volume.
"""
self.mount(TMP_MOUNT_POINT, write_to_fstab=False)
if not source_dir[-1] == '/':
source_dir = "%s/" % source_dir
utils.execute("sudo", "rsync", "--safe-links", "--perms",
"--recursive", "--owner", "--group", "--xattrs",
"--sparse", source_dir, TMP_MOUNT_POINT)
self.unmount(TMP_MOUNT_POINT)
def _check_device_exists(self):
"""Check that the device path exists.
Verify that the device path has actually been created and can report
its size; only then is it available for formatting. Retry up to
num_tries times to account for the time lag.
"""
try:
num_tries = CONF.num_tries
LOG.debug("Checking if %s exists." % self.device_path)
utils.execute('sudo', 'blockdev', '--getsize64', self.device_path,
attempts=num_tries)
except ProcessExecutionError:
LOG.exception(_("Error getting device status"))
raise GuestError(_("InvalidDevicePath(path=%s)") %
self.device_path)
def _check_format(self):
"""Checks that an unmounted volume is formatted."""
cmd = "sudo dumpe2fs %s" % self.device_path
LOG.debug("Checking whether %s is formated: %s." %
(self.device_path, cmd))
child = pexpect.spawn(cmd)
try:
i = child.expect(['has_journal', 'Wrong magic number'])
if i == 0:
return
volume_fstype = CONF.volume_fstype
raise IOError(
_('Device path at {0} did not seem to be {1}.').format(
self.device_path, volume_fstype))
except pexpect.EOF:
raise IOError(_("Volume was not formatted."))
child.expect(pexpect.EOF)
def _format(self):
"""Calls mkfs to format the device at device_path."""
volume_fstype = CONF.volume_fstype
format_options = CONF.format_options
cmd = "sudo mkfs -t %s %s %s" % (volume_fstype,
format_options, self.device_path)
volume_format_timeout = CONF.volume_format_timeout
LOG.debug("Formatting %s. Executing: %s." %
(self.device_path, cmd))
child = pexpect.spawn(cmd, timeout=volume_format_timeout)
# child.expect("(y,n)")
# child.sendline('y')
child.expect(pexpect.EOF)
def format(self):
"""Formats the device at device_path and checks the filesystem."""
self._check_device_exists()
self._format()
self._check_format()
def mount(self, mount_point, write_to_fstab=True):
"""Mounts, and writes to fstab."""
LOG.debug("Will mount %s at %s." % (self.device_path, mount_point))
mount_point = VolumeMountPoint(self.device_path, mount_point)
mount_point.mount()
if write_to_fstab:
mount_point.write_to_fstab()
def resize_fs(self, mount_point):
"""Resize the filesystem on the specified device."""
self._check_device_exists()
try:
# check if the device is mounted at mount_point before e2fsck
if not os.path.ismount(mount_point):
utils.execute("e2fsck", "-f", "-p", self.device_path,
run_as_root=True, root_helper="sudo")
utils.execute("resize2fs", self.device_path,
run_as_root=True, root_helper="sudo")
except ProcessExecutionError:
LOG.exception(_("Error resizing file system."))
raise GuestError(_("Error resizing the filesystem: %s") %
self.device_path)
def unmount(self, mount_point):
if os.path.exists(mount_point):
cmd = "sudo umount %s" % mount_point
child = pexpect.spawn(cmd)
child.expect(pexpect.EOF)
def unmount_device(self, device_path):
# unmount if device is already mounted
mount_points = self.mount_points(device_path)
for mnt in mount_points:
LOG.info(_("Device %(device)s is already mounted in "
"%(mount_point)s. Unmounting now.") %
{'device': device_path, 'mount_point': mnt})
self.unmount(mnt)
def mount_points(self, device_path):
"""Returns a list of mount points on the specified device."""
try:
cmd = "grep %s /etc/mtab | awk '{print $2}'" % device_path
stdout, stderr = utils.execute(cmd, shell=True)
return stdout.strip().split('\n')
except ProcessExecutionError:
LOG.exception(_("Error retrieving mount points"))
raise GuestError(_("Could not obtain a list of mount points for "
"device: %s") % device_path)
def set_readahead_size(self, readahead_size,
execute_function=utils.execute):
"""Set the readahead size of disk."""
self._check_device_exists()
try:
execute_function("sudo", "blockdev", "--setra",
readahead_size, self.device_path)
except ProcessExecutionError:
LOG.exception(_("Error setting readhead size to %(size)s "
"for device %(device)s.") %
{'size': readahead_size, 'device': self.device_path})
raise GuestError(_("Error setting readhead size: %s.") %
self.device_path)
class VolumeMountPoint(object):
def __init__(self, device_path, mount_point):
self.device_path = device_path
self.mount_point = mount_point
self.volume_fstype = CONF.volume_fstype
self.mount_options = CONF.mount_options
def mount(self):
if not os.path.exists(self.mount_point):
utils.execute("sudo", "mkdir", "-p", self.mount_point)
LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, "
"volume_type:{2}, mount options:{3}".format(
self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))
cmd = ("sudo mount -t %s -o %s %s %s" %
(self.volume_fstype, self.mount_options, self.device_path,
self.mount_point))
child = pexpect.spawn(cmd)
child.expect(pexpect.EOF)
def write_to_fstab(self):
fstab_line = ("%s\t%s\t%s\t%s\t0\t0" %
(self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))
LOG.debug("Writing new line to fstab:%s" % fstab_line)
with open('/etc/fstab', "r") as fstab:
fstab_content = fstab.read()
with NamedTemporaryFile(delete=False) as tempfstab:
tempfstab.write(fstab_content + fstab_line)
utils.execute("sudo", "install", "-o", "root", "-g", "root", "-m",
"644", tempfstab.name, "/etc/fstab")
os.remove(tempfstab.name)
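# A minimal usage sketch (not part of the upstream API): how the classes above
# are typically combined when provisioning a data volume. The device path,
# mount point and source directory are hypothetical placeholders, and the
# helper is never called at import time.
def _example_provision_volume(device_path='/dev/vdb',
                              mount_point='/var/lib/mysql',
                              source_dir='/var/lib/mysql'):
    """Sketch only; assumes a real block device and passwordless sudo."""
    device = VolumeDevice(device_path)
    device.unmount_device(device_path)  # detach any stale mounts first
    device.format()                     # mkfs plus pre/post sanity checks
    device.migrate_data(source_dir)     # rsync existing data onto the volume
    device.mount(mount_point)           # mount and persist in /etc/fstab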
|
|
import base64
import os
import shutil
import string
import sys
import tempfile
import unittest
from datetime import timedelta
from http import cookies
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import timezone
from .models import SessionStore as CustomDatabaseSession
class SessionTestsMixin:
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertIs(self.session.modified, False)
self.assertIs(self.session.accessed, False)
def test_get_empty(self):
self.assertIsNone(self.session.get('cat'))
def test_store(self):
self.session['cat'] = "dog"
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.session.accessed = False
self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertIsNone(self.session.get('some key'))
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_default_named_argument(self):
self.assertEqual(self.session.pop('some key', default='does not exist'), 'does not exist')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_no_default_keyerror_raised(self):
with self.assertRaises(KeyError):
self.session.pop('some key')
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_update(self):
self.session.update({'update key': 1})
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertIs(self.session.accessed, True)
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.values()), [1])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_keys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.keys()), ['x'])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_items(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_save(self):
self.session.save()
self.assertIs(self.session.exists(self.session.session_key), True)
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertIs(self.session.exists(self.session.session_key), False)
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertIs(self.session.modified, True)
self.assertIs(self.session.accessed, True)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_cycle_with_no_session_cache(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_data = self.session.items()
self.session = self.backend(self.session.session_key)
self.assertIs(hasattr(self.session, '_session_cache'), False)
self.session.cycle_key()
self.assertCountEqual(self.session.items(), prev_data)
def test_save_doesnt_clear_data(self):
self.session['a'] = 'b'
self.session.save()
self.assertEqual(self.session['a'], 'b')
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
session.save()
self.assertNotEqual(session.session_key, '1')
self.assertIsNone(session.get('cat'))
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ''
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = '1234567'
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = '12345678'
self.assertEqual(self.session.session_key, '12345678')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
with self.assertRaises(AttributeError):
set_session_key(self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), False)
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), True)
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertIn('corrupted', calls[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
session = self.backend('someunknownkey')
session.load()
self.assertIsNone(session.session_key)
self.assertIs(session.exists(session.session_key), False)
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, 'someunknownkey')
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
"""
Sessions shouldn't be resurrected by a concurrent request.
"""
# Create new session.
s1 = self.backend()
s1['test_data'] = 'value1'
s1.save(must_create=True)
# Logout in another context.
s2 = self.backend(s1.session_key)
s2.delete()
# Modify session in first context.
s1['test_data'] = 'value2'
with self.assertRaises(UpdateError):
# This should throw an exception as the session is deleted, not
# resurrect the session.
s1.save()
self.assertEqual(s1.load(), {})
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
session_engine = 'django.contrib.sessions.backends.db'
@property
def model(self):
return self.backend.get_model_class()
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = self.model.objects.get(session_key=session_key)
self.assertEqual(str(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in the normal way.
"""
self.session['x'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
# Change it
self.model.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, self.model.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, self.model.objects.count())
with override_settings(SESSION_ENGINE=self.session_engine):
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, self.model.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CustomDatabaseSessionTests(DatabaseSessionTests):
backend = CustomDatabaseSession
session_engine = 'sessions_tests.models'
def test_extra_session_field(self):
# Set the account ID to be picked up by a custom session storage
# and saved to a custom session model database column.
self.session['_auth_user_id'] = 42
self.session.save()
# Make sure that the customized create_model_instance() was called.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, 42)
# Make the session "anonymous".
self.session.pop('_auth_user_id')
self.session.save()
# Make sure that save() on an existing session did the right job.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertIsNone(s.account_id)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertIs(self.session.exists(self.session.session_key), True)
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS.
with self.assertRaises(InvalidCacheBackendError):
self.backend()
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super().setUp()
def tearDown(self):
super().tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
with self.assertRaises(ImproperlyConfigured):
self.backend()
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a/b/c")
@override_settings(
SESSION_ENGINE="django.contrib.sessions.backends.file",
SESSION_COOKIE_AGE=0,
)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([
session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)
])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# One object in the present without an expiry (should be deleted since
# its modification time + SESSION_COOKIE_AGE will be in the past when
# clearsessions runs).
other_session2 = self.backend()
other_session2['foo'] = 'bar'
other_session2.save()
# Three sessions are in the filesystem before clearsessions...
self.assertEqual(3, count_sessions())
management.call_command('clearsessions')
# ... and two are deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertIsNotNone(caches['default'].get(self.session.cache_key))
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertIsNone(caches['default'].get(self.session.cache_key))
self.assertIsNotNone(caches['sessions'].get(self.session.cache_key))
def test_create_and_save(self):
self.session = self.backend()
self.session.create()
self.session.save()
self.assertIsNotNone(caches['default'].get(self.session.cache_key))
class SessionMiddlewareTests(TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]['secure'], True)
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'], True)
self.assertIn(
cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'], '')
self.assertNotIn(
cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
# Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# The value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_update_error_redirect(self):
path = '/foo/'
request = RequestFactory().get(path)
response = HttpResponse()
middleware = SessionMiddleware()
request.session = DatabaseSession()
request.session.save(must_create=True)
request.session.delete()
msg = (
"The request's session was deleted before the request completed. "
"The user may have logged out in a concurrent request, for example."
)
with self.assertRaisesMessage(SuspiciousOperation, msg):
# Handle the response through the middleware. It will try to save
# the deleted session which will cause an UpdateError that's caught
# and raised as a SuspiciousOperation.
middleware.process_response(request, response)
def test_session_delete_on_end(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# The cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_DOMAIN='.example.local', SESSION_COOKIE_PATH='/example/')
def test_session_delete_on_end_with_custom_domain_and_path(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# The cookie was deleted, not recreated.
# A deleted cookie header with a custom domain and path looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0;
# Path=/example/
self.assertEqual(
'Set-Cookie: {}={}; Domain=.example.local; expires=Thu, '
'01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/example/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response['Vary'], 'Cookie')
def test_empty_session_saved(self):
"""
If a session is emptied of data but still has a key, it should still
be updated.
"""
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Set a session key and some data.
middleware.process_request(request)
request.session['foo'] = 'bar'
# Handle the response through the middleware.
response = middleware.process_response(request, response)
self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Empty the session data.
del request.session['foo']
# Handle the response through the middleware.
response = HttpResponse('Session test')
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super().test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
self.session.load()
@unittest.skip("Cookie backend doesn't have an external store to create records in.")
def test_session_load_does_not_create_record(self):
pass
@unittest.skip("CookieSession is stored in the client and there is no way to query it.")
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
pass
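# Illustrative sketch (assumption): the SessionTestsMixin above can be reused to
# exercise a hypothetical third-party backend in the same way as the concrete
# test classes in this module. The 'myproject.sessions.redis' import path and
# RedisSession name are made up for this example.
#
# from myproject.sessions.redis import SessionStore as RedisSession
#
# class RedisSessionTests(SessionTestsMixin, unittest.TestCase):
#     backend = RedisSession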
|
|
import logging
import sys
from collections import OrderedDict
from types import ModuleType
from typing import Dict, Union, List, Tuple, Any, Optional
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.db import DatabaseError
from django.db.models import Model, Field
from .exceptions import SitePrefsException
from .models import Preference
from .signals import prefs_save
from .utils import import_prefs, get_frame_locals, traverse_local_prefs, get_pref_model_admin_class, \
get_pref_model_class, PrefProxy, PatchedLocal, Frame
__PATCHED_LOCALS_SENTINEL = '__siteprefs_locals_patched'
__PREFS_REGISTRY = None
__PREFS_DEFAULT_REGISTRY = OrderedDict()
__MODELS_REGISTRY = {}
LOGGER = logging.getLogger(__name__)
def on_pref_update(*args, **kwargs):
"""Triggered on dynamic preferences model save.
Issues DB save and reread.
"""
Preference.update_prefs(*args, **kwargs)
Preference.read_prefs(get_prefs())
prefs_save.connect(on_pref_update)
def get_prefs() -> dict:
"""Returns a dictionary with all preferences discovered by siteprefs."""
global __PREFS_REGISTRY
if __PREFS_REGISTRY is None:
__PREFS_REGISTRY = __PREFS_DEFAULT_REGISTRY
return __PREFS_REGISTRY
def get_app_prefs(app: str = None) -> dict:
"""Returns a dictionary with preferences for a certain app/module.
:param app: Application name. If not given, it is deduced from the calling frame.
"""
if app is None:
with Frame(stepback=1) as frame:
app = frame.f_globals['__name__'].split('.')[0]
prefs = get_prefs()
if app not in prefs:
return {}
return prefs[app]
def get_prefs_models() -> Dict[str, Model]:
"""Returns registered preferences models indexed by application names."""
return __MODELS_REGISTRY
def bind_proxy(
values: Union[List, Tuple],
category: str = None,
field: Field = None,
verbose_name: str = None,
help_text: str = '',
static: bool = True,
readonly: bool = False
) -> List[PrefProxy]:
"""Binds PrefProxy objects to module variables used by apps as preferences.
:param values: Preference values.
:param category: Category name the preference belongs to.
:param field: Django model field to represent this preference.
:param verbose_name: Field verbose name.
:param help_text: Field help text.
:param static: Leave this preference static (do not store in DB).
:param readonly: Make this field read only.
"""
addrs = OrderedDict()
depth = 3
for local_name, locals_dict in traverse_local_prefs(depth):
addrs[id(locals_dict[local_name])] = local_name
proxies = []
locals_dict = get_frame_locals(depth)
for value in values: # Try to preserve fields order.
id_val = id(value)
if id_val in addrs:
local_name = addrs[id_val]
local_val = locals_dict[local_name]
if isinstance(local_val, PatchedLocal) and not isinstance(local_val, PrefProxy):
proxy = PrefProxy(
local_name, value.val,
category=category,
field=field,
verbose_name=verbose_name,
help_text=help_text,
static=static,
readonly=readonly,
)
app_name = locals_dict['__name__'].split('.')[-2] # x.y.settings -> y
prefs = get_prefs()
if app_name not in prefs:
prefs[app_name] = OrderedDict()
prefs[app_name][local_name.lower()] = proxy
# Replace original pref variable with a proxy.
locals_dict[local_name] = proxy
proxies.append(proxy)
return proxies
def register_admin_models(admin_site: AdminSite):
"""Registers dynamically created preferences models for Admin interface.
:param admin_site: AdminSite object.
"""
global __MODELS_REGISTRY
prefs = get_prefs()
for app_label, prefs_items in prefs.items():
model_class = get_pref_model_class(app_label, prefs_items, get_app_prefs)
if model_class is not None:
__MODELS_REGISTRY[app_label] = model_class
admin_site.register(model_class, get_pref_model_admin_class(prefs_items))
def autodiscover_siteprefs(admin_site: AdminSite = None):
"""Automatically discovers and registers all preferences available in all apps.
:param admin_site: Custom AdminSite object.
"""
import_prefs()
try:
Preference.read_prefs(get_prefs())
except DatabaseError:
# This may occur if run from manage.py (or its wrapper) when db is not yet initialized.
LOGGER.warning('Unable to read preferences from database. Skip.')
else:
if admin_site is None:
admin_site = admin.site
register_admin_models(admin_site)
def patch_locals(depth: int = 2):
"""Temporarily (see unpatch_locals()) replaces all module variables
considered preferences with PatchedLocal objects, so that every
variable has different hash returned by id().
"""
for name, locals_dict in traverse_local_prefs(depth):
locals_dict[name] = PatchedLocal(name, locals_dict[name])
get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL] = True # Sentinel.
def unpatch_locals(depth: int = 3):
"""Restores the original values of module variables
considered preferences if they are still PatchedLocal
and not PrefProxy.
"""
for name, locals_dict in traverse_local_prefs(depth):
if isinstance(locals_dict[name], PatchedLocal):
locals_dict[name] = locals_dict[name].val
del get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL]
class ModuleProxy:
"""Proxy to handle module attributes access."""
def __init__(self):
self._module: Optional[ModuleType] = None
self._prefs = []
def bind(self, module: ModuleType, prefs: List[str]):
"""
:param module: The module to proxy attribute access to.
:param prefs: Preference names. Just to speed up __getattr__.
"""
self._module = module
self._prefs = set(prefs)
def __getattr__(self, name: str) -> Any:
value = getattr(self._module, name)
if name in self._prefs:
# It is a PrefProxy
value = value.value
return value
def proxy_settings_module(depth: int = 3):
"""Replaces a settings module with a Module proxy to intercept
an access to settings.
:param depth: Frame count to go backward.
"""
proxies = []
modules = sys.modules
module_name = get_frame_locals(depth)['__name__']
module_real = modules[module_name]
for name, locals_dict in traverse_local_prefs(depth):
value = locals_dict[name]
if isinstance(value, PrefProxy):
proxies.append(name)
new_module = type(module_name, (ModuleType, ModuleProxy), {})(module_name) # ModuleProxy
new_module.bind(module_real, proxies)
modules[module_name] = new_module
def register_prefs(*args: PrefProxy, **kwargs):
"""Registers preferences that should be handled by siteprefs.
Expects preferences as *args.
Use keyword arguments to batch apply params supported by
``PrefProxy`` to all preferences not constructed by ``pref`` and ``pref_group``.
Batch kwargs:
:param str help_text: Field help text.
:param bool static: Leave this preference static (do not store in DB).
:param bool readonly: Make this field read only.
:param bool swap_settings_module: Whether to automatically replace the settings module
with a special ``ModuleProxy`` object so that dynamic values of settings are
accessed transparently (no need to call ``.value`` on ``PrefProxy`` objects).
"""
swap_settings_module = bool(kwargs.get('swap_settings_module', True))
if __PATCHED_LOCALS_SENTINEL not in get_frame_locals(2):
raise SitePrefsException('Please call `patch_locals()` right before the `register_prefs()`.')
bind_proxy(args, **kwargs)
unpatch_locals()
swap_settings_module and proxy_settings_module()
def pref_group(
title: str,
prefs: Union[List, Tuple],
help_text: str = '',
static: bool = True,
readonly: bool = False
):
"""Marks preferences group.
:param title: Group title
:param prefs: Preferences to group.
:param help_text: Field help text.
:param static: Leave this preference static (do not store in DB).
:param readonly: Make this field read only.
"""
bind_proxy(prefs, title, help_text=help_text, static=static, readonly=readonly)
for proxy in prefs: # For preferences already marked by pref().
if isinstance(proxy, PrefProxy):
proxy.category = title
def pref(
preference: Any,
field: Field = None,
verbose_name: str = None,
help_text: str = '',
static: bool = True,
readonly: bool = False
) -> Optional[PrefProxy]:
"""Marks a preference.
:param preference: Preference variable.
:param field: Django model field to represent this preference.
:param verbose_name: Field verbose name.
:param help_text: Field help text.
:param static: Leave this preference static (do not store in DB).
:param readonly: Make this field read only.
"""
try:
bound = bind_proxy(
(preference,),
field=field,
verbose_name=verbose_name,
help_text=help_text,
static=static,
readonly=readonly,
)
return bound[0]
except IndexError:
return
class preferences:
"""Context manager - main entry point for siteprefs.
.. code-block:: python
from siteprefs.toolbox import preferences
with preferences() as prefs:
prefs(
MY_OPTION_1,
prefs.one(MY_OPTION_2, static=False),
prefs.group('My Group', [prefs.one(MY_OPTION_42)]),
)
"""
one = staticmethod(pref)
group = staticmethod(pref_group)
__call__ = register_prefs
def __enter__(self):
patch_locals(3)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
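# Illustrative usage sketch (not part of this module): how a hypothetical
# application ``settings.py`` could expose module-level options through the
# ``preferences`` context manager defined above. ``MY_TIMEOUT`` and
# ``MY_FEATURE_ON`` are made-up names; kept as comments so nothing runs on
# import of this module.
#
#     from siteprefs.toolbox import preferences
#
#     MY_TIMEOUT = 30
#     MY_FEATURE_ON = True
#
#     with preferences() as prefs:
#         prefs(
#             prefs.one(MY_TIMEOUT, static=False, help_text='Request timeout.'),
#             prefs.group('Feature flags', [prefs.one(MY_FEATURE_ON)]),
#         )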
|
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
# Notes about this script
# 1. This script runs inside arm-none-eabi-gdb-py
# 2. GDB processes commands inside a queue on a worker
# thread. Commands that change the state of the target
# should run from this queue via gdb.post_event(cb).
# 3. After running a command that changes the state of a target
# in the background, like "continue&", the code needs to
# return so the event processing that occurs on the worker
# thread can continue. No target state changes will be
# seen until the worker thread gets a chance to process the data.
# 4. To make the code flow better with #3 the main test function,
# run_test, takes advantage of the 'yield' call. After
# performing a background operation that causes the target to run,
# 'yield' must be called with a timeout value. The code
# will wait for the target to halt for the time specified and
# if no signal arrives in that time the target is explicitly
# halted.
# 5. Only use packages in the standard library in this script.
# Since the current version of arm-none-eabi-gdb-py.exe is
# only 32 bit on Windows, there must be a 32 bit Python
# install for it to work. If your primary version of Python
# is 64 bit, you can install the 32 bit version as a
# non-primary version. This will allow arm-none-eabi-gdb-py.exe
# to work. The only problem with this is that when pyOCD
# is installed through pip, only the 64 bit version of
# pyOCD and its dependencies will get installed.
# If only the standard library is used here then this
# script will have no external dependencies.
import gdb
from time import time
from threading import Timer
from functools import partial
from random import randrange
from itertools import product
import traceback
import json
import sys
# We expect arm-none-eabi-gdb-py to only run Python 2.x. If it moves
# to Python 3, we need to know about it, so print a warning.
print("arm-none-eabi-gdb-py is using Python %s" % sys.version)
if sys.version_info.major != 2:
print("*** Unexpected arm-none-eabi-gdb-py Python version %d! ***" % sys.version_info.major)
DEFAULT_TIMEOUT = 2.0
STACK_OFFSET = 0x800
TEST_RAM_OFFSET = 0x800
MAX_TEST_SIZE = 0x1000
MAX_BKPT = 10
assert STACK_OFFSET < MAX_TEST_SIZE
assert TEST_RAM_OFFSET < MAX_TEST_SIZE
monitor_commands = [
"help",
"help reset",
"help halt",
"init",
"reset",
"reset halt",
"halt",
"arm semihosting enable",
"arm semihosting disable",
"set vector-catch n",
"set vector-catch a",
"set step-into-interrupt on",
"set step-into-interrupt off",
# Invalid Command
"fawehfawoefhad"
]
SIZE_TO_C_TYPE = {
1: "uint8_t*",
2: "uint16_t*",
4: "uint32_t*",
}
TO_GDB_ACCESS = {
"read": gdb.WP_READ,
"write": gdb.WP_WRITE,
"read_write": gdb.WP_ACCESS,
}
def gdb_execute(cmd):
print("Executing command:", cmd)
gdb.execute(cmd)
def test_step_type(step_type):
step_count = 20
start = time()
for _ in range(step_count):
gdb_execute(step_type)
end = time()
seconds_per_operation = (end - start) / step_count
return seconds_per_operation
def is_event_breakpoint(event, bkpt=None):
if not isinstance(event, gdb.BreakpointEvent):
return False
if bkpt is None:
return True
return event.breakpoints[-1] is bkpt
def is_event_signal(event, signal_name):
if not isinstance(event, gdb.SignalEvent):
return False
return event.stop_signal == signal_name
def has_read(name):
if name == "read":
return True
if name == "read_write":
return True
assert name == "write"
return False
def has_write(name):
if name == "write":
return True
if name == "read_write":
return True
assert name == "read"
return False
def size_to_type(size):
return SIZE_TO_C_TYPE[size]
def to_gdb_access(access):
return TO_GDB_ACCESS[access]
def should_trigger_break(bkpt_size, bkpt_access,
bkpt_addr, size, access, addr):
left_access_addr = addr
right_access_addr = addr + size - 1
left_bkpt_addr = bkpt_addr
right_bkpt_addr = bkpt_addr + bkpt_size
overlap = (left_access_addr <= right_bkpt_addr and
right_access_addr >= left_bkpt_addr)
read_break = has_read(bkpt_access) and has_read(access)
write_break = has_write(bkpt_access) and has_write(access)
should_break = overlap and (read_break or write_break)
return should_break
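# Worked example (illustrative): with the overlap test above, a 4-byte "write"
# watchpoint at 0x2000 spans 0x2000..0x2004 (bkpt_addr + bkpt_size), so:
#   should_trigger_break(4, "write", 0x2000, 2, "read", 0x2002)  -> False
#     (addresses overlap, but the watchpoint has no read access)
#   should_trigger_break(4, "write", 0x2000, 1, "write", 0x2003) -> True
#     (addresses overlap and both sides include write access)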
def valid_watchpoint(bkpt_size, bkpt_access, bkpt_addr):
# Unaligned breakpoints are not allowed
return bkpt_addr % bkpt_size == 0
# Initial setup
testn = int(gdb.parse_and_eval("$testn"))
test_param_filename = "test_params%d.txt" % testn
with open(test_param_filename, "rb") as f:
test_params = json.loads(f.read())
def run_test():
test_result = {}
test_port = test_params["test_port"]
rom_start = test_params['rom_start']
ram_start = test_params['ram_start']
ram_length = test_params['ram_length']
invalid_addr = test_params["invalid_start"]
error_on_invalid_access = test_params["expect_error_on_invalid_access"]
ignore_hw_bkpt_result = test_params["ignore_hw_bkpt_result"]
target_test_elf = test_params["test_elf"]
assert ram_length >= MAX_TEST_SIZE
stack_addr = ram_start + STACK_OFFSET
test_ram_addr = ram_start + TEST_RAM_OFFSET
fail_count = 0
try:
# Turn off confirmations that would block the script
gdb_execute("set pagination off")
gdb_execute("set confirm off")
# Allow GDB to access even unmapped regions
gdb_execute("set mem inaccessible-by-default off")
# Set raw logging
gdb_execute("set remotelogfile gdb_test_raw%d.txt" % testn)
# Connect to server
gdb_execute("target remote localhost:%d" % test_port)
# Show memory regions, useful for debug and verification.
gdb_execute("info mem")
# Possibly useful other commands for reference:
# info breakpoints
# info mem
# show code-cache
# show stack-cache
# show dcache
# show mem inaccessible-by-default
# show can-use-hw-watchpoints
# info all-registers
# set logging file gdb.txt
# set logging on
# Test running the monitor commands
for command in monitor_commands:
gdb_execute("mon %s" % command)
# Load target-specific test program into flash.
gdb_execute("load %s" % target_test_elf)
# Reset the target and let it run so it has
# a chance to disable the watchdog
gdb_execute("mon reset halt")
gdb_execute("c&")
event = yield(0.1)
if not is_event_signal(event, "SIGINT"):
fail_count += 1
print("Error - target not interrupted as expected")
# Load test program and symbols
test_binary = "../src/gdb_test_program/gdb_test.bin"
test_elf = "../src/gdb_test_program/gdb_test.elf"
gdb_execute("restore %s binary 0x%x" % (test_binary, ram_start))
gdb_execute("add-symbol-file %s 0x%x" % (test_elf, ram_start))
# Set pc to the test program. Make sure
# interrupts are disabled to prevent
# other code from running.
gdb_execute("set $primask = 1")
gdb_execute("set $sp = 0x%x" % stack_addr)
gdb_execute("b main")
breakpoint = gdb.Breakpoint("main")
gdb_execute("set $pc = main")
gdb_execute("c&")
event = yield(DEFAULT_TIMEOUT)
if not is_event_breakpoint(event, breakpoint):
fail_count += 1
print("Error - could not set pc to function")
breakpoint.delete()
## Stepping removed as a workaround for a GDB bug. Launchpad issue tracking this is here:
## https://bugs.launchpad.net/gcc-arm-embedded/+bug/1700595
#
# # Test the speed of the different step types
# test_result["step_time_si"] = test_step_type("si")
# test_result["step_time_s"] = test_step_type("s")
# test_result["step_time_n"] = test_step_type("n")
test_result["step_time_si"] = -1
test_result["step_time_s"] = -1
test_result["step_time_n"] = -1
# TODO,c1728p9 - test speed getting stack trace
# TODO,c1728p9 - test speed with cache turned on
# TODO,c1728p9 - check speed vs breakpoints
# Let target run to initialize variables
gdb_execute("c&")
event = yield(0.1)
if not is_event_signal(event, "SIGINT"):
fail_count += 1
print("Error - target not interrupted as expected")
# Check number of supported breakpoints, along
# with graceful handling of a request using
# more than the supported number of breakpoints
break_list = []
for i in range(MAX_BKPT):
addr = rom_start + i * 4
breakpoint = gdb.Breakpoint("*0x%x" % addr)
break_list.append(breakpoint)
while True:
try:
gdb_execute("c&")
yield(0.1)
break
except gdb.error:
bkpt = break_list.pop()
bkpt.delete()
test_result["breakpoint_count"] = len(break_list)
for bkpt in break_list:
bkpt.delete()
# Check number of supported watchpoints, along
# with graceful handling of a request using
# more than the supported number of watchpoints
watch_list = []
for i in range(MAX_BKPT):
addr = rom_start + i * 4
breakpoint = gdb.Breakpoint("*0x%x" % addr,
gdb.BP_WATCHPOINT, gdb.WP_ACCESS)
watch_list.append(breakpoint)
while True:
try:
gdb_execute("c&")
yield(0.1)
break
except gdb.error:
bkpt = watch_list.pop()
bkpt.delete()
test_result["watchpoint_count"] = len(watch_list)
for bkpt in watch_list:
bkpt.delete()
# Make sure breakpoint is hit as expected
rmt_func = "breakpoint_test"
gdb_execute("set var run_breakpoint_test = 1")
breakpoint = gdb.Breakpoint(rmt_func)
gdb_execute("c&")
event = yield(DEFAULT_TIMEOUT)
if not is_event_breakpoint(event, breakpoint):
fail_count += 1
print("Error - breakpoint 1 test failed")
func_name = gdb.selected_frame().function().name
if rmt_func != func_name:
fail_count += 1
print("ERROR - break occurred at wrong function %s" % func_name)
breakpoint.delete()
gdb_execute("set var run_breakpoint_test = 0")
# Let target run, make sure breakpoint isn't hit
gdb_execute("set var run_breakpoint_test = 1")
gdb_execute("c&")
event = yield(0.1)
if not is_event_signal(event, "SIGINT"):
fail_count += 1
print("Error - target not interrupted as expected")
gdb_execute("set var run_breakpoint_test = 0")
# Make sure hardware breakpoint is hit as expected
rmt_func = "breakpoint_test"
gdb_execute("set var run_breakpoint_test = 1")
gdb_execute("hbreak %s" % rmt_func)
gdb_execute("c&")
event = yield(DEFAULT_TIMEOUT)
# TODO, c1728p9 - determine why there isn't a breakpoint event returned
# if not is_event_breakpoint(event):
# fail_count += 1
# print("Error - breakpoint 2 test failed")
func_name = gdb.selected_frame().function().name
if rmt_func != func_name and not ignore_hw_bkpt_result:
fail_count += 1
print("ERROR - break occurred at wrong function %s" % func_name)
gdb_execute("clear %s" % rmt_func)
gdb_execute("set var run_breakpoint_test = 0")
# Test valid memory write
addr_value_list = [(test_ram_addr + i * 4,
randrange(1, 50)) for i in range(4)]
for addr, value in addr_value_list:
gdb_execute("set *((int *) 0x%x) = 0x%x" % (addr, value))
# Test invalid memory write
invalid_addr_list = [invalid_addr + i * 4 for i in range(4)]
for addr in invalid_addr_list:
try:
gdb_execute("set *((int *) 0x%x) = 0x%x" % (addr, randrange(1, 50)))
if error_on_invalid_access:
fail_count += 1
print("Error - invalid memory write did not fault @ 0x%x" % addr)
except gdb.MemoryError:
pass
# Test valid memory read
for addr, value in addr_value_list:
val_read = gdb.parse_and_eval("*((int *) 0x%x)" % addr)
val_read = int(val_read)
assert value == val_read
# Test invalid memory read
for addr in invalid_addr_list:
try:
gdb_execute("x 0x%x" % addr)
if error_on_invalid_access:
fail_count += 1
print("Error - invalid memory read did not fault @ 0x%x" % addr)
except gdb.MemoryError:
pass
# Test watchpoints
access_addr = long(gdb.parse_and_eval("&watchpoint_write_buffer[1]"))
bkpt_sizes = [1, 2, 4]
bkpt_accesses = ["read", "write", "read_write"]
# use "range(-4, 8, 1)" for extended testing
bkpt_addresses = [access_addr + offset for offset in range(0, 4, 1)]
sizes = [1, 2, 4]
accesses = ["read", "write", "read_write"]
addresses = [access_addr]
generator = product(bkpt_sizes, bkpt_accesses, bkpt_addresses,
sizes, accesses, addresses)
for bkpt_size, bkpt_access, bkpt_addr, size, access, addr in generator:
gdb_size = size_to_type(bkpt_size)
gdb_access = to_gdb_access(bkpt_access)
gdb_execute("set var watchpoint_write = %i" %
(1 if has_write(access) else 0))
gdb_execute("set var watchpoint_read = %i" %
(1 if has_read(access) else 0))
gdb_execute("set var watchpoint_size = %i" % size)
gdb_execute("set var write_address = %i" % addr)
breakpoint = gdb.Breakpoint("*(%s)0x%x" % (gdb_size, bkpt_addr),
gdb.BP_WATCHPOINT, gdb_access)
# Run until breakpoint is hit
gdb_execute("c&")
event = yield(0.1)
bkpt_hit = not is_event_signal(event, "SIGINT")
# Compare against expected result
should_break = should_trigger_break(bkpt_size, bkpt_access,
bkpt_addr, size, access, addr)
valid = valid_watchpoint(bkpt_size, bkpt_access, bkpt_addr)
if valid and bkpt_hit != should_break:
fail_count += 1
print("Error - watchpoint problem:")
print(" Watchpoint was hit %s" % bkpt_hit)
print(" Watchpoint should be hit %s" % should_break)
print(" bkpt_size %s, bkpt_access %s, bkpt_address 0x%x, "
"size %s, access %s, addr 0x%x" %
(bkpt_size, bkpt_access, bkpt_addr, size, access, addr))
print()
breakpoint.delete()
# TODO,c1728p9 - test reading/writing registers
# TODO,c1728p9 - test stepping into interrupts
# TODO,c1728p9 - test vector catch
# -test hard fault handling
# -test reset catch
# TODO,c1728p9 - test signals/hard fault
if fail_count:
print("Test completed with %i errors" % fail_count)
else:
print("Test completed successfully")
except:
print("Main Error:")
traceback.print_exc()
fail_count += 1
finally:
test_result["fail_count"] = fail_count
test_result_filename = "test_results%d.txt" % testn
with open(test_result_filename, "wb") as f:
f.write(json.dumps(test_result))
gdb_execute("detach")
gdb_execute("quit %i" % fail_count)
ignore_events = True
interrupt_timer = None
interrupt_arg = None
generator = run_test()
# Post task to halt the processor
def post_interrupt_task(interrupt_arg):
# Halt the target by interrupting it
# This must only run on GDB's queue
def interrupt_task():
if not interrupt_arg["aborted"]:
gdb_execute("interrupt")
gdb.post_event(interrupt_task)
# Run the main test by repeatedly calling the generator
# This must only run on GDB's queue
def run_generator(event):
global ignore_events
global interrupt_timer
global interrupt_arg
ignore_events = True
if interrupt_timer is not None:
interrupt_timer.cancel()
interrupt_arg["aborted"] = True
interrupt_arg = None
stop_delay = 0
try:
stop_delay = generator.send(event)
except:
print("Error")
traceback.print_exc()
interrupt_arg = {"aborted": False}
interrupt_timer = Timer(stop_delay, post_interrupt_task, [interrupt_arg])
interrupt_timer.start()
ignore_events = False
# Runs on stop events and posts run_generator to the
# main queue so it can continue execution
def stop_handler(event):
if ignore_events:
return
bound_run_generator = partial(run_generator, event)
gdb.post_event(bound_run_generator)
gdb.events.stop.connect(stop_handler)
# Start testing
bound_run_generator = partial(run_generator, None)
gdb.post_event(bound_run_generator)
|
|
"""
Contains a variety of sensory models, specifically models for the
visual pathway.
"""
import topo
import param
import numbergen
import lancet
import numpy
import imagen
from imagen.patterncoordinator import PatternCoordinator, PatternCoordinatorImages
from topo.base.arrayutil import DivideWithConstant
from topo.submodel import Model, ArraySpec # pyflakes:ignore (API import)
from topo import sheet, transferfn, optimized
from collections import OrderedDict
class SensoryModel(Model):
dims = param.List(default=['xy'],class_=str,doc="""
Stimulus dimensions to include, out of the possible list:
:'xy': Position in x and y coordinates""")
num_inputs = param.Integer(default=2,bounds=(1,None),doc="""
How many input patterns to present per unit area at each
iteration, when using discrete patterns (e.g. Gaussians).""")
class VisualInputModel(SensoryModel):
allowed_dims = ['xy', 'or', 'od', 'dy', 'dr', 'sf']
period = param.Number(default=None, allow_None=True, doc="""
Simulation time between pattern updates on the generator
sheets. If None, the model is allowed to compute an appropriate
value for the period property (a period of 1.0 is typical)""")
dataset = param.ObjectSelector(default='Gaussian',objects=
['Gaussian','Nature','FoliageA','FoliageB'],doc="""
Set of input patterns to use::
:'Gaussian': Two-dimensional Gaussians
:'Nature': Shouval's 1999 monochrome 256x256 images
:'FoliageA': McGill calibrated LMS foliage/ image subset (5)
:'FoliageB': McGill calibrated LMS foliage/ image subset (25)""")
dims = param.List(default=['xy','or'],class_=str,doc="""
Stimulus dimensions to include, out of the possible list:
:'xy': Position in x and y coordinates
:'or': Orientation
:'od': Ocular dominance
:'dy': Disparity
:'dr': Direction of motion
:'sf': Spatial frequency
:'cr': Color (if available, see submodels.color)""")
area = param.Number(default=1.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
Linear size of cortical area to simulate.
2.0 gives a 2.0x2.0 Sheet area in V1.""")
dim_fraction = param.Number(default=0.7,bounds=(0.0,1.0),doc="""
Fraction by which the input brightness varies between the two
eyes. Only used if 'od' in 'dims'.""")
contrast=param.Number(default=70, bounds=(0,100),doc="""
Brightness of the input patterns as a contrast (percent). Only
used if 'od' not in 'dims'.""")
sf_spacing=param.Number(default=2.0,bounds=(1,None),doc="""
Determines the factor by which successive SF channels increase
in size. Only used if 'sf' in 'dims'.""")
sf_channels=param.Integer(default=2,bounds=(1,None),softbounds=(1,4),doc="""
Number of spatial frequency channels. Only used if 'sf' in 'dims'.""")
max_disparity = param.Number(default=4.0,bounds=(0,None),doc="""
Maximum disparity between input pattern positions in the left
and right eye. Only used if 'dy' in 'dims'.""")
num_lags = param.Integer(default=4, bounds=(1,None),doc="""
Number of successive frames before showing a new input
pattern. This also determines the number of connections
between each individual LGN sheet and V1. Only used if 'dr' in
'dims'.""")
speed=param.Number(default=2.0/24.0,bounds=(0,None),
softbounds=(0,3.0/24.0),doc="""
Distance in sheet coordinates between successive frames, when
translating patterns. Only used if 'dr' in 'dims'.""")
align_orientations = param.Boolean(default=None,
allow_None=True, doc="""
Whether or not to align pattern orientations together if
composing multiple patterns together. If None,
align_orientations will be set to True when speed is non-zero
(and 'dr' in dims), otherwise it is set to False.""")
__abstract = True
def property_setup(self, properties):
disallowed_dims = [dim for dim in self.dims if dim not in self.allowed_dims]
if disallowed_dims:
raise Exception('%s not in the list of allowed dimensions'
% ','.join(repr(d) for d in disallowed_dims))
properties = super(VisualInputModel, self).property_setup(properties)
# The default period for most Topographica models is 1.0
properties['period'] = 1.0 if self.period is None else self.period
properties['binocular'] = 'od' in self.dims or 'dy' in self.dims
properties['SF']=range(1,self.sf_channels+1) if 'sf' in self.dims else [1]
properties['lags'] = range(self.num_lags) if 'dr' in self.dims else [0]
if 'dr' in self.dims and not numbergen.RandomDistribution.time_dependent:
numbergen.RandomDistribution.time_dependent = True
self.message('Setting time_dependent to True for motion model.')
return properties
def training_pattern_setup(self, **overrides):
# all the below will eventually end up in PatternCoordinator!
disparity_bound = 0.0
position_bound_x = self.area/2.0+0.25
position_bound_y = self.area/2.0+0.25
if 'dy' in self.dims:
disparity_bound = self.max_disparity*0.041665/2.0
#TFALERT: Formerly: position_bound_x = self.area/2.0+0.2
position_bound_x -= disparity_bound
align_orientations = (bool(self.speed) and ('dr' in self.dims)
if self.align_orientations is None
else self.align_orientations)
if 'dr' in self.dims:
position_bound_x+=self.speed*max(self['lags'])
position_bound_y+=self.speed*max(self['lags'])
pattern_labels=['LeftRetina','RightRetina'] if self['binocular'] else ['Retina']
# all the above will eventually end up in PatternCoordinator!
params = dict(features_to_vary=self.dims,
pattern_labels=pattern_labels,
pattern_parameters={'size': 0.088388 if 'or' in self.dims and self.dataset=='Gaussian' \
else 3*0.088388 if self.dataset=='Gaussian' else 10.0,
'aspect_ratio': 4.66667 if 'or' in self.dims else 1.0,
'scale': self.contrast/100.0},
disparity_bound=disparity_bound,
position_bound_x=position_bound_x,
position_bound_y=position_bound_y,
dim_fraction=self.dim_fraction,
reset_period=(max(self['lags'])*self['period'] + self['period']),
speed=self.speed,
align_orientations = align_orientations,
sf_spacing=self.sf_spacing,
sf_max_channel=max(self['SF']),
patterns_per_label=int(self.num_inputs*self.area*self.area))
if self.dataset=='Gaussian':
return PatternCoordinator(**dict(params, **overrides))()
else:
image_folder = 'images/shouval' if self.dataset=='Nature' \
else 'images/mcgill/foliage_a_combined' if self.dataset=='FoliageA' \
else 'images/mcgill/foliage_b_combined' if self.dataset=='FoliageB' \
else None
return PatternCoordinatorImages(image_folder, **dict(params, **overrides))()
@Model.definition
class EarlyVisionModel(VisualInputModel):
retina_density = param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the retina.""")
lgn_density = param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the LGN.""")
lgnaff_strength = param.Number(default=2.33, doc="""
Overall strength of the afferent projection from the retina to
the LGN sheets.""")
lgnaff_radius=param.Number(default=0.375,bounds=(0,None),doc="""
Connection field radius of a unit in the LGN level to units in
a retina sheet.""")
lgnlateral_radius=param.Number(default=0.5,bounds=(0,None),doc="""
Connection field radius of a unit in the LGN level to
surrounding units, in case gain control is used.""")
v1aff_radius=param.Number(default=0.27083,bounds=(0,None),doc="""
Connection field radius of a unit in V1 to units in a LGN
sheet.""")
center_size = param.Number(default=0.07385,bounds=(0,None),doc="""
The size of the central Gaussian used to compute the
center-surround receptive field.""")
surround_size = param.Number(default=4*0.07385,bounds=(0,None),doc="""
The size of the surround Gaussian used to compute the
center-surround receptive field.""")
gain_control_size = param.Number(default=0.25,bounds=(0,None),doc="""
The size of the divisive inhibitory suppressive field used for
contrast-gain control in the LGN sheets. This also acts as the
corresponding bounds radius.""")
gain_control = param.Boolean(default=True,doc="""
Whether to use divisive lateral inhibition in the LGN for
contrast gain control.""")
gain_control_SF = param.Boolean(default=True,doc="""
Whether to use divisive lateral inhibition in the LGN for
contrast gain control across Spatial Frequency Sheets.""")
def property_setup(self, properties):
properties = super(EarlyVisionModel, self).property_setup(properties)
sheet.SettlingCFSheet.joint_norm_fn = optimized.compute_joint_norm_totals_cython
center_polarities=['On','Off']
# Useful for setting up sheets
properties['polarities'] = lancet.List('polarity', center_polarities)
properties['eyes'] = (lancet.List('eye', ['Left','Right'])
if properties['binocular'] else lancet.Identity())
properties['SFs'] = (lancet.List('SF', properties['SF'])
if max(properties['SF'])>1 else lancet.Identity())
return properties
def sheet_setup(self):
sheets = OrderedDict()
sheets['Retina'] = self['eyes']
sheets['LGN'] = self['polarities'] * self['eyes'] * self['SFs']
return sheets
@Model.GeneratorSheet
def Retina(self, properties):
return Model.GeneratorSheet.params(
period=self['period'],
phase=0.05,
nominal_density=self.retina_density,
nominal_bounds=sheet.BoundingBox(radius=self.area/2.0
+ self.v1aff_radius*self.sf_spacing**(max(self['SF'])-1)
+ self.lgnaff_radius*self.sf_spacing**(max(self['SF'])-1)
+ self.lgnlateral_radius),
input_generator=self['training_patterns'][properties['eye']+'Retina'
if 'eye' in properties
else 'Retina'])
@Model.SettlingCFSheet
def LGN(self, properties):
channel=properties['SF'] if 'SF' in properties else 1
sf_aff_multiplier = self.sf_spacing**(max(self['SF'])-1) if self.gain_control_SF else \
self.sf_spacing**(channel-1)
gain_control = self.gain_control_SF if 'SF' in properties else self.gain_control
return Model.SettlingCFSheet.params(
mask = topo.base.projection.SheetMask(),
measure_maps=False,
output_fns=[transferfn.misc.HalfRectify()],
nominal_density=self.lgn_density,
nominal_bounds=sheet.BoundingBox(radius=self.area/2.0
+ self.v1aff_radius
* sf_aff_multiplier
+ self.lgnlateral_radius),
tsettle=2 if gain_control else 0,
strict_tsettle=1 if gain_control else 0)
@Model.matchconditions('LGN', 'afferent')
def afferent_conditions(self, properties):
return {'level': 'Retina', 'eye': properties.get('eye',None)}
@Model.SharedWeightCFProjection
def afferent(self, src_properties, dest_properties):
channel = dest_properties['SF'] if 'SF' in dest_properties else 1
centerg = imagen.Gaussian(size=self.center_size*self.sf_spacing**(channel-1),
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
surroundg = imagen.Gaussian(size=self.surround_size*self.sf_spacing**(channel-1),
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
on_weights = imagen.Composite(generators=[centerg,surroundg],operator=numpy.subtract)
off_weights = imagen.Composite(generators=[surroundg,centerg],operator=numpy.subtract)
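# The two composites implement center-surround (difference-of-Gaussians)
# receptive fields: ON weights are center minus surround, OFF weights are
# surround minus center.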
return Model.SharedWeightCFProjection.params(
delay=0.05,
strength=self.lgnaff_strength,
name='Afferent',
nominal_bounds_template=sheet.BoundingBox(radius=self.lgnaff_radius
*self.sf_spacing**(channel-1)),
weights_generator=on_weights if dest_properties['polarity']=='On' else off_weights)
@Model.matchconditions('LGN', 'lateral_gain_control')
def lateral_gain_control_conditions(self, properties):
return ({'level': 'LGN', 'polarity':properties['polarity']}
if self.gain_control and self.gain_control_SF else
{'level': 'LGN', 'polarity':properties['polarity'],
'SF': properties.get('SF',None)}
if self.gain_control else None)
@Model.SharedWeightCFProjection
def lateral_gain_control(self, src_properties, dest_properties):
#TODO: Are those 0.25 the same as lgnlateral_radius/2.0?
name='LateralGC'
if 'eye' in src_properties:
name+=src_properties['eye']
if 'SF' in src_properties and self.gain_control_SF:
name+=('SF'+str(src_properties['SF']))
return Model.SharedWeightCFProjection.params(
delay=0.05,
dest_port=('Activity'),
activity_group=(0.6,DivideWithConstant(c=0.11)),
weights_generator=imagen.Gaussian(size=self.gain_control_size,
aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()]),
nominal_bounds_template=sheet.BoundingBox(radius=self.gain_control_size),
name=name,
strength=0.6/(2 if self['binocular'] else 1))
|
|
# mssql/pyodbc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
The URL is translated to a PyODBC connection string, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
DSN Connections
^^^^^^^^^^^^^^^
A DSN-based connection is **preferred** overall when using ODBC. A
basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
The above URL will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
Hostname-based connections are **not preferred**; however, they are supported.
The ODBC driver name must be explicitly specified::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
SQL Server driver name specified explicitly. SQLAlchemy cannot
choose an optimal default here as it varies based on platform
and installed drivers.
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A PyODBC connection string can also be sent exactly as specified in
`ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
into the driver using the parameter ``odbc_connect``. The delimiters must be URL escaped, however,
as illustrated below using ``urllib.quote_plus``::
import urllib
params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
Unicode Binds
-------------
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of
UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
dramatically alter how strings are received.  The PyODBC dialect attempts to
use all the information it knows to determine whether or not a Python unicode
literal can be passed directly to the PyODBC driver or not.  While SQLAlchemy
can encode these to bytestrings first, some users have reported that PyODBC
mis-handles bytestrings for certain encodings and requires a Python unicode
object.  At the same time, the author has observed widespread cases where a
Python unicode is completely misinterpreted by PyODBC, particularly when
dealing with the information schema tables used in table reflection, and the
value must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals are sent to
PyODBC for bound parameters can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``.  When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
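For example, to force unicode bound parameters to be encoded to bytestrings
before they reach the driver (a minimal sketch; whether this is necessary
depends on the FreeTDS / UnixODBC / PyODBC combination in use)::

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@some_dsn",
        supports_unicode_binds=False)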
Rowcount Support
----------------
Pyodbc only has partial support for rowcount. See the notes at
:ref:`mssql_rowcount_versioning` for important notes when using ORM
versioning.
"""
from .base import MSExecutionContext, MSDialect, VARBINARY
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util, exc
import decimal
import re
class _ms_numeric_pyodbc(object):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
The routines here are needed for older pyodbc versions
as well as current mxODBC versions.
"""
def bind_processor(self, dialect):
super_process = super(_ms_numeric_pyodbc, self).\
bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and \
isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
elif adjusted > 7:
return self._large_dec_to_string(value)
if super_process:
return super_process(value)
else:
return value
return process
# These routines are needed for older versions of pyodbc;
# as of 2.1.8 this logic is integrated into pyodbc itself.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int) - 1)))
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]),
"".join(
[str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]))
return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
pass
class _VARBINARY_pyodbc(VARBINARY):
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
# pyodbc-specific
return dialect.dbapi.BinaryNull
return process
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
"""where appropriate, issue "select scope_identity()" in the same
statement.
Background on why "scope_identity()" is preferable to "@@identity":
http://msdn.microsoft.com/en-us/library/ms190315.aspx
Background on why we attempt to embed "scope_identity()" into the same
statement as the INSERT:
http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
"""
super(MSExecutionContext_pyodbc, self).pre_exec()
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if self._select_lastrowid and \
self.dialect.use_scope_identity and \
len(self.parameters[0]):
self._embedded_scope_identity = True
self.statement += "; select scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
# We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
# fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error as e:
# no way around this - nextset() consumes the previous set
# so we need to just keep flipping
self.cursor.nextset()
self._lastrowid = int(row[0])
else:
super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
execution_ctx_cls = MSExecutionContext_pyodbc
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pyodbc,
sqltypes.Float: _MSFloat_pyodbc,
VARBINARY: _VARBINARY_pyodbc,
sqltypes.LargeBinary: _VARBINARY_pyodbc,
}
)
def __init__(self, description_encoding=None, **params):
if 'description_encoding' in params:
self.description_encoding = params.pop('description_encoding')
super(MSDialect_pyodbc, self).__init__(**params)
self.use_scope_identity = self.use_scope_identity and \
self.dbapi and \
hasattr(self.dbapi.Cursor, 'nextset')
self._need_decimal_fix = self.dbapi and \
self._dbapi_version() < (2, 1, 8)
def _get_server_version_info(self, connection):
try:
raw = connection.scalar("SELECT SERVERPROPERTY('ProductVersion')")
except exc.DBAPIError:
# SQL Server docs indicate this function isn't present prior to
# 2008; additionally, unknown combinations of pyodbc aren't
# able to run this query.
return super(MSDialect_pyodbc, self).\
_get_server_version_info(connection)
else:
version = []
r = re.compile(r'[.\-]')
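# e.g. a ProductVersion string of "11.0.2100.60" becomes (11, 0, 2100, 60)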
for n in r.split(raw):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def do_rollback(self, connection):
pass
dialect = MSDialect_pyodbc
|
|
#!/usr/bin/env python
import weakref
import logging
l = logging.getLogger("claripy.frontends.replacement_frontend")
from .constrained_frontend import ConstrainedFrontend
class ReplacementFrontend(ConstrainedFrontend):
def __init__(self, actual_frontend, allow_symbolic=None, replacements=None, replacement_cache=None, unsafe_replacement=None, complex_auto_replace=None, auto_replace=None, replace_constraints=None, **kwargs):
super(ReplacementFrontend, self).__init__(**kwargs)
self._actual_frontend = actual_frontend
self._allow_symbolic = True if allow_symbolic is None else allow_symbolic
self._auto_replace = True if auto_replace is None else auto_replace
self._complex_auto_replace = False if complex_auto_replace is None else complex_auto_replace
self._replace_constraints = False if replace_constraints is None else replace_constraints
self._unsafe_replacement = False if unsafe_replacement is None else unsafe_replacement
self._replacements = {} if replacements is None else replacements
self._replacement_cache = weakref.WeakKeyDictionary() if replacement_cache is None else replacement_cache
self._validation_frontend = None
def _blank_copy(self, c):
super(ReplacementFrontend, self)._blank_copy(c)
c._actual_frontend = self._actual_frontend.blank_copy()
c._allow_symbolic = self._allow_symbolic
c._auto_replace = self._auto_replace
c._complex_auto_replace = self._complex_auto_replace
c._replace_constraints = self._replace_constraints
c._unsafe_replacement = self._unsafe_replacement
c._replacements = {}
c._replacement_cache = weakref.WeakKeyDictionary()
if self._validation_frontend is not None:
c._validation_frontend = self._validation_frontend.blank_copy()
else:
c._validation_frontend = None
def _copy(self, c):
super(ReplacementFrontend, self)._copy(c)
self._actual_frontend._copy(c._actual_frontend)
if self._validation_frontend is not None:
self._validation_frontend._copy(c._validation_frontend)
c._replacements = self._replacements
c._replacement_cache = self._replacement_cache
#
# Replacements
#
def add_replacement(self, old, new, invalidate_cache=True, replace=True, promote=True):
if not isinstance(old, Base):
return
if old is new:
return
if not replace and old.cache_key in self._replacements:
return
if not promote and old.cache_key in self._replacement_cache:
return
if not isinstance(new, Base):
if isinstance(new, bool):
new = BoolV(new)
elif isinstance(new, (int, long)):
new = BVV(new, old.length)
else:
return
if invalidate_cache:
self._replacements = dict(self._replacements)
self._replacement_cache = weakref.WeakKeyDictionary(self._replacements)
self._replacements[old.cache_key] = new
self._replacement_cache[old.cache_key] = new
def _replacement(self, old):
if not isinstance(old, Base):
return old
if old.cache_key in self._replacement_cache:
return self._replacement_cache[old.cache_key]
else:
new = old.replace_dict(self._replacement_cache)
if new is not old:
self._replacement_cache[old.cache_key] = new
return new
def _add_solve_result(self, e, er, r):
if not self._auto_replace:
return
if not isinstance(e, Base) or not e.symbolic:
return
if er.symbolic:
return
self.add_replacement(e, r, invalidate_cache=False)
#
# Storable support
#
def downsize(self):
self._actual_frontend.downsize()
self._replacement_cache.clear()
def _ana_getstate(self):
return (
self._allow_symbolic,
self._unsafe_replacement,
self._complex_auto_replace,
self._auto_replace,
self._replace_constraints,
self._replacements,
self._actual_frontend,
self._validation_frontend,
super(ReplacementFrontend, self)._ana_getstate()
)
def _ana_setstate(self, s):
(
self._allow_symbolic,
self._unsafe_replacement,
self._complex_auto_replace,
self._auto_replace,
self._replace_constraints,
self._replacements,
self._actual_frontend,
self._validation_frontend,
base_state
) = s
super(ReplacementFrontend, self)._ana_setstate(base_state)
self._replacement_cache = weakref.WeakKeyDictionary(self._replacements)
#
# Replacement solving
#
def _replace_list(self, lst):
return tuple(self._replacement(c) for c in lst)
def eval(self, e, n, extra_constraints=(), exact=None):
er = self._replacement(e)
ecr = self._replace_list(extra_constraints)
r = self._actual_frontend.eval(er, n, extra_constraints=ecr, exact=exact)
if self._unsafe_replacement: self._add_solve_result(e, er, r[0])
return r
def batch_eval(self, exprs, n, extra_constraints=(), exact=None):
er = self._replace_list(exprs)
ecr = self._replace_list(extra_constraints)
r = self._actual_frontend.batch_eval(er, n, extra_constraints=ecr, exact=exact)
if self._unsafe_replacement:
for i, original in enumerate(exprs):
self._add_solve_result(original, er[i], r[0][i])
return r
def max(self, e, extra_constraints=(), exact=None):
er = self._replacement(e)
ecr = self._replace_list(extra_constraints)
r = self._actual_frontend.max(er, extra_constraints=ecr, exact=exact)
if self._unsafe_replacement: self._add_solve_result(e, er, r)
return r
def min(self, e, extra_constraints=(), exact=None):
er = self._replacement(e)
ecr = self._replace_list(extra_constraints)
r = self._actual_frontend.min(er, extra_constraints=ecr, exact=exact)
if self._unsafe_replacement: self._add_solve_result(e, er, r)
return r
def solution(self, e, v, extra_constraints=(), exact=None):
er = self._replacement(e)
vr = self._replacement(v)
ecr = self._replace_list(extra_constraints)
r = self._actual_frontend.solution(er, vr, extra_constraints=ecr, exact=exact)
if self._unsafe_replacement and r and (not isinstance(vr, Base) or not vr.symbolic):
self._add_solve_result(e, er, vr)
return r
def is_true(self, e, extra_constraints=(), exact=None):
er = self._replacement(e)
ecr = self._replace_list(extra_constraints)
return self._actual_frontend.is_true(er, extra_constraints=ecr, exact=exact)
def is_false(self, e, extra_constraints=(), exact=None):
er = self._replacement(e)
ecr = self._replace_list(extra_constraints)
return self._actual_frontend.is_false(er, extra_constraints=ecr, exact=exact)
def satisfiable(self, extra_constraints=(), exact=None):
ecr = self._replace_list(extra_constraints)
return self._actual_frontend.satisfiable(extra_constraints=ecr, exact=exact)
def _concrete_value(self, e):
c = super(ReplacementFrontend, self)._concrete_value(e)
if c is not None: return c
cr = self._replacement(e)
for b in backends._eager_backends:
try: return b.eval(cr, 1)[0]
except BackendError: pass
return None
def _concrete_constraint(self, e):
c = super(ReplacementFrontend, self)._concrete_value(e)
if c is not None: return c
#if er.is_false():
# raise UnsatError("Replacement frontend received False constraint after replacement.")
if self._replace_constraints:
er = self._replacement(e)
return super(ReplacementFrontend, self)._concrete_constraint(er)
else:
return super(ReplacementFrontend, self)._concrete_constraint(e)
def add(self, constraints, **kwargs):
if self._auto_replace:
for c in constraints:
# the badass thing here would be to use the *replaced* constraint, but
# we don't currently support chains of replacements, so we'll do a
# less effective flat-replacement with the original constraint
# rc = self._replacement(c)
rc = c
if not isinstance(rc, Base) or not rc.symbolic:
continue
if not self._complex_auto_replace:
if rc.op == 'Not':
self.add_replacement(c.args[0], false, replace=False, promote=True, invalidate_cache=True)
elif rc.op == '__eq__' and rc.args[0].symbolic ^ rc.args[1].symbolic:
old, new = rc.args if rc.args[0].symbolic else rc.args[::-1]
self.add_replacement(old, new, replace=False, promote=True, invalidate_cache=True)
else:
satisfiable, replacements = Balancer(backends.vsa, rc, validation_frontend=self._validation_frontend).compat_ret
if not satisfiable:
self.add_replacement(rc, false)
for old, new in replacements:
if old.cardinality == 1:
continue
rold = self._replacement(old)
if rold.cardinality == 1:
continue
self.add_replacement(old, rold.intersection(new))
added = super(ReplacementFrontend, self).add(constraints, **kwargs)
cr = self._replace_list(added)
if not self._allow_symbolic and any(c.symbolic for c in cr):
raise ClaripyFrontendError(
"symbolic constraints made it into ReplacementFrontend with allow_symbolic=False")
self._actual_frontend.add(cr, **kwargs)
return added
from ..ast.base import Base
from ..ast.bv import BVV
from ..ast.bool import BoolV, false
from ..errors import ClaripyFrontendError, BackendError
from ..balancer import Balancer
from ..backend_manager import backends
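# A minimal usage sketch (illustrative only; the inner frontend and the ASTs
# `x` / `BVV(4, 32)` are assumptions, not defined in this module):
#
#     rf = ReplacementFrontend(some_concrete_frontend, auto_replace=True)
#     rf.add([x == BVV(4, 32)])   # auto_replace records x -> 0x4 as a replacement
#     rf.eval(x + 1, 1)           # evaluated against the replaced expression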
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
from quantum import context
from quantum.openstack.common import uuidutils
from quantum.plugins.nec.common import config
from quantum.plugins.nec.db import api as ndb
from quantum.plugins.nec.db import models as nmodels # noqa
from quantum.plugins.nec import ofc_manager
from quantum.tests import base
class OFCManagerTestBase(base.BaseTestCase):
"""Class conisting of OFCManager unit tests"""
def setUp(self):
super(OFCManagerTestBase, self).setUp()
driver = "quantum.tests.unit.nec.stub_ofc_driver.StubOFCDriver"
config.CONF.set_override('driver', driver, 'OFC')
ndb.initialize()
self.addCleanup(ndb.clear_db)
self.ofc = ofc_manager.OFCManager()
self.ctx = context.get_admin_context()
def get_random_params(self):
"""create random parameters for portinfo test"""
tenant = uuidutils.generate_uuid()
network = uuidutils.generate_uuid()
port = uuidutils.generate_uuid()
_filter = uuidutils.generate_uuid()
none = uuidutils.generate_uuid()
return tenant, network, port, _filter, none
class OFCManagerTest(OFCManagerTestBase):
def testa_create_ofc_tenant(self):
"""test create ofc_tenant"""
t, n, p, f, none = self.get_random_params()
self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
self.ofc.create_ofc_tenant(self.ctx, t)
self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
tenant = ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)
self.assertEqual(tenant.ofc_id, "ofc-" + t[:-4])
def testb_exists_ofc_tenant(self):
"""test exists_ofc_tenant"""
t, n, p, f, none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_tenant(self.ctx, t))
self.ofc.create_ofc_tenant(self.ctx, t)
self.assertTrue(self.ofc.exists_ofc_tenant(self.ctx, t))
def testc_delete_ofc_tenant(self):
"""test delete ofc_tenant"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
self.ofc.delete_ofc_tenant(self.ctx, t)
self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
def testd_create_ofc_network(self):
"""test create ofc_network"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
self.ofc.create_ofc_network(self.ctx, t, n)
self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
network = ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)
self.assertEqual(network.ofc_id, "ofc-" + n[:-4])
def teste_exists_ofc_network(self):
"""test exists_ofc_network"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.assertFalse(self.ofc.exists_ofc_network(self.ctx, n))
self.ofc.create_ofc_network(self.ctx, t, n)
self.assertTrue(self.ofc.exists_ofc_network(self.ctx, n))
def testf_delete_ofc_network(self):
"""test delete ofc_network"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.ofc.create_ofc_network(self.ctx, t, n)
self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
self.ofc.delete_ofc_network(self.ctx, n, {'tenant_id': t})
self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
def testg_create_ofc_port(self):
"""test create ofc_port"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.ofc.create_ofc_network(self.ctx, t, n)
ndb.add_portinfo(self.ctx.session, p, "0xabc", 1, 65535,
"00:11:22:33:44:55")
self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
port = {'tenant_id': t, 'network_id': n}
self.ofc.create_ofc_port(self.ctx, p, port)
self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
port = ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)
self.assertEqual(port.ofc_id, "ofc-" + p[:-4])
def testh_exists_ofc_port(self):
"""test exists_ofc_port"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.ofc.create_ofc_network(self.ctx, t, n)
ndb.add_portinfo(self.ctx.session, p, "0xabc", 2, 65535,
"00:12:22:33:44:55")
self.assertFalse(self.ofc.exists_ofc_port(self.ctx, p))
port = {'tenant_id': t, 'network_id': n}
self.ofc.create_ofc_port(self.ctx, p, port)
self.assertTrue(self.ofc.exists_ofc_port(self.ctx, p))
def testi_delete_ofc_port(self):
"""test delete ofc_port"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.ofc.create_ofc_network(self.ctx, t, n)
ndb.add_portinfo(self.ctx.session, p, "0xabc", 3, 65535,
"00:13:22:33:44:55")
port = {'tenant_id': t, 'network_id': n}
self.ofc.create_ofc_port(self.ctx, p, port)
self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
self.ofc.delete_ofc_port(self.ctx, p, port)
self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
def testj_create_ofc_packet_filter(self):
"""test create ofc_filter"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.ofc.create_ofc_network(self.ctx, t, n)
self.assertFalse(ndb.get_ofc_item(self.ctx.session,
'ofc_packet_filter', f))
pf = {'tenant_id': t, 'network_id': n}
self.ofc.create_ofc_packet_filter(self.ctx, f, pf)
self.assertTrue(ndb.get_ofc_item(self.ctx.session,
'ofc_packet_filter', f))
_filter = ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f)
self.assertEqual(_filter.ofc_id, "ofc-" + f[:-4])
def testk_exists_ofc_packet_filter(self):
"""test exists_ofc_packet_filter"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.ofc.create_ofc_network(self.ctx, t, n)
self.assertFalse(self.ofc.exists_ofc_packet_filter(self.ctx, f))
pf = {'tenant_id': t, 'network_id': n}
self.ofc.create_ofc_packet_filter(self.ctx, f, pf)
self.assertTrue(self.ofc.exists_ofc_packet_filter(self.ctx, f))
def testl_delete_ofc_packet_filter(self):
"""test delete ofc_filter"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(self.ctx, t)
self.ofc.create_ofc_network(self.ctx, t, n)
pf = {'tenant_id': t, 'network_id': n}
self.ofc.create_ofc_packet_filter(self.ctx, f, pf)
self.assertTrue(ndb.get_ofc_item(self.ctx.session,
'ofc_packet_filter', f))
self.ofc.delete_ofc_packet_filter(self.ctx, f)
self.assertFalse(ndb.get_ofc_item(self.ctx.session,
'ofc_packet_filter', f))
class OFCManagerTestWithOldMapping(OFCManagerTestBase):
def test_exists_ofc_tenant(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_tenant(self.ctx, t))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_tenant', t, ofc_t, old_style=True)
self.assertTrue(self.ofc.exists_ofc_tenant(self.ctx, t))
def test_delete_ofc_tenant(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_tenant(self.ctx, t))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_tenant', t, ofc_t, old_style=True)
self.assertTrue(self.ofc.exists_ofc_tenant(self.ctx, t))
self.ofc.delete_ofc_tenant(self.ctx, t)
self.assertFalse(self.ofc.exists_ofc_tenant(self.ctx, t))
def test_exists_ofc_network(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_network(self.ctx, n))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_network', n, ofc_n, old_style=True)
self.assertTrue(self.ofc.exists_ofc_network(self.ctx, n))
def test_delete_ofc_network(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_network(self.ctx, n))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_network', n, ofc_n, old_style=True)
self.assertTrue(self.ofc.exists_ofc_network(self.ctx, n))
net = {'tenant_id': t}
self.ofc.delete_ofc_network(self.ctx, n, net)
self.assertFalse(self.ofc.exists_ofc_network(self.ctx, n))
def test_exists_ofc_port(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_port(self.ctx, p))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_port', p, ofc_p, old_style=True)
self.assertTrue(self.ofc.exists_ofc_port(self.ctx, p))
def test_delete_ofc_port(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_port(self.ctx, p))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_port', p, ofc_p, old_style=True)
self.assertTrue(self.ofc.exists_ofc_port(self.ctx, p))
port = {'tenant_id': t, 'network_id': n}
self.ofc.delete_ofc_port(self.ctx, p, port)
self.assertFalse(self.ofc.exists_ofc_port(self.ctx, p))
def test_exists_ofc_packet_filter(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_packet_filter(self.ctx, f))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_packet_filter', f, ofc_f,
old_style=True)
self.assertTrue(self.ofc.exists_ofc_packet_filter(self.ctx, f))
def test_delete_ofc_packet_filter(self):
t, n, p, f, none = self.get_random_params()
ofc_t, ofc_n, ofc_p, ofc_f, ofc_none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_packet_filter(self.ctx, f))
session = self.ctx.session
ndb.add_ofc_item(session, 'ofc_packet_filter', f, ofc_f,
old_style=True)
self.assertTrue(self.ofc.exists_ofc_packet_filter(self.ctx, f))
self.ofc.delete_ofc_packet_filter(self.ctx, f)
self.assertFalse(self.ofc.exists_ofc_packet_filter(self.ctx, f))
|
|
"""NginxParser is a member object of the NginxConfigurator class."""
import copy
import glob
import logging
import os
import pyparsing
import re
from certbot import errors
from certbot_nginx import obj
from certbot_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxParser(object):
"""Class handles the fine details of parsing the Nginx Configuration.
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar dict parsed: Mapping of file paths to parsed trees
"""
def __init__(self, root, ssl_options):
self.parsed = {}
self.root = os.path.abspath(root)
self.loc = self._set_locations(ssl_options)
# Parse nginx.conf and included files.
# TODO: Check sites-available/ as well. For now, the configurator does
# not enable sites from there.
self.load()
def load(self):
"""Loads Nginx files into a parsed tree.
"""
self.parsed = {}
self._parse_recursively(self.loc["root"])
def _parse_recursively(self, filepath):
"""Parses nginx config files recursively by looking at 'include'
directives inside 'http' and 'server' blocks. Note that this only
reads Nginx files that potentially declare a virtual host.
:param str filepath: The path to the files to parse, as a glob
"""
filepath = self.abs_path(filepath)
trees = self._parse_files(filepath)
for tree in trees:
for entry in tree:
if _is_include_directive(entry):
# Parse the top-level included file
self._parse_recursively(entry[1])
elif entry[0] == ['http'] or entry[0] == ['server']:
# Look for includes in the top-level 'http'/'server' context
for subentry in entry[1]:
if _is_include_directive(subentry):
self._parse_recursively(subentry[1])
elif entry[0] == ['http'] and subentry[0] == ['server']:
# Look for includes in a 'server' context within
# an 'http' context
for server_entry in subentry[1]:
if _is_include_directive(server_entry):
self._parse_recursively(server_entry[1])
def abs_path(self, path):
"""Converts a relative path to an absolute path relative to the root.
Does nothing for paths that are already absolute.
:param str path: The path
:returns: The absolute path
:rtype: str
"""
if not os.path.isabs(path):
return os.path.join(self.root, path)
else:
return path
def get_vhosts(self):
# pylint: disable=cell-var-from-loop
"""Gets list of all 'virtual hosts' found in Nginx configuration.
Technically this is a misnomer because Nginx does not have virtual
hosts; it has 'server blocks'.
:returns: List of :class:`~certbot_nginx.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
enabled = True # We only look at enabled vhosts for now
vhosts = []
servers = {}
for filename in self.parsed:
tree = self.parsed[filename]
servers[filename] = []
srv = servers[filename] # workaround undefined loop var in lambdas
# Find all the server blocks
_do_for_subarray(tree, lambda x: x[0] == ['server'],
lambda x, y: srv.append((x[1], y)))
# Find 'include' statements in server blocks and append their trees
for i, (server, path) in enumerate(servers[filename]):
new_server = self._get_included_directives(server)
servers[filename][i] = (new_server, path)
for filename in servers:
for server, path in servers[filename]:
# Parse the server block into a VirtualHost object
parsed_server = parse_server(server)
vhost = obj.VirtualHost(filename,
parsed_server['addrs'],
parsed_server['ssl'],
enabled,
parsed_server['names'],
server,
path)
vhosts.append(vhost)
return vhosts
def _get_included_directives(self, block):
"""Returns array with the "include" directives expanded out by
concatenating the contents of the included file to the block.
:param list block:
:rtype: list
"""
result = copy.deepcopy(block) # Copy the list to keep self.parsed idempotent
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except KeyError:
pass
return result
def _parse_files(self, filepath, override=False):
"""Parse files from a glob
:param str filepath: Nginx config file path
:param bool override: Whether to parse a file that has been parsed
:returns: list of parsed tree structures
:rtype: list
"""
files = glob.glob(filepath) # nginx on unix calls glob(3) for this
# XXX Windows nginx uses FindFirstFile, and
# should have a narrower call here
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except IOError:
logger.warning("Could not open file: %s", item)
except pyparsing.ParseException:
logger.debug("Could not parse file: %s", item)
return trees
def _parse_ssl_options(self, ssl_options):
if ssl_options is not None:
try:
with open(ssl_options) as _file:
return nginxparser.load(_file).spaced
except IOError:
logger.warn("Missing NGINX TLS options file: %s", ssl_options)
except pyparsing.ParseBaseException:
logger.debug("Could not parse file: %s", ssl_options)
return []
def _set_locations(self, ssl_options):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
root = self._find_config_root()
default = root
nginx_temp = os.path.join(self.root, "nginx_ports.conf")
if os.path.isfile(nginx_temp):
listen = nginx_temp
name = nginx_temp
else:
listen = default
name = default
return {"root": root, "default": default, "listen": listen,
"name": name, "ssl_options": self._parse_ssl_options(ssl_options)}
def _find_config_root(self):
"""Find the Nginx Configuration Root file."""
location = ['nginx.conf']
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError(
"Could not find configuration root")
def filedump(self, ext='tmp', lazy=True):
"""Dumps parsed configurations into files.
:param str ext: The file extension to use for the dumped files. If
empty, this overrides the existing conf files.
:param bool lazy: Only write files that have been modified
"""
# Best-effort atomicity is enforced above us by reverter.py
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
if lazy and not tree.is_dirty():
continue
out = nginxparser.dumps(tree)
logger.debug('Writing nginx conf tree to %s:\n%s', filename, out)
with open(filename, 'w') as _file:
_file.write(out)
except IOError:
logger.error("Could not open file for writing: %s", filename)
def has_ssl_on_directive(self, vhost):
"""Does vhost have ssl on for all ports?
:param :class:`~certbot_nginx.obj.VirtualHost` vhost: The vhost in question
:returns: True if 'ssl on' directive is included
:rtype: bool
"""
server = vhost.raw
for directive in server:
if not directive or len(directive) < 2:
continue
elif directive[0] == 'ssl' and directive[1] == 'on':
return True
return False
def add_server_directives(self, vhost, directives, replace):
"""Add or replace directives in the server block identified by vhost.
This method modifies vhost to be fully consistent with the new directives.
.. note:: If replace is True, this raises a misconfiguration error
if the directive does not already exist.
.. note:: If replace is False, nothing gets added if an identical
block exists already.
.. todo:: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param :class:`~certbot_nginx.obj.VirtualHost` vhost: The vhost
whose information we use to match on
:param list directives: The directives to add
:param bool replace: Whether to only replace existing directives
"""
filename = vhost.filep
try:
result = self.parsed[filename]
for index in vhost.path:
result = result[index]
if not isinstance(result, list) or len(result) != 2:
raise errors.MisconfigurationError("Not a server block.")
result = result[1]
_add_directives(result, directives, replace)
# update vhost based on new directives
new_server = self._get_included_directives(result)
parsed_server = parse_server(new_server)
vhost.addrs = parsed_server['addrs']
vhost.ssl = parsed_server['ssl']
vhost.names = parsed_server['names']
vhost.raw = new_server
except errors.MisconfigurationError as err:
raise errors.MisconfigurationError("Problem in %s: %s" % (filename, err.message))
def get_all_certs_keys(self):
"""Gets all certs and keys in the nginx config.
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: set
"""
c_k = set()
vhosts = self.get_vhosts()
for vhost in vhosts:
tup = [None, None, vhost.filep]
if vhost.ssl:
for directive in vhost.raw:
# A directive can be an empty list to preserve whitespace
if not directive:
continue
if directive[0] == 'ssl_certificate':
tup[0] = directive[1]
elif directive[0] == 'ssl_certificate_key':
tup[1] = directive[1]
if tup[0] is not None and tup[1] is not None:
c_k.add(tuple(tup))
return c_k
def _do_for_subarray(entry, condition, func, path=None):
"""Executes a function for a subarray of a nested array if it matches
the given condition.
:param list entry: The list to iterate over
:param function condition: Returns true iff func should be executed on item
:param function func: The function to call for each matching item
"""
if path is None:
path = []
if isinstance(entry, list):
if condition(entry):
func(entry, path)
else:
for index, item in enumerate(entry):
_do_for_subarray(item, condition, func, path + [index])
def get_best_match(target_name, names):
"""Finds the best match for target_name out of names using the Nginx
name-matching rules (exact > longest wildcard starting with * >
longest wildcard ending with * > regex).
:param str target_name: The name to match
:param set names: The candidate server names
:returns: Tuple of (type of match, the name that matched)
:rtype: tuple
"""
exact = []
wildcard_start = []
wildcard_end = []
regex = []
for name in names:
if _exact_match(target_name, name):
exact.append(name)
elif _wildcard_match(target_name, name, True):
wildcard_start.append(name)
elif _wildcard_match(target_name, name, False):
wildcard_end.append(name)
elif _regex_match(target_name, name):
regex.append(name)
if len(exact) > 0:
# There can be more than one exact match; e.g. eff.org, .eff.org
match = min(exact, key=len)
return ('exact', match)
if len(wildcard_start) > 0:
# Return the longest wildcard
match = max(wildcard_start, key=len)
return ('wildcard_start', match)
if len(wildcard_end) > 0:
# Return the longest wildcard
match = max(wildcard_end, key=len)
return ('wildcard_end', match)
if len(regex) > 0:
# Just return the first one for now
match = regex[0]
return ('regex', match)
return (None, None)
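# Illustrative results, derived from the precedence rules above:
#   get_best_match('www.eff.org', {'www.eff.org', '*.eff.org'}) -> ('exact', 'www.eff.org')
#   get_best_match('www.eff.org', {'*.eff.org', 'www.*'}) -> ('wildcard_start', '*.eff.org')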
def _exact_match(target_name, name):
return target_name == name or '.' + target_name == name
def _wildcard_match(target_name, name, start):
# Degenerate case
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
# If the domain ends in a wildcard, do the match procedure in reverse
if not start:
parts.reverse()
match_parts.reverse()
# The first part must be a wildcard or blank, e.g. '.eff.org'
first = match_parts.pop(0)
if first != '*' and first != '':
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
# Ex: www.eff.org matches *.eff.org, eff.org does not match *.eff.org
return target_name.endswith('.' + name)
def _regex_match(target_name, name):
# Must start with a tilde
if len(name) < 2 or name[0] != '~':
return False
# After tilde is a perl-compatible regex
try:
regex = re.compile(name[1:])
if re.match(regex, target_name):
return True
else:
return False
except re.error: # pragma: no cover
# perl-compatible regexes are sometimes not recognized by python
return False
def _is_include_directive(entry):
"""Checks if an nginx parsed entry is an 'include' directive.
:param list entry: the parsed entry
:returns: Whether it's an 'include' directive
:rtype: bool
"""
return (isinstance(entry, list) and
len(entry) == 2 and entry[0] == 'include' and
isinstance(entry[1], str))
def _get_servernames(names):
"""Turns a server_name string into a list of server names
:param str names: server names
:rtype: list
"""
whitespace_re = re.compile(r'\s+')
names = re.sub(whitespace_re, ' ', names)
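# e.g. 'example.com   www.example.com' -> ['example.com', 'www.example.com']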
return names.split(' ')
def parse_server(server):
"""Parses a list of server directives.
:param list server: list of directives in a server block
:rtype: dict
"""
parsed_server = {'addrs': set(),
'ssl': False,
'names': set()}
apply_ssl_to_all_addrs = False
for directive in server:
if not directive:
continue
if directive[0] == 'listen':
addr = obj.Addr.fromstring(directive[1])
parsed_server['addrs'].add(addr)
if not parsed_server['ssl'] and addr.ssl:
parsed_server['ssl'] = True
elif directive[0] == 'server_name':
parsed_server['names'].update(
_get_servernames(directive[1]))
elif directive[0] == 'ssl' and directive[1] == 'on':
parsed_server['ssl'] = True
apply_ssl_to_all_addrs = True
if apply_ssl_to_all_addrs:
for addr in parsed_server['addrs']:
addr.ssl = True
return parsed_server
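# Illustrative call (the exact Addr object depends on obj.Addr.fromstring):
#   parse_server([['listen', '443 ssl'], ['server_name', 'a.example  b.example']])
#   -> {'addrs': {<Addr for '443 ssl'>}, 'ssl': True, 'names': {'a.example', 'b.example'}}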
def _add_directives(block, directives, replace):
"""Adds or replaces directives in a config block.
When replace=False, it's an error to try and add a directive that already
exists in the config block with a conflicting value.
When replace=True, a directive with the same name MUST already exist in the
config block, and the first instance will be replaced.
.. todo:: Find directives that are in included files.
:param list block: The block to replace in
:param list directives: The new directives.
"""
for directive in directives:
_add_directive(block, directive, replace)
if block and '\n' not in block[-1]: # could be " \n " or ["\n"] !
block.append(nginxparser.UnspacedList('\n'))
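# Typical call (paths are placeholders): add SSL directives to a server block,
# raising MisconfigurationError if a conflicting non-repeatable directive exists:
#   _add_directives(server_block,
#                   [['ssl_certificate', '/path/to/cert.pem'],
#                    ['ssl_certificate_key', '/path/to/key.pem']],
#                   replace=False)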
REPEATABLE_DIRECTIVES = set(['server_name', 'listen', 'include'])
COMMENT = ' managed by Certbot'
COMMENT_BLOCK = [' ', '#', COMMENT]
def _comment_directive(block, location):
"""Add a comment to the end of the line at location."""
next_entry = block[location + 1] if location + 1 < len(block) else None
if isinstance(next_entry, list) and next_entry:
if len(next_entry) >= 2 and next_entry[-2] == "#" and COMMENT in next_entry[-1]:
return
elif isinstance(next_entry, nginxparser.UnspacedList):
next_entry = next_entry.spaced[0]
else:
next_entry = next_entry[0]
block.insert(location + 1, COMMENT_BLOCK[:])
if next_entry is not None and "\n" not in next_entry:
block.insert(location + 2, '\n')
def _add_directive(block, directive, replace):
"""Adds or replaces a single directive in a config block.
See _add_directives for more documentation.
"""
directive = nginxparser.UnspacedList(directive)
if len(directive) == 0 or directive[0] == '#':
# whitespace or comment
block.append(directive)
return
# Find the index of a config line where the name of the directive matches
# the name of the directive we want to add. If no line exists, use None.
location = next((index for index, line in enumerate(block)
if line and line[0] == directive[0]), None)
if replace:
if location is None:
raise errors.MisconfigurationError(
'expected directive for {0} in the Nginx '
'config but did not find it.'.format(directive[0]))
block[location] = directive
_comment_directive(block, location)
else:
# Append directive. Fail if the name is not a repeatable directive name,
# and there is already a copy of that directive with a different value
# in the config file.
directive_name = directive[0]
directive_value = directive[1]
if location is None or (isinstance(directive_name, str) and
directive_name in REPEATABLE_DIRECTIVES):
block.append(directive)
_comment_directive(block, len(block) - 1)
elif block[location][1] != directive_value:
raise errors.MisconfigurationError(
'tried to insert directive "{0}" but found '
'conflicting "{1}".'.format(directive, block[location]))
|
|
import datetime
import re
import flask
import jinja2
import requests
from pluss.app import app, full_url_for
from pluss.handlers import oauth2
from pluss.util import dateutils
from pluss.util.cache import Cache
from pluss.util.config import Config
from pluss.util.ratelimit import ratelimited
GPLUS_API_ACTIVITIES_ENDPOINT = 'https://www.googleapis.com/plus/v1/people/%s/activities/public'
ATOM_CACHE_KEY_TEMPLATE = 'pluss--atom--1--%s'
@ratelimited
@app.route('/atom/<gplus_id>')
def user_atom(gplus_id):
"""Display an Atom-format feed for a user id."""
return atom(gplus_id)
@ratelimited
@app.route('/atom/<gplus_id>/<page_id>')
def page_atom(gplus_id, page_id):
"""Display an Atom-format feed for a page, using a user's key."""
return atom(gplus_id, page_id)
def atom(gplus_id, page_id=None):
"""Return an Atom-format feed for the given G+ id, possibly from cache."""
if len(gplus_id) != 21:
return 'Invalid G+ user ID (must be exactly 21 digits).', 404 # Not Found
if page_id and len(page_id) != 21:
return 'Invalid G+ page ID (must be exactly 21 digits).', 404 # Not Found
# Google+ is no longer publicly available for consumers.
return 'Google+ was sunset for consumer users in April 2019. This feed is no longer available.', 410 # Gone
##### CODE BELOW FOR HISTORICAL PURPOSES ONLY #####
cache_key = ATOM_CACHE_KEY_TEMPLATE % gplus_id
if page_id:
cache_key = '%s-%s' % (cache_key, page_id)
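# e.g. 'pluss--atom--1--<21-digit user id>' for user feeds, or
#      'pluss--atom--1--<21-digit user id>-<21-digit page id>' for page feeds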
response = Cache.get(cache_key) # A frozen Response object
if response is None:
try:
response = generate_atom(gplus_id, page_id)
except oauth2.UnavailableException as e:
app.logger.info("Feed request failed - %r", e)
flask.abort(e.status)
response.add_etag()
response.freeze()
Cache.set(cache_key, response, time=Config.getint('cache', 'stream-expire'))
return response.make_conditional(flask.request)
def generate_atom(gplus_id, page_id):
"""Generate an Atom-format feed for the given G+ id."""
# If no page id specified, use the special value 'me' which refers to the
# stream for the owner of the OAuth2 token.
request = requests.Request('GET', GPLUS_API_ACTIVITIES_ENDPOINT % (page_id or 'me'),
params={'maxResults': 10, 'userIp': flask.request.remote_addr})
api_response = oauth2.authed_request_for_id(gplus_id, request)
result = api_response.json()
if page_id:
request_url = full_url_for('page_atom', gplus_id=gplus_id, page_id=page_id)
else:
request_url = full_url_for('user_atom', gplus_id=gplus_id)
params = {
'server_url': full_url_for('main'),
'feed_id': page_id or gplus_id,
'request_url': request_url,
'to_atom_date': dateutils.to_atom_format,
}
items = result.get('items')
if not items:
params['last_update'] = datetime.datetime.today()
body = flask.render_template('atom/empty.xml', **params)
else:
last_update = max(dateutils.from_iso_format(item['updated']) for item in items)
params['last_update'] = last_update
params['items'] = process_feed_items(items)
params['actor'] = params['items'][0]['actor']
body = flask.render_template('atom/feed.xml', **params)
response = flask.make_response(body)
response.headers['Content-Type'] = 'application/atom+xml; charset=utf-8'
response.date = params['last_update']
return response
def process_feed_items(api_items):
"""Generate a list of items for use in an Atom feed template from an API result."""
return [process_feed_item(item) for item in api_items]
def process_feed_item(api_item):
"""Generate a single item for use in an Atom feed template from an API result."""
# Begin with the fields shared by all feed items.
item = {
'id': api_item['id'],
'permalink': api_item['url'],
'published': dateutils.from_iso_format(api_item['published']),
'updated': dateutils.from_iso_format(api_item['updated']),
'actor': process_actor(api_item['actor']),
}
# Choose which processor to use for this feed item
verb_processor = {
'post': process_post,
'share': process_share,
'checkin': process_checkin,
}.get(api_item['verb'], process_unknown)
item.update(verb_processor(api_item))
return item
def process_post(api_item, nested=False):
"""Process a standard post."""
obj = api_item['object']
html = obj.get('content')
attachments = process_attachments(obj.get('attachments'))
# Normally, create the title from the post text
title = create_title(html)
# If that doesn't work, fall back to the first attachment's title
if not title and attachments:
title = attachments[0]['title']
# If that also doesn't work, use a default title
if not title:
title = 'A G+ Post'
content = flask.render_template('atom/post.html', html=html, attachments=attachments)
result = {
'content': content,
'title': title,
}
if nested:
# These extra fields are only used in nested calls (e.g. shares)
result['actor'] = process_actor(obj.get('actor'))
result['url'] = obj.get('url')
return result
def process_share(api_item):
"""Process a shared item."""
html = api_item.get('annotation')
original = process_post(api_item, nested=True)
# Normally, create the title from the resharer's note
# If that doesn't work, fall back to the shared item's title
title = create_title(html) or original['title']
content = flask.render_template('atom/share.html', html=html, original=original)
return {
'content': content,
'title': title,
}
def process_checkin(api_item):
"""Process a check-in."""
actor = process_actor(api_item.get('actor'))
original = process_post(api_item, nested=True)
content = flask.render_template('atom/checkin.html', actor=actor, original=original)
return {
'content': content,
'title': original['title'],
}
def process_unknown(api_item):
"""Process an item of unknown type."""
# Try parsing it as a regular post
original = process_post(api_item)
if original['content']:
return original
# If that fails, just use a link to the post.
content = '<a href="%(url)s">%(url)s</a>' % {'url': api_item.get('url')}
return {
'content': content,
'title': 'A G+ Activity',
}
def process_actor(api_actor):
"""Parse an actor definition from an API result."""
api_actor = api_actor or {}
return {
'id': api_actor.get('id'),
'name': api_actor.get('displayName'),
'url': api_actor.get('url'),
'image_url': api_actor.get('image', {}).get('url'),
}
def process_attachments(attachments):
"""Parse a list of attachments from an API result."""
results = []
attachments = attachments or []
type_processors = {
'article': process_attached_article,
'photo': process_attached_photo,
'album': process_attached_album,
'video': process_attached_video,
'event': process_attached_event,
}
for attachment in attachments:
item_type = attachment.get('objectType')
processor = type_processors.get(item_type)
if processor:
results.append(processor(attachment))
else:
descriptor = '[attachment with unsupported type "%s"]' % item_type
results.append({
'html': descriptor,
'title': descriptor,
})
return results
def process_attached_article(attachment):
"""Parse an attached article."""
title = attachment.get('displayName') or attachment.get('url')
html = flask.render_template('atom/article.html', article=attachment, title=title)
return {
'html': html,
'title': title,
}
def process_attached_photo(attachment):
"""Process an attached individual photo."""
title = attachment['image'].get('displayName')
html = flask.render_template('atom/photo.html', photo=attachment)
return {
'html': html,
'title': title or 'An Image',
}
def process_attached_video(attachment):
"""Process an attached video."""
title = attachment.get('displayName') or attachment.get('url')
html = flask.render_template('atom/video.html', video=attachment)
return {
'html': html,
'title': title or 'A Video',
}
def process_attached_album(attachment):
"""Process an attached photo album."""
title = attachment.get('displayName')
thumbnails = attachment.get('thumbnails', [])
if len(thumbnails) > 1:
thumbnails[0]['first'] = True
big_size = thumbnails[0].get('image', {}).get('height', 0)
small_size = thumbnails[1].get('image', {}).get('height', 1)
offset = big_size % small_size
max_offset = (big_size // small_size) + 1
if offset > max_offset:
offset = offset - small_size
if abs(offset) > max_offset:
offset = 0
offset = -offset
else:
offset = 0
html = flask.render_template('atom/album.html', album=attachment, offset=offset)
return {
'html': html,
'title': title or 'An Album',
}
def process_attached_event(attachment):
"""Process an attached G+ event."""
title = attachment.get('displayName')
html = flask.render_template('atom/event.html', event=attachment)
return {
'html': html,
'title': title,
}
def create_title(html):
"""Attempt to devise a title for an arbitrary piece of html content."""
if not html:
return None
    # Try just the text before the first line break and see if that gives a decent title.
    # If it does, use it; otherwise, use the full text.
first_line = re.split(r'<br\s*/?>', html)[0]
first_line_text = jinja2.Markup(first_line).striptags()
if len(first_line_text) > 3:
text = first_line_text
else:
text = jinja2.Markup(html).striptags()
# If we're already at 100 characters or less, we're good.
if len(text) <= 100:
return text
# Trim things down, avoiding breaking words.
shortened = text[:97]
if ' ' in shortened[-10:]:
shortened = shortened.rsplit(' ', 1)[0]
return shortened + '...'
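# A minimal illustration of create_title() on hypothetical inputs (the HTML
# snippets below are made up for the example; behaviour follows the code above):
#
#   create_title('<b>Hello world</b><br>and some more text')
#   # -> 'Hello world'  (text before the first <br>, tags stripped)
#
#   create_title('Hi<br>' + 'x' * 200)
#   # -> falls back to the full stripped text because the first line is too
#   #    short, then trims to at most 97 characters (avoiding breaking a word
#   #    when possible) and appends '...'
#
#   create_title(None)
#   # -> None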
# vim: set ts=4 sts=4 sw=4 et:
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_concurrency import lockutils
from tempest import clients
from tempest import config
from tempest.lib import auth
from tempest.lib.common import dynamic_creds
from tempest.lib.common import preprov_creds
from tempest.lib import exceptions
CONF = config.CONF
"""This module provides factories of credential and credential providers
Credentials providers and clients are (going to be) part of tempest.lib,
and so they may not hold any dependency to tempest configuration.
Methods in this module collect the relevant configuration details and pass
them to credentials providers and clients, so that tests can have easy
access to these features.
Client managers with hard-coded configured credentials are also moved here,
to avoid circular dependencies."""
# === Credential Providers
# Subset of the parameters of credential providers that depend on configuration
def _get_common_provider_params(identity_version):
if identity_version == 'v3':
identity_uri = CONF.identity.uri_v3
    elif identity_version == 'v2':
        identity_uri = CONF.identity.uri
    else:
        raise exceptions.InvalidConfiguration(
            'Unsupported identity version: %s' % identity_version)
return {
'identity_version': identity_version,
'identity_uri': identity_uri,
'credentials_domain': CONF.auth.default_credentials_domain_name,
'admin_role': CONF.identity.admin_role
}
def get_dynamic_provider_params(identity_version, admin_creds=None):
"""Dynamic provider parameters setup from config
    This helper returns a dict of parameters that can be used to initialise
a `DynamicCredentialProvider` according to tempest configuration.
Parameters that are not configuration specific (name, network_resources)
are not returned.
:param identity_version: 'v2' or 'v3'
:param admin_creds: An object of type `auth.Credentials`. If None, it
is built from the configuration file as well.
:return: A dict with the parameters
"""
_common_params = _get_common_provider_params(identity_version)
admin_creds = admin_creds or get_configured_admin_credentials(
fill_in=True, identity_version=identity_version)
if identity_version == 'v3':
endpoint_type = CONF.identity.v3_endpoint_type
elif identity_version == 'v2':
endpoint_type = CONF.identity.v2_admin_endpoint_type
return dict(_common_params, **dict([
('admin_creds', admin_creds),
('identity_admin_domain_scope', CONF.identity.admin_domain_scope),
('identity_admin_role', CONF.identity.admin_role),
('extra_roles', CONF.auth.tempest_roles),
('neutron_available', CONF.service_available.neutron),
('project_network_cidr', CONF.network.project_network_cidr),
('project_network_mask_bits', CONF.network.project_network_mask_bits),
('public_network_id', CONF.network.public_network_id),
('create_networks', (CONF.auth.create_isolated_networks and not
CONF.network.shared_physical_network)),
('resource_prefix', CONF.resources_prefix),
('identity_admin_endpoint_type', endpoint_type)
]))
def get_preprov_provider_params(identity_version):
"""Pre-provisioned provider parameters setup from config
    This helper returns a dict of parameters that can be used to initialise
a `PreProvisionedCredentialProvider` according to tempest configuration.
Parameters that are not configuration specific (name) are not returned.
:param identity_version: 'v2' or 'v3'
:return: A dict with the parameters
"""
_common_params = _get_common_provider_params(identity_version)
reseller_admin_role = CONF.object_storage.reseller_admin_role
return dict(_common_params, **dict([
('accounts_lock_dir', lockutils.get_lock_path(CONF)),
('test_accounts_file', CONF.auth.test_accounts_file),
('object_storage_operator_role', CONF.object_storage.operator_role),
('object_storage_reseller_admin_role', reseller_admin_role)
]))
def get_credentials_provider(name, network_resources=None,
force_tenant_isolation=False,
identity_version=None):
"""Return the right implementation of CredentialProvider based on config
This helper returns the right implementation of CredentialProvider based on
config and on the value of force_tenant_isolation.
:param name: When provided, it makes it possible to associate credential
artifacts back to the owner (test class).
:param network_resources: Dictionary of network resources to be allocated
for each test account. Only valid for the dynamic
credentials provider.
:param force_tenant_isolation: Always return a `DynamicCredentialProvider`,
regardless of the configuration.
:param identity_version: Use the specified identity API version, regardless
of the configuration. Valid values are 'v2', 'v3'.
"""
# If a test requires a new account to work, it can have it via forcing
# dynamic credentials. A new account will be produced only for that test.
# In case admin credentials are not available for the account creation,
# the test should be skipped else it would fail.
identity_version = identity_version or CONF.identity.auth_version
if CONF.auth.use_dynamic_credentials or force_tenant_isolation:
return dynamic_creds.DynamicCredentialProvider(
name=name,
network_resources=network_resources,
**get_dynamic_provider_params(identity_version))
else:
if CONF.auth.test_accounts_file:
# Most params are not relevant for pre-created accounts
return preprov_creds.PreProvisionedCredentialProvider(
name=name,
**get_preprov_provider_params(identity_version))
else:
raise exceptions.InvalidConfiguration(
'A valid credential provider is needed')
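# A minimal usage sketch (hypothetical test code, not part of this module).
# The provider methods referenced below (get_primary_creds, clear_creds) are
# assumed from the tempest.lib credential provider interface:
#
#   provider = get_credentials_provider(name='MyTestClass')
#   primary = provider.get_primary_creds()
#   ...  # build clients from 'primary' and run the test
#   provider.clear_creds()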
def is_admin_available(identity_version):
"""Helper to check for admin credentials
Helper function to check if a set of admin credentials is available so we
can do a single call from skip_checks.
This helper depends on identity_version as there may be admin credentials
available for v2 but not for v3.
:param identity_version: 'v2' or 'v3'
"""
is_admin = True
    # If dynamic credentials are enabled, admin credentials will be available
if CONF.auth.use_dynamic_credentials:
return is_admin
# Check whether test accounts file has the admin specified or not
elif CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
name='check_admin',
**get_preprov_provider_params(identity_version))
if not check_accounts.admin_available():
is_admin = False
else:
try:
get_configured_admin_credentials(fill_in=False,
identity_version=identity_version)
except exceptions.InvalidConfiguration:
is_admin = False
return is_admin
def is_alt_available(identity_version):
"""Helper to check for alt credentials
Helper function to check if a second set of credentials is available (aka
alt credentials) so we can do a single call from skip_checks.
This helper depends on identity_version as there may be alt credentials
available for v2 but not for v3.
:param identity_version: 'v2' or 'v3'
"""
    # If dynamic credentials are enabled, alt credentials will be available
if CONF.auth.use_dynamic_credentials:
return True
    # Check whether the test accounts file has multiple users specified or not
if CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
name='check_alt',
**get_preprov_provider_params(identity_version))
else:
raise exceptions.InvalidConfiguration(
'A valid credential provider is needed')
try:
if not check_accounts.is_multi_user():
return False
else:
return True
except exceptions.InvalidConfiguration:
return False
# === Credentials
# Type of credentials available from configuration
CREDENTIAL_TYPES = {
'identity_admin': ('auth', 'admin'),
'user': ('identity', None),
'alt_user': ('identity', 'alt')
}
DEFAULT_PARAMS = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
def get_configured_admin_credentials(fill_in=True, identity_version=None):
"""Get admin credentials from the config file
    Read credentials from configuration and build a Credentials object based
    on the specified or configured identity version.
:param fill_in: If True, a request to the Token API is submitted, and the
credential object is filled in with all names and IDs from
the token API response.
:param identity_version: The identity version to talk to and the type of
credentials object to be created. 'v2' or 'v3'.
:returns: An object of a sub-type of `auth.Credentials`
"""
identity_version = identity_version or CONF.identity.auth_version
if identity_version not in ('v2', 'v3'):
raise exceptions.InvalidConfiguration(
'Unsupported auth version: %s' % identity_version)
conf_attributes = ['username', 'password',
'project_name']
if identity_version == 'v3':
conf_attributes.append('domain_name')
# Read the parts of credentials from config
params = DEFAULT_PARAMS.copy()
for attr in conf_attributes:
params[attr] = getattr(CONF.auth, 'admin_' + attr)
# Build and validate credentials. We are reading configured credentials,
# so validate them even if fill_in is False
credentials = get_credentials(fill_in=fill_in,
identity_version=identity_version, **params)
if not fill_in:
if not credentials.is_valid():
msg = ("The admin credentials are incorrectly set in the config "
"file for identity version %s. Double check that all "
"required values are assigned.")
raise exceptions.InvalidConfiguration(msg % identity_version)
return credentials
def get_credentials(fill_in=True, identity_version=None, **kwargs):
"""Get credentials from dict based on config
Wrapper around auth.get_credentials to use the configured identity version
if none is specified.
:param fill_in: If True, a request to the Token API is submitted, and the
credential object is filled in with all names and IDs from
the token API response.
:param identity_version: The identity version to talk to and the type of
credentials object to be created. 'v2' or 'v3'.
:param kwargs: Attributes to be used to build the Credentials object.
:returns: An object of a sub-type of `auth.Credentials`
"""
params = dict(DEFAULT_PARAMS, **kwargs)
identity_version = identity_version or CONF.identity.auth_version
    # In case of 'v3', add the domain from config if not specified.
    # To honour "default_credentials_domain_name", if no domain
    # field is specified at all, add it to the credentials dict.
if identity_version == 'v3':
domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
if 'domain' in x)
if not domain_fields.intersection(kwargs.keys()):
domain_name = CONF.auth.default_credentials_domain_name
# NOTE(andreaf) Setting domain_name implicitly sets user and
# project domain names, if they are None
params['domain_name'] = domain_name
auth_url = CONF.identity.uri_v3
else:
auth_url = CONF.identity.uri
return auth.get_credentials(auth_url,
fill_in=fill_in,
identity_version=identity_version,
**params)
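# A minimal sketch of calling get_credentials() directly (the username/project
# values below are made up for illustration; fill_in=False skips the request
# to the Token API):
#
#   creds = get_credentials(fill_in=False,
#                           identity_version='v3',
#                           username='demo',
#                           password='secret',
#                           project_name='demo-project')
#   # 'domain_name' is filled in from CONF.auth.default_credentials_domain_name
#   # because no domain field was passed explicitly.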
# === Credential / client managers
class AdminManager(clients.Manager):
"""Manager that uses admin credentials for its managed client objects"""
def __init__(self):
super(AdminManager, self).__init__(
credentials=get_configured_admin_credentials())
|
|
import unittest
import tempfile
import inspect
import shutil
import sys
import os
import re
from os.path import basename, dirname, exists, join, normpath
from robot.errors import DataError
from robot.utils import abspath, JYTHON, WINDOWS
from robot.utils.importer import Importer, ByPathImporter
from robot.utils.asserts import (assert_equals, assert_true, assert_raises,
assert_raises_with_msg)
CURDIR = dirname(abspath(__file__))
LIBDIR = normpath(join(CURDIR, '..', '..', 'atest', 'testresources', 'testlibs'))
TEMPDIR = tempfile.gettempdir()
TESTDIR = join(TEMPDIR, 'robot-importer-testing')
WINDOWS_PATH_IN_ERROR = re.compile(r"'\w:\\")
def assert_prefix(error, expected):
message = unicode(error)
count = 3 if WINDOWS_PATH_IN_ERROR.search(message) else 2
prefix = ':'.join(message.split(':')[:count]) + ':'
assert_equals(prefix, expected)
def create_temp_file(name, attr=42, extra_content=''):
if not exists(TESTDIR):
os.mkdir(TESTDIR)
path = join(TESTDIR, name)
with open(path, 'w') as file:
file.write('attr = %r\n' % attr)
file.write('def func():\n return attr\n')
file.write(extra_content)
return path
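# For reference, create_temp_file('test.py') writes a module equivalent to:
#
#   attr = 42
#   def func():
#       return attr
#
# (the function body uses a single-space indent in the actual file), plus any
# extra_content appended verbatim at the end.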
class LoggerStub(object):
def __init__(self, remove_extension=False):
self.messages = []
self.remove_extension = remove_extension
def info(self, msg):
if self.remove_extension:
for ext in '$py.class', '.pyc', '.py':
msg = msg.replace(ext, '')
self.messages.append(self._normalize_drive_letter(msg))
def assert_message(self, msg, index=0):
assert_equals(self.messages[index], self._normalize_drive_letter(msg))
def _normalize_drive_letter(self, msg):
if not WINDOWS:
return msg
return re.sub("'\\w:", lambda match: match.group().upper(), msg)
class TestImportByPath(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
if exists(TESTDIR):
shutil.rmtree(TESTDIR)
def test_python_file(self):
path = create_temp_file('test.py')
self._import_and_verify(path, remove='test')
self._assert_imported_message('test', path)
def test_python_directory(self):
create_temp_file('__init__.py')
module_name = basename(TESTDIR)
self._import_and_verify(TESTDIR, remove=module_name)
self._assert_imported_message(module_name, TESTDIR)
def test_import_same_file_multiple_times(self):
path = create_temp_file('test.py')
self._import_and_verify(path, remove='test')
self._assert_imported_message('test', path)
self._import_and_verify(path)
self._assert_imported_message('test', path)
self._import_and_verify(path, name='library')
self._assert_imported_message('test', path, type='library module')
def test_import_different_file_and_directory_with_same_name(self):
path1 = create_temp_file('test.py', attr=1)
self._import_and_verify(path1, attr=1, remove='test')
self._assert_imported_message('test', path1)
path2 = join(TESTDIR, 'test')
os.mkdir(path2)
create_temp_file(join(path2, '__init__.py'), attr=2)
self._import_and_verify(path2, attr=2, directory=path2)
self._assert_removed_message('test')
self._assert_imported_message('test', path2, index=1)
path3 = create_temp_file(join(path2, 'test.py'), attr=3)
self._import_and_verify(path3, attr=3, directory=path2)
self._assert_removed_message('test')
self._assert_imported_message('test', path3, index=1)
def test_import_class_from_file(self):
path = create_temp_file('test.py', extra_content='class test:\n def m(s): return 1')
klass = self._import(path, remove='test')
self._assert_imported_message('test', path, type='class')
assert_true(inspect.isclass(klass))
assert_equals(klass.__name__, 'test')
assert_equals(klass().m(), 1)
def test_invalid_python_file(self):
path = create_temp_file('test.py', extra_content='invalid content')
error = assert_raises(DataError, self._import_and_verify, path, remove='test')
assert_prefix(error, "Importing '%s' failed: SyntaxError:" % path)
if JYTHON:
def test_java_class_with_java_extension(self):
path = join(CURDIR, 'ImportByPath.java')
self._import_and_verify(path, remove='ImportByPath')
self._assert_imported_message('ImportByPath', path, type='class')
def test_java_class_with_class_extension(self):
path = join(CURDIR, 'ImportByPath.class')
self._import_and_verify(path, remove='ImportByPath', name='java')
self._assert_imported_message('ImportByPath', path, type='java class')
def test_importing_java_package_fails(self):
path = join(LIBDIR, 'javapkg')
assert_raises_with_msg(DataError,
"Importing '%s' failed: Expected class or "
"module, got javapackage." % path,
self._import, path, remove='javapkg')
def test_removing_from_sys_modules_when_importing_multiple_times(self):
path = join(CURDIR, 'ImportByPath.java')
self._import(path, name='java', remove='ImportByPath')
self._assert_imported_message('ImportByPath', path, 'java class')
self._import(path)
self._assert_removed_message('ImportByPath')
self._assert_imported_message('ImportByPath', path, 'class', index=1)
def _import_and_verify(self, path, attr=42, directory=TESTDIR,
name=None, remove=None):
module = self._import(path, name, remove)
assert_equals(module.attr, attr)
assert_equals(module.func(), attr)
if hasattr(module, '__file__'):
assert_equals(dirname(abspath(module.__file__)), directory)
def _import(self, path, name=None, remove=None):
if remove and remove in sys.modules:
del sys.modules[remove]
self.logger = LoggerStub()
importer = Importer(name, self.logger)
sys_path_before = sys.path[:]
try:
return importer.import_class_or_module_by_path(path)
finally:
assert_equals(sys.path, sys_path_before)
def _assert_imported_message(self, name, source, type='module', index=0):
msg = "Imported %s '%s' from '%s'." % (type, name, source)
self.logger.assert_message(msg, index=index)
def _assert_removed_message(self, name, index=0):
msg = "Removed module '%s' from sys.modules to import fresh module." % name
self.logger.assert_message(msg, index=index)
class TestInvalidImportPath(unittest.TestCase):
def test_non_existing(self):
path = 'non-existing.py'
assert_raises_with_msg(DataError,
"Importing '%s' failed: File or directory does not exist." % path,
Importer().import_class_or_module_by_path, path)
path = abspath(path)
assert_raises_with_msg(DataError,
"Importing test file '%s' failed: File or directory does not exist." % path,
Importer('test file').import_class_or_module_by_path, path)
def test_non_absolute(self):
path = os.listdir('.')[0]
assert_raises_with_msg(DataError,
"Importing '%s' failed: Import path must be absolute." % path,
Importer().import_class_or_module_by_path, path)
assert_raises_with_msg(DataError,
"Importing file '%s' failed: Import path must be absolute." % path,
Importer('file').import_class_or_module_by_path, path)
def test_invalid_format(self):
path = join(CURDIR, '..', '..', 'README.rst')
assert_raises_with_msg(DataError,
"Importing '%s' failed: Not a valid file or directory to import." % path,
Importer().import_class_or_module_by_path, path)
assert_raises_with_msg(DataError,
"Importing xxx '%s' failed: Not a valid file or directory to import." % path,
Importer('xxx').import_class_or_module_by_path, path)
class TestImportClassOrModule(unittest.TestCase):
def test_import_module_file(self):
module = self._import_module('classes')
assert_equals(module.__version__, 'N/A')
def test_import_module_directory(self):
module = self._import_module('pythonmodule')
assert_equals(module.some_string, 'Hello, World!')
def test_import_non_existing(self):
error = assert_raises(DataError, self._import, 'NonExisting')
assert_prefix(error, "Importing 'NonExisting' failed: ImportError:")
def test_import_sub_module(self):
module = self._import_module('pythonmodule.library')
assert_equals(module.keyword_from_submodule('Kitty'), 'Hello, Kitty!')
module = self._import_module('pythonmodule.submodule')
assert_equals(module.attribute, 42)
module = self._import_module('pythonmodule.submodule.sublib')
assert_equals(module.keyword_from_deeper_submodule(), 'hi again')
def test_import_class_with_same_name_as_module(self):
klass = self._import_class('ExampleLibrary')
assert_equals(klass().return_string_from_library('xxx'), 'xxx')
def test_import_class_from_module(self):
klass = self._import_class('ExampleLibrary.ExampleLibrary')
assert_equals(klass().return_string_from_library('yyy'), 'yyy')
def test_import_class_from_sub_module(self):
klass = self._import_class('pythonmodule.submodule.sublib.Sub')
assert_equals(klass().keyword_from_class_in_deeper_submodule(), 'bye')
def test_import_non_existing_item_from_existing_module(self):
assert_raises_with_msg(DataError,
"Importing 'pythonmodule.NonExisting' failed: "
"Module 'pythonmodule' does not contain 'NonExisting'.",
self._import, 'pythonmodule.NonExisting')
assert_raises_with_msg(DataError,
"Importing test library 'pythonmodule.none' failed: "
"Module 'pythonmodule' does not contain 'none'.",
self._import, 'pythonmodule.none', 'test library')
def test_invalid_item_from_existing_module(self):
assert_raises_with_msg(DataError,
"Importing 'pythonmodule.some_string' failed: "
"Expected class or module, got string.",
self._import, 'pythonmodule.some_string')
assert_raises_with_msg(DataError,
"Importing xxx 'pythonmodule.submodule.attribute' failed: "
"Expected class or module, got integer.",
self._import, 'pythonmodule.submodule.attribute', 'xxx')
def test_item_from_non_existing_module(self):
error = assert_raises(DataError, self._import, 'nonex.item')
assert_prefix(error, "Importing 'nonex.item' failed: ImportError:")
def test_import_file_by_path(self):
import bytelib as expected
module = self._import_module(join(LIBDIR, 'bytelib.py'))
assert_equals(module.__name__, expected.__name__)
assert_equals(dirname(normpath(module.__file__)),
dirname(normpath(expected.__file__)))
assert_equals(dir(module), dir(expected))
def test_import_class_from_file_by_path(self):
klass = self._import_class(join(LIBDIR, 'ExampleLibrary.py'))
assert_equals(klass().return_string_from_library('test'), 'test')
def test_invalid_file_by_path(self):
path = join(TEMPDIR, 'robot_import_invalid_test_file.py')
try:
with open(path, 'w') as file:
file.write('invalid content')
error = assert_raises(DataError, self._import, path)
assert_prefix(error, "Importing '%s' failed: SyntaxError:" % path)
finally:
os.remove(path)
def test_logging_when_importing_module(self):
logger = LoggerStub(remove_extension=True)
self._import_module('classes', 'test library', logger)
logger.assert_message("Imported test library module 'classes' from '%s'."
% join(LIBDIR, 'classes'))
def test_logging_when_importing_python_class(self):
logger = LoggerStub(remove_extension=True)
self._import_class('ExampleLibrary', logger=logger)
logger.assert_message("Imported class 'ExampleLibrary' from '%s'."
% join(LIBDIR, 'ExampleLibrary'))
if JYTHON:
def test_import_java_class(self):
klass = self._import_class('ExampleJavaLibrary')
assert_equals(klass().getCount(), 1)
def test_import_java_class_in_package(self):
klass = self._import_class('javapkg.JavaPackageExample')
assert_equals(klass().returnValue('xmas'), 'xmas')
def test_import_java_file_by_path(self):
import ExampleJavaLibrary as expected
klass = self._import_class(join(LIBDIR, 'ExampleJavaLibrary.java'))
assert_equals(klass().getCount(), 1)
assert_equals(klass.__name__, expected.__name__)
assert_equals(dir(klass), dir(expected))
def test_importing_java_package_fails(self):
assert_raises_with_msg(DataError,
"Importing test library 'javapkg' failed: "
"Expected class or module, got javapackage.",
self._import, 'javapkg', 'test library')
def test_logging_when_importing_java_class(self):
logger = LoggerStub()
self._import_class('ExampleJavaLibrary', 'java', logger)
logger.assert_message("Imported java class 'ExampleJavaLibrary' "
"from unknown location.")
def _import_module(self, name, type=None, logger=None):
module = self._import(name, type, logger)
assert_true(inspect.ismodule(module))
return module
def _import_class(self, name, type=None, logger=None):
klass = self._import(name, type, logger)
assert_true(inspect.isclass(klass))
return klass
def _import(self, name, type=None, logger=None):
return Importer(type, logger or LoggerStub()).import_class_or_module(name)
class TestErrorDetails(unittest.TestCase):
def test_no_traceback(self):
error = self._failing_import('NoneExisting')
assert_equals(self._get_traceback(error),
'Traceback (most recent call last):\n None')
def test_traceback(self):
path = create_temp_file('tb.py', extra_content='import nonex')
try:
error = self._failing_import(path)
finally:
shutil.rmtree(TESTDIR)
assert_equals(self._get_traceback(error),
'Traceback (most recent call last):\n'
' File "%s", line 4, in <module>\n'
' import nonex' % path)
def test_pythonpath(self):
error = self._failing_import('NoneExisting')
lines = self._get_pythonpath(error).splitlines()
assert_equals(lines[0], 'PYTHONPATH:')
for line in lines[1:]:
assert_true(line.startswith(' '))
def test_non_ascii_bytes_in_pythonpath(self):
sys.path.append('hyv\xe4')
try:
error = self._failing_import('NoneExisting')
finally:
sys.path.pop()
last_line = self._get_pythonpath(error).splitlines()[-1].strip()
assert_true(last_line.startswith('hyv'))
if JYTHON:
def test_classpath(self):
error = self._failing_import('NoneExisting')
lines = self._get_classpath(error).splitlines()
assert_equals(lines[0], 'CLASSPATH:')
for line in lines[1:]:
assert_true(line.startswith(' '))
def test_structure(self):
error = self._failing_import('NoneExisting')
message = "Importing 'NoneExisting' failed: ImportError: No module named NoneExisting"
expected = (message, self._get_traceback(error),
self._get_pythonpath(error), self._get_classpath(error))
assert_equals(unicode(error), '\n'.join(expected).strip())
def _failing_import(self, name):
importer = Importer().import_class_or_module
return assert_raises(DataError, importer, name)
def _get_traceback(self, error):
return '\n'.join(self._block(error, 'Traceback (most recent call last):',
'PYTHONPATH:'))
def _get_pythonpath(self, error):
return '\n'.join(self._block(error, 'PYTHONPATH:', 'CLASSPATH:'))
def _get_classpath(self, error):
return '\n'.join(self._block(error, 'CLASSPATH:'))
def _block(self, error, start, end=None):
include = False
for line in unicode(error).splitlines():
if line == end:
return
if line == start:
include = True
if include:
yield line
class TestSplitPathToModule(unittest.TestCase):
def _verify(self, file_name, expected_name):
path = abspath(file_name)
actual = ByPathImporter(None)._split_path_to_module(path)
assert_equals(actual, (dirname(path), expected_name))
def test_normal_file(self):
self._verify('hello.py', 'hello')
self._verify('hello.class', 'hello')
self._verify('hello.world.java', 'hello.world')
def test_jython_class_file(self):
self._verify('hello$py.class', 'hello')
self._verify('__init__$py.class', '__init__')
def test_directory(self):
self._verify('hello', 'hello')
self._verify('hello'+os.sep, 'hello')
class TestInstantiation(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
if exists(TESTDIR):
shutil.rmtree(TESTDIR)
def test_when_importing_by_name(self):
from ExampleLibrary import ExampleLibrary
lib = Importer().import_class_or_module('ExampleLibrary',
instantiate_with_args=())
assert_true(not inspect.isclass(lib))
assert_true(isinstance(lib, ExampleLibrary))
def test_with_arguments(self):
lib = Importer().import_class_or_module('libswithargs.Mixed', range(5))
assert_equals(lib.get_args(), (0, 1, '2 3 4'))
def test_when_importing_by_path(self):
path = create_temp_file('args.py', extra_content='class args: a=1')
lib = Importer().import_class_or_module_by_path(path, ())
assert_true(not inspect.isclass(lib))
assert_equals(lib.__class__.__name__, 'args')
assert_equals(lib.a, 1)
def test_instantiate_failure(self):
err = assert_raises(DataError, Importer().import_class_or_module,
'ExampleLibrary', ['accepts', 'no', 'args'])
assert_true(unicode(err).startswith("Importing 'ExampleLibrary' failed: "
"Creating instance failed: TypeError:"))
def test_modules_do_not_take_arguments(self):
path = create_temp_file('no_args_allowed.py')
assert_raises_with_msg(DataError,
"Importing '%s' failed: Modules do not take arguments." % path,
Importer().import_class_or_module_by_path,
path, ['invalid'])
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2018-03
import logging
import math
import re
import os
from functools import partial
from typing import List, Optional, Tuple
from PyQt5.QtGui import QPen, QBrush, QTextDocument, QFont, QFontMetrics, QPalette
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QSize, QEventLoop, QObject, QTimer, QVariant, pyqtSlot, QModelIndex, Qt, QRect, QPoint, \
QMargins
from PyQt5.QtWidgets import QPushButton, QToolButton, QWidgetItem, QSpacerItem, QLayout, QHBoxLayout, QLineEdit, \
QLabel, QComboBox, QMenu, QMessageBox, QVBoxLayout, QCheckBox, QItemDelegate, QStyleOptionViewItem, QStyle
import app_cache
import app_utils
import dash_utils
from app_defs import FEE_DUFF_PER_BYTE, MIN_TX_FEE
from common import CancelException
from encrypted_files import write_file_encrypted, read_file_encrypted
from hw_intf import HwSessionInfo
from wallet_common import TxOutputType, Bip44AccountType, Bip44AddressType, TxType
from wnd_utils import WndUtils, is_color_dark
OUTPUT_VALUE_UNIT_AMOUNT = 'AMT'
OUTPUT_VALUE_UNIT_PERCENT = 'PCT'
MAX_DATA_FILE_SIZE = 10000000
CSV_SEPARATOR = ';'
CACHE_ITEM_DATA_FILE_MRU_LIST = 'SendFundsDestination_DataFileMRUList'
log = logging.getLogger('dmt.wallet_dlg')
class SendFundsDestinationItem(QObject):
sig_remove_address = QtCore.pyqtSignal(object)
sig_use_all_funds = QtCore.pyqtSignal(object)
sig_amount_changed = QtCore.pyqtSignal(object)
def __init__(self, parent, app_config, grid_layout, row_index, address_widget_width):
QObject.__init__(self)
self.app_config = app_config
self.main_layout = grid_layout
self.row_number = row_index
self.values_unit = OUTPUT_VALUE_UNIT_AMOUNT
self.value_amount = None
self.value_percent = None
        self.inputs_total_amount = None  # sum of all inputs (used to compute percent-type values)
self.address_widget_width = address_widget_width
self.setupUi(parent)
def setupUi(self, Form):
self.lbl_dest_address = QLabel(Form)
self.lbl_dest_address.setText("Address")
self.lbl_dest_address.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.main_layout.addWidget(self.lbl_dest_address, self.row_number, 0)
self.edt_dest_address = QLineEdit(Form)
self.edt_dest_address.setMinimumWidth(self.address_widget_width)
self.main_layout.addWidget(self.edt_dest_address, self.row_number, 1)
self.lbl_amount = QLabel(Form)
self.lbl_amount.setText("Amount")
self.main_layout.addWidget(self.lbl_amount, self.row_number, 2)
self.lay_amount = QHBoxLayout()
self.lay_amount.setContentsMargins(0, 0, 0, 0)
self.lay_amount.setSpacing(0)
self.main_layout.addLayout(self.lay_amount, self.row_number, 3)
self.edt_amount = QLineEdit(Form)
self.edt_amount.setFixedWidth(100)
self.edt_amount.textChanged.connect(self.on_edt_amount_changed)
self.lay_amount.addWidget(self.edt_amount)
self.btn_use_all = QToolButton(Form)
self.btn_use_all.setText('\u2912')
self.btn_use_all.setFixedSize(14, self.edt_amount.sizeHint().height())
self.btn_use_all.setToolTip('Use remaining funds')
self.btn_use_all.clicked.connect(self.on_btn_use_all_funds_clicked)
self.lay_amount.addWidget(self.btn_use_all)
# label for the second unit (e.g. percent if self.values_unit equals OUTPUT_VALUE_UNIT_AMOUNT)
self.lbl_second_unit_value = QLabel(Form)
self.lbl_second_unit_value.setTextInteractionFlags(
QtCore.Qt.LinksAccessibleByMouse | QtCore.Qt.TextSelectableByMouse)
self.main_layout.addWidget(self.lbl_second_unit_value, self.row_number, 4)
self.btn_remove_address = QToolButton(Form)
self.btn_remove_address.setFixedSize(self.edt_amount.sizeHint().height(), self.edt_amount.sizeHint().height())
self.main_layout.addWidget(self.btn_remove_address, self.row_number, 5)
self.btn_remove_address.setStyleSheet("QToolButton{color: red}")
self.btn_remove_address.setVisible(False)
self.btn_remove_address.clicked.connect(self.on_btn_remove_address_clicked)
self.btn_remove_address.setText('\u2716') # 2501, 2716
self.btn_remove_address.setToolTip("Remove address")
def set_btn_remove_address_visible(self, visible):
self.btn_remove_address.setVisible(visible)
def on_btn_remove_address_clicked(self):
self.sig_remove_address.emit(self)
def on_btn_use_all_funds_clicked(self):
self.sig_use_all_funds.emit(self)
def on_edt_amount_changed(self, text):
try:
value = round(float(self.edt_amount.text()), 8)
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
self.value_amount = value
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
self.value_percent = value
self.re_calculate_second_unit_value()
self.sig_amount_changed.emit(self)
except Exception:
pass
def get_value(self, default_value=None):
"""
:param default_value: value that will be returned if the value entered by a user is invalid or empty
"""
amount = self.edt_amount.text()
if amount:
try:
return float(amount)
except Exception:
pass
return default_value
def get_value_amount(self):
return self.value_amount
def set_value(self, value):
old_state = self.edt_amount.blockSignals(True)
try:
if value == '':
value = None
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
self.value_amount = value
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
self.value_percent = value
self.re_calculate_second_unit_value()
self.edt_amount.setText(app_utils.to_string(value))
finally:
self.edt_amount.blockSignals(old_state)
self.edt_amount.update()
def display_second_unit_value(self):
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
if self.value_percent is not None:
self.lbl_second_unit_value.setText(app_utils.to_string(round(self.value_percent, 3)) + '%')
else:
self.lbl_second_unit_value.setText('')
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
if self.value_amount is not None:
self.lbl_second_unit_value.setText(app_utils.to_string(round(self.value_amount, 8)) + ' Dash')
else:
self.lbl_second_unit_value.setText('')
def re_calculate_second_unit_value(self):
try:
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
# calculate the percent-value based on the inputs total amount and our item's amount
if self.inputs_total_amount and self.value_amount is not None:
self.value_percent = round(self.value_amount * 100 / self.inputs_total_amount, 8)
self.display_second_unit_value()
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
# calculate the amount value based on inputs total amount and our item's percent-value
if self.inputs_total_amount is not None and self.value_percent is not None:
self.value_amount = round(math.floor(self.inputs_total_amount * self.value_percent * 1e8 / 100) / 1e8,
8)
self.display_second_unit_value()
except Exception as e:
raise
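    # Worked example of the amount <-> percent conversion above (illustrative
    # numbers): with inputs_total_amount = 2.0 Dash and value_amount = 0.5, the
    # percent value becomes 0.5 * 100 / 2.0 = 25.0. Going the other way,
    # value_percent = 25.0 gives floor(2.0 * 25.0 * 1e8 / 100) / 1e8 = 0.5 Dash;
    # the floor() keeps the amount from exceeding the available funds by a
    # fraction of a duff.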
def set_inputs_total_amount(self, amount):
self.inputs_total_amount = amount
self.re_calculate_second_unit_value()
def set_address(self, address):
self.edt_dest_address.setText(address)
def get_address(self):
addr = self.edt_dest_address.text()
if addr:
addr = addr.strip()
return addr
def set_output_value_unit(self, unit):
old_state = self.edt_amount.blockSignals(True)
try:
if unit == OUTPUT_VALUE_UNIT_AMOUNT:
self.edt_amount.setText(app_utils.to_string(self.value_amount))
self.lbl_amount.setText('value')
elif unit == OUTPUT_VALUE_UNIT_PERCENT:
self.edt_amount.setText(app_utils.to_string(self.value_percent))
self.lbl_amount.setText('pct. value')
else:
raise Exception('Invalid unit')
self.values_unit = unit
self.display_second_unit_value()
finally:
self.edt_amount.blockSignals(old_state)
self.edt_amount.update()
def set_style_sheet(self):
style = 'QLineEdit[invalid="true"]{border: 1px solid red}'
self.edt_dest_address.setStyleSheet(style)
self.edt_amount.setStyleSheet(style)
def validate(self):
valid = True
address = self.edt_dest_address.text()
if not address:
valid = False
elif not dash_utils.validate_address(address.strip(), self.app_config.dash_network):
valid = False
else:
self.message = None
if valid:
self.edt_dest_address.setProperty('invalid', False)
else:
self.edt_dest_address.setProperty('invalid', True)
amount = self.edt_amount.text()
try:
amount = float(amount)
if amount > 0.0:
self.edt_amount.setProperty('invalid', False)
else:
self.edt_amount.setProperty('invalid', True)
valid = False
except:
self.edt_amount.setProperty('invalid', True)
valid = False
self.set_style_sheet()
return valid
def clear_validation_results(self):
self.edt_amount.setProperty('invalid', False)
self.edt_dest_address.setProperty('invalid', False)
self.set_style_sheet()
class SendFundsDestination(QtWidgets.QWidget, WndUtils):
resized_signal = QtCore.pyqtSignal()
def __init__(self, parent, parent_dialog, app_config, hw_session: HwSessionInfo):
QtWidgets.QWidget.__init__(self, parent)
WndUtils.__init__(self, app_config=app_config)
self.parent_dialog = parent_dialog
self.hw_session = hw_session
self.recipients: List[SendFundsDestinationItem] = []
self.change_addresses: List[Tuple[str, str]] = [] # List[Tuple[address, bip32 path]]
self.change_controls_visible = True
self.address_widget_width = 150
self.inputs_total_amount = 0.0
self.fee_amount = 0.0
self.add_to_fee = 0.0
self.inputs_count = 0
self.change_amount = 0.0
self.values_unit = OUTPUT_VALUE_UNIT_AMOUNT
self.tm_calculate_change_value = QTimer(self)
self.current_file_name = ''
self.current_file_encrypted = False
self.recent_data_files = [] # recent used data files
self.setupUi(self)
def setupUi(self, Form):
self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding))
self.lay_main = QtWidgets.QVBoxLayout(Form)
self.lay_main.setContentsMargins(6, 6, 6, 6)
self.lay_main.setSpacing(3)
# 'totals' area:
self.lbl_totals = QLabel(Form)
self.lbl_totals.setTextInteractionFlags(
QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lay_main.addWidget(self.lbl_totals)
# output definition data file labels:
self.lay_data_file = QHBoxLayout()
self.lay_data_file.setContentsMargins(0, 0, 0, 6)
self.lay_main.addItem(self.lay_data_file)
self.lbl_data_file_name = QLabel(Form)
self.lay_data_file.addWidget(self.lbl_data_file_name)
self.lbl_data_file_badge = QLabel(Form)
self.lay_data_file.addWidget(self.lbl_data_file_badge)
self.lbl_data_file_name.setTextInteractionFlags(
QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lbl_data_file_badge.setTextInteractionFlags(
QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lay_data_file.addStretch()
# actions/options area:
self.lay_actions = QHBoxLayout()
self.lay_actions.setSpacing(6)
self.lay_actions.setContentsMargins(0, 0, 0, 0)
self.lay_main.addItem(self.lay_actions)
self.btn_add_recipient = QPushButton(Form)
self.btn_add_recipient.clicked.connect(partial(self.add_dest_address, 1))
self.btn_add_recipient.setAutoDefault(False)
self.btn_add_recipient.setText("Add recipient")
self.lay_actions.addWidget(self.btn_add_recipient)
#
self.btn_actions = QPushButton(Form)
self.btn_actions.clicked.connect(partial(self.add_dest_address, 1))
self.btn_actions.setAutoDefault(False)
self.btn_actions.setText("Actions")
self.lay_actions.addWidget(self.btn_actions)
# context menu for the 'Actions' button
self.mnu_actions = QMenu()
self.btn_actions.setMenu(self.mnu_actions)
a = self.mnu_actions.addAction("Load from file...")
a.triggered.connect(self.on_read_from_file_clicked)
self.mnu_recent_files = self.mnu_actions.addMenu('Recent files')
self.mnu_recent_files.setVisible(False)
a = self.mnu_actions.addAction("Save to encrypted file...")
a.triggered.connect(partial(self.save_to_file, True))
a = self.mnu_actions.addAction("Save to plain CSV file...")
a.triggered.connect(partial(self.save_to_file, False))
a = self.mnu_actions.addAction("Clear recipients")
a.triggered.connect(self.clear_outputs)
self.lbl_output_unit = QLabel(Form)
self.lbl_output_unit.setText('Values as')
self.lay_actions.addWidget(self.lbl_output_unit)
self.cbo_output_unit = QComboBox(Form)
self.cbo_output_unit.addItems(['amount', 'percentage'])
self.cbo_output_unit.setCurrentIndex(0)
self.cbo_output_unit.currentIndexChanged.connect(self.on_cbo_output_unit_change)
self.lay_actions.addWidget(self.cbo_output_unit)
self.lay_actions.addStretch(0)
# scroll area for send to (destination) addresses
self.scroll_area = QtWidgets.QScrollArea()
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setMinimumHeight(30)
self.scroll_area.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding))
self.scroll_area.setFrameShape(QtWidgets.QFrame.NoFrame)
self.lay_main.addWidget(self.scroll_area)
self.scroll_area_widget = QtWidgets.QWidget()
self.scroll_area_widget.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding))
self.lay_scroll_area = QtWidgets.QVBoxLayout()
self.lay_scroll_area.setContentsMargins(0, 0, 0, 0)
self.lay_scroll_area.setSpacing(0)
self.scroll_area_widget.setLayout(self.lay_scroll_area)
self.scroll_area.setWidget(self.scroll_area_widget)
# grid layout for destination addresses and their corresponding controls:
self.lay_addresses = QtWidgets.QGridLayout()
self.lay_addresses.setSpacing(3)
self.lay_addresses.setContentsMargins(0, 0, 0, 0)
self.lay_scroll_area.addLayout(self.lay_addresses)
# the last row of the grid layout is dedicated to 'fee' controls
self.lbl_fee = QLabel(self.scroll_area_widget)
self.lbl_fee.setText('Fee [Dash]')
self.lbl_fee.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lay_addresses.addWidget(self.lbl_fee, 1, 0)
# the fee value editbox with the 'use default' button:
self.lay_fee_value = QHBoxLayout()
self.lay_fee_value.setContentsMargins(0, 0, 0, 0)
self.lay_fee_value.setSpacing(0)
self.lay_addresses.addItem(self.lay_fee_value, 1, 1)
self.edt_fee_value = QLineEdit(self.scroll_area_widget)
self.edt_fee_value.setFixedWidth(100)
self.edt_fee_value.textChanged.connect(self.on_edt_fee_value_textChanged)
self.lay_fee_value.addWidget(self.edt_fee_value)
self.btn_get_default_fee = QToolButton(self.scroll_area_widget)
self.btn_get_default_fee.setText('\u2605')
self.btn_get_default_fee.setFixedSize(14, self.edt_fee_value.sizeHint().height())
self.btn_get_default_fee.setToolTip('Use default fee')
self.btn_get_default_fee.clicked.connect(self.on_btn_get_default_fee_clicked)
self.lay_fee_value.addWidget(self.btn_get_default_fee)
self.lbl_change_label = QLabel(self.scroll_area_widget)
self.lbl_change_label.setText(' The change: ')
self.lay_fee_value.addWidget(self.lbl_change_label)
self.lbl_change_value = QLabel(self.scroll_area_widget)
self.lbl_change_value.setText('')
self.lay_fee_value.addWidget(self.lbl_change_value)
self.lay_fee_value.addStretch(0)
        # below the addresses grid, place a label dedicated to displaying messages
self.lbl_message = QLabel(Form)
self.lbl_message.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lbl_message.setVisible(False)
self.lay_main.addWidget(self.lbl_message)
        # add one 'send to' address row (in most cases it will be sufficient)
self.add_dest_address(1)
# load last used file names from cache
mru = app_cache.get_value(CACHE_ITEM_DATA_FILE_MRU_LIST, default_value=[], type=list)
if isinstance(mru, list):
for file_name in mru:
if os.path.exists(file_name):
self.recent_data_files.append(file_name)
self.update_mru_menu_items()
self.retranslateUi(Form)
def retranslateUi(self, Form):
pass
def sizeHint(self):
sh = self.lay_scroll_area.sizeHint()
marg_sl = self.lay_scroll_area.getContentsMargins()
marg_ml = self.lay_main.getContentsMargins()
if self.lbl_message.isVisible():
msg_height = self.lbl_message.height()
else:
msg_height = 0
sh.setHeight(sh.height() + marg_sl[1] + marg_sl[3] + self.lay_actions.sizeHint().height() +
self.lbl_totals.sizeHint().height() +
self.lay_data_file.sizeHint().height() +
((self.lay_main.count() - 1) * self.lay_main.spacing()) + marg_ml[1] + marg_ml[3] + msg_height)
return sh
def display_message(self, message, color: Optional[str] = None):
if message:
self.lbl_message.setText(message)
if color:
self.lbl_message.setStyleSheet(f'QLabel{{color:{color}}}')
changed_visibility = self.lbl_message.isVisible() != True
self.lbl_message.setVisible(True)
else:
changed_visibility = self.lbl_message.isVisible() != False
self.lbl_message.setVisible(False)
if changed_visibility:
QtWidgets.qApp.processEvents(QEventLoop.ExcludeUserInputEvents)
self.resized_signal.emit()
def move_grid_layout_row(self, from_row, to_row):
for col_idx in range(self.lay_addresses.columnCount()):
item = self.lay_addresses.itemAtPosition(from_row, col_idx)
if item:
if isinstance(item, QWidgetItem):
w = item.widget()
self.lay_addresses.removeWidget(w)
self.lay_addresses.addWidget(w, to_row, col_idx)
elif isinstance(item, QLayout):
self.lay_addresses.removeItem(item)
self.lay_addresses.addItem(item, to_row, col_idx)
elif isinstance(item, QSpacerItem):
self.lay_addresses.removeItem(item)
self.lay_addresses.addItem(item, to_row, col_idx)
else:
raise Exception('Invalid item type')
def add_dest_address(self, address_count: int = 1):
        # make room in the grid layout for the new addresses, just after the last row related to the dest addresses
for row_idx in reversed(range(len(self.recipients), self.lay_addresses.rowCount())):
self.move_grid_layout_row(row_idx, row_idx + address_count)
for nr in range(address_count):
rcp_item = SendFundsDestinationItem(self.scroll_area_widget,
self.app_config,
self.lay_addresses,
len(self.recipients),
self.address_widget_width)
rcp_item.sig_remove_address.connect(self.remove_dest_address)
rcp_item.sig_use_all_funds.connect(self.use_all_funds_for_address)
rcp_item.sig_amount_changed.connect(self.on_dest_amount_changed)
rcp_item.set_output_value_unit(self.values_unit)
rcp_item.set_inputs_total_amount(self.inputs_total_amount - self.fee_amount)
self.recipients.append(rcp_item)
QtWidgets.qApp.processEvents(QEventLoop.ExcludeUserInputEvents)
self.resized_signal.emit()
self.show_hide_remove_buttons()
self.update_change_and_fee()
def remove_dest_address(self, address_item):
row_idx = self.recipients.index(address_item)
# remove all widgets related to the 'send to' address that is being removed
for col_idx in range(self.lay_addresses.columnCount()):
item = self.lay_addresses.itemAtPosition(row_idx, col_idx)
WndUtils.remove_item_from_layout(self.lay_addresses, item)
# move up all rows greater than the row being removed
for row in range(row_idx + 1, len(self.recipients)):
self.move_grid_layout_row(row, row - 1 )
del self.recipients[row_idx]
QtWidgets.qApp.processEvents(QEventLoop.ExcludeUserInputEvents)
self.resized_signal.emit()
self.show_hide_remove_buttons()
self.update_change_and_fee()
def use_all_funds_for_address(self, address_item):
row_idx = self.recipients.index(address_item)
sum = 0.0
left = 0.0
# sum all the funds in all rows other than the current one
for idx, addr in enumerate(self.recipients):
if idx != row_idx:
sum += addr.get_value(default_value=0.0)
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
left = self.inputs_total_amount - sum - self.fee_amount
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
left = 100.0 - sum
left = round(left, 8) + 0.0
if left < 0:
left = 0.0
address_item.set_value(left)
self.change_amount = 0.0
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
self.update_fee()
ch = self.change_amount = self.calculate_the_change()
if ch < 0:
self.change_amount = ch
self.update_the_change_ui()
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
            # in this mode, due to the pct -> dash conversion for each of the outputs, there can be a remainder
            # left over, which has to be added to the change or the fee, depending on its value
self.update_change_and_fee()
def on_dest_amount_changed(self, dest_item: SendFundsDestinationItem):
self.debounce_call('dest_amount', self.update_change_and_fee, 400)
def get_number_of_recipients(self):
if self.change_amount > 0.0:
change_recipient = 1
else:
change_recipient = 0
return len(self.recipients) + change_recipient
def calculate_the_change(self) -> float:
"""Returns the change value in Dash."""
sum = 0.0
for idx, addr in enumerate(self.recipients):
amt = addr.get_value(default_value=0.0)
sum += amt
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
change_amount = round(self.inputs_total_amount - sum - self.fee_amount, 8) + 0 # eliminate -0.0
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
sum_amount = 0.0
for idx, addr in enumerate(self.recipients):
amt = addr.get_value_amount()
if amt:
sum_amount += amt
change_amount = round(self.inputs_total_amount - self.fee_amount - sum_amount, 8) + 0
else:
raise Exception('Invalid unit')
return change_amount
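    # Example with illustrative numbers: inputs_total_amount = 1.0, fee_amount =
    # 0.00000374 and two recipients entered as 0.4 and 0.5 give a change of
    # round(1.0 - 0.9 - 0.00000374, 8) = 0.09999626; the trailing '+ 0' turns a
    # possible -0.0 into 0.0.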
def calculate_fee(self, change_amount = None) -> float:
if change_amount is None:
change_amount = self.change_amount
recipients_count = len(self.recipients)
if change_amount > 0.0:
recipients_count += 1
if self.app_config.is_testnet:
fee_multiplier = 10 # in testnet large transactions tend to get stuck if the fee is "normal"
else:
fee_multiplier = 1
if self.inputs_total_amount > 0.0:
bytes = (self.inputs_count * 148) + (recipients_count * 34) + 10
fee = round(bytes * FEE_DUFF_PER_BYTE, 8)
if not fee:
fee = MIN_TX_FEE
fee = round(fee * fee_multiplier / 1e8, 8)
else:
fee = 0.0
return fee
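    # Fee arithmetic sketch (FEE_DUFF_PER_BYTE comes from app_defs; the rate of
    # 1 duff/byte below is only an assumption for illustration): 2 inputs and
    # 2 outputs give an estimated size of 2*148 + 2*34 + 10 = 374 bytes, i.e.
    # 374 duffs = 0.00000374 Dash on mainnet, or ten times that on testnet
    # because of the fee_multiplier above.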
def set_total_value_to_recipients(self):
for addr in self.recipients:
addr.set_inputs_total_amount(self.inputs_total_amount - self.fee_amount)
addr.clear_validation_results()
def update_change_and_fee(self):
self.fee_amount = self.calculate_fee()
recipients_count = self.get_number_of_recipients()
self.set_total_value_to_recipients()
self.change_amount = self.calculate_the_change()
self.add_to_fee = 0.0
if 0 < self.change_amount < 0.00000010:
self.add_to_fee = self.change_amount
self.change_amount = 0.0
if recipients_count != self.get_number_of_recipients():
            # the fee was previously calculated for a different number of outputs,
            # so recalculate it
self.fee_amount = self.calculate_fee()
self.set_total_value_to_recipients()
self.change_amount = self.calculate_the_change()
fee_and_reminder = round(self.fee_amount + self.add_to_fee, 8)
# apply the fee and the change values
edt_fee_old_state = self.edt_fee_value.blockSignals(True)
try:
self.edt_fee_value.setText(app_utils.to_string(fee_and_reminder))
finally:
self.edt_fee_value.blockSignals(edt_fee_old_state)
self.update_the_change_ui()
self.display_totals()
def update_fee(self):
self.fee_amount = self.calculate_fee()
self.set_total_value_to_recipients()
self.add_to_fee = 0.0
# apply the fee and the change values
edt_fee_old_state = self.edt_fee_value.blockSignals(True)
try:
self.edt_fee_value.setText(app_utils.to_string(round(self.fee_amount, 8)))
finally:
self.edt_fee_value.blockSignals(edt_fee_old_state)
self.update_the_change_ui()
self.display_totals()
def update_change(self):
self.change_amount = self.calculate_the_change()
self.add_to_fee = 0.0
        # the change value is shown through lbl_change_value in update_the_change_ui();
        # this widget has no separate editable change-amount field
self.update_the_change_ui()
self.display_totals()
def update_the_change_ui(self):
if self.change_amount <= 0:
self.lbl_change_label.setVisible(False)
self.lbl_change_value.setVisible(False)
else:
self.lbl_change_label.setVisible(True)
self.lbl_change_value.setVisible(True)
val_str = app_utils.to_string(round(self.change_amount, 8))
self.lbl_change_value.setText(val_str)
def read_fee_value_from_ui(self):
text = self.edt_fee_value.text()
if not text:
text = '0.0'
try:
self.fee_amount = float(text)
self.set_total_value_to_recipients()
self.update_change()
except Exception:
self.display_message('Invalid \'transaction fee\' value.', 'red') # display error message
def on_edt_fee_value_textChanged(self, text):
self.debounce_call('fee_value', self.read_fee_value_from_ui, 400)
def show_hide_change_address(self, visible):
if visible != self.change_controls_visible:
row_nr = self.lay_addresses.rowCount() - 1
if row_nr >= 0:
for col_idx in range(self.lay_addresses.columnCount()):
item = self.lay_addresses.itemAtPosition(row_nr, col_idx)
if item:
if isinstance(item, QWidgetItem):
item.widget().setVisible(visible)
elif isinstance(item, (QSpacerItem, QHBoxLayout, QVBoxLayout)):
pass
else:
raise Exception('Invalid item type')
self.change_controls_visible = visible
QtWidgets.qApp.processEvents(QEventLoop.ExcludeUserInputEvents)
self.resized_signal.emit()
def show_hide_remove_buttons(self):
visible = len(self.recipients) > 1
for item in self.recipients:
item.set_btn_remove_address_visible(visible)
def set_input_amount(self, amount, inputs_count):
self.inputs_count = inputs_count
if amount != self.inputs_total_amount or inputs_count != self.inputs_count:
            # if there is only one recipient address and its current amount equals the
            # previous input amount, assign the new value to it
last_total_amount = self.inputs_total_amount
last_fee_amount = self.fee_amount
self.inputs_total_amount = amount
self.change_amount = 0.0
self.fee_amount = self.calculate_fee()
if (len(self.recipients) == 1 or
self.recipients[0].get_value(default_value=0.0) == 0.0 or
self.recipients[0].get_value(default_value=0.0) == round(last_total_amount - last_fee_amount, 8)):
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
amount_minus_fee = round(amount - self.fee_amount, 8)
if amount_minus_fee < 0:
amount_minus_fee = 0.0
self.recipients[0].set_value(amount_minus_fee)
elif self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
self.recipients[0].set_value(100.0)
for addr in self.recipients:
addr.set_inputs_total_amount(amount - self.fee_amount)
addr.clear_validation_results()
self.update_change_and_fee()
def validate_output_data(self) -> bool:
ret = True
for addr in self.recipients:
if not addr.validate():
ret = False
if not ret:
self.display_message('Data of at least one recipient is invalid or empty. '
'Please correct the data to continue.', 'red')
else:
self.display_message('')
return ret
def on_btn_get_default_fee_clicked(self):
self.update_change_and_fee()
def set_dest_addresses(self, addresses: List):
if len(addresses) > 0:
count_diff = len(addresses) - len(self.recipients)
if count_diff > 0:
self.add_dest_address(count_diff)
elif count_diff < 0:
# remove unnecessary rows, starting from the last one
for nr in reversed(range(len(addresses), len(self.recipients))):
self.remove_dest_address(self.recipients[nr])
for idx, addr_item in enumerate(self.recipients):
if isinstance(addresses[idx], (list, tuple)):
# passed address-value tuple
if len(addresses[idx]) >= 1:
addr_item.set_address(addresses[idx][0])
if len(addresses[idx]) >= 2:
addr_item.set_value(addresses[idx][1])
else:
addr_item.set_address(addresses[idx])
self.display_totals()
def on_cbo_output_unit_change(self, index):
if index == 0:
self.values_unit = OUTPUT_VALUE_UNIT_AMOUNT
else:
self.values_unit = OUTPUT_VALUE_UNIT_PERCENT
for addr_item in self.recipients:
addr_item.set_output_value_unit(self.values_unit)
self.update_change_and_fee()
def update_ui_value_unit(self):
if self.values_unit == OUTPUT_VALUE_UNIT_AMOUNT:
self.cbo_output_unit.setCurrentIndex(0)
else:
self.cbo_output_unit.setCurrentIndex(1)
def simplyfy_file_home_dir(self, file_name):
# shorten the displayed path by replacing the user's home directory prefix with '~'
home_dir = os.path.expanduser('~')
if file_name.find(home_dir) == 0:
file_name = '~' + file_name[len(home_dir):]
return file_name
def display_totals(self):
recipients = self.get_number_of_recipients()
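# rough size estimate for a legacy P2PKH transaction: ~148 bytes per input,
# ~34 bytes per output plus ~10 bytes of overhead (an approximation, not the exact serialized size)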
bytes = (self.inputs_count * 148) + (recipients * 34) + 10
text = f'<span class="label"><b>Total value of the selected inputs:</b> </span><span class="value"> {self.inputs_total_amount} Dash </span>'
if self.inputs_total_amount > 0:
text += f'<span class="label"> <b>Inputs:</b> </span><span class="value"> {self.inputs_count} </span>' \
f'<span class="label"> <b>Outputs:</b> </span><span class="value"> {recipients} </span>' \
f'<span class="label"> <b>Transaction size:</b> </span><span class="value"> {bytes} B </span>'
self.lbl_totals.setText(text)
if self.current_file_name:
file_name = self.simplyfy_file_home_dir(self.current_file_name)
text = f'<span class="label"><b>File:</b> </span><span class="value">{file_name} </span>'
self.lbl_data_file_name.setText(text)
self.lbl_data_file_name.setVisible(True)
self.lbl_data_file_badge.setVisible(True)
if self.current_file_encrypted:
self.lbl_data_file_badge.setText('Encrypted')
self.lbl_data_file_badge.setStyleSheet("QLabel{background-color:#2eb82e;color:white; padding: 1px 3px 1px 3px; border-radius: 3px;}")
else:
self.lbl_data_file_badge.setText('Not encrypted')
self.lbl_data_file_badge.setStyleSheet("QLabel{background-color:orange;color:white; padding: 1px 3px 1px 3px; border-radius: 3px;}")
else:
self.lbl_data_file_name.setVisible(False)
self.lbl_data_file_badge.setVisible(False)
def clear_outputs(self):
if WndUtils.query_dlg("Do you really want to clear all outputs?", default_button=QMessageBox.Cancel,
icon=QMessageBox.Warning) == QMessageBox.Ok:
self.set_dest_addresses([('', '')])
self.use_all_funds_for_address(self.recipients[0])
self.current_file_name = ''
self.update_mru_menu_items()
self.display_totals()
def save_to_file(self, save_encrypted):
if self.current_file_name and os.path.exists(os.path.dirname(self.current_file_name)):
dir = os.path.dirname(self.current_file_name)
else:
dir = self.app_config.data_dir
if save_encrypted:
initial_filter = "DAT files (*.dat)"
else:
initial_filter = "CSV files (*.csv)"
file_filter = f"{initial_filter};;All Files (*)"
file_name = WndUtils.save_file_query(
self.parent_dialog,
self.app_config,
message='Enter the file name to save the data.',
directory=dir,
filter=file_filter,
initial_filter=initial_filter)
if file_name:
data = bytes()
data += b'RECIPIENT_ADDRESS\tVALUE\n'
if self.values_unit == OUTPUT_VALUE_UNIT_PERCENT:
suffix = '%'
else:
suffix = ''
for addr in self.recipients:
line = f'{addr.get_address()}{CSV_SEPARATOR}{str(addr.get_value(default_value=""))}{suffix}\n'
data += line.encode('utf-8')
if save_encrypted:
try:
write_file_encrypted(file_name, self.hw_session, data)
except CancelException:
return
else:
with open(file_name, 'wb') as f_ptr:
f_ptr.write(data)
self.current_file_name = file_name
self.current_file_encrypted = save_encrypted
self.add_menu_item_to_mru(self.current_file_name)
self.update_mru_menu_items()
self.display_totals()
def on_read_from_file_clicked(self):
try:
if self.current_file_name and os.path.exists(os.path.dirname(self.current_file_name)):
dir = os.path.dirname(self.current_file_name)
else:
dir = self.app_config.data_dir
initial_filter1 = "DAT files (*.dat)"
initial_filter2 = "CSV files (*.csv)"
file_filter = f"{initial_filter1};;{initial_filter2};;All Files (*.*)"
file_name = WndUtils.open_file_query(
self.parent_dialog,
self.app_config,
message='Enter the file name to read the data.',
directory=dir,
filter=file_filter,
initial_filter='All Files (*.*)')
if file_name:
self.read_from_file(file_name)
except Exception as e:
self.parent_dialog.error_msg(str(e))
def read_from_file(self, file_name):
try:
file_info = {}
data_decrypted = bytearray()
try:
for block in read_file_encrypted(file_name, file_info, self.hw_session):
data_decrypted.extend(block)
except CancelException:
return
file_encrypted = file_info.get('encrypted', False)
data = data_decrypted.decode('utf-8')
addresses = []
value_unit = None
for line_idx, line in enumerate(data.split('\n')):
if line:
elems = line.split('\t')
if len(elems) < 2:
elems = line.split(';')
if len(elems) < 2:
raise ValueError(f'Invalid data file entry for line: {line_idx+1}.')
address = elems[0].strip()
value = elems[1].strip()
address_valid = dash_utils.validate_address(address, dash_network=None)
if not address_valid:
if line_idx == 0 and re.match(r'^[A-Za-z_]+$', address):
continue # header line
else:
raise ValueError(f'Invalid recipient address ({address}) (line {line_idx+1}).')
if value.endswith('%'):
vu = OUTPUT_VALUE_UNIT_PERCENT
value = value[:-1]
else:
vu = OUTPUT_VALUE_UNIT_AMOUNT
if value_unit is None:
value_unit = vu
elif value_unit != vu:
raise ValueError(f'The value unit in line {line_idx+1} differs from the previous '
f'line.')
try:
if value:
value = float(value.replace(',', '.'))
else:
value = None
addresses.append((address, value))
except Exception as e:
raise ValueError(f'Invalid data in the \'value\' field (line {line_idx+1}).')
if len(addresses) == 0:
raise Exception('File doesn\'t contain any recipient\'s data.')
else:
if self.values_unit != value_unit:
self.values_unit = value_unit
self.update_ui_value_unit()
self.set_dest_addresses(addresses)
self.current_file_name = file_name
self.current_file_encrypted = file_encrypted
self.add_menu_item_to_mru(self.current_file_name)
self.update_mru_menu_items()
self.update_change_and_fee()
except Exception as e:
self.update_mru_menu_items()
logging.exception('Exception while reading file with recipients data.')
self.parent_dialog.error_msg(str(e))
def add_menu_item_to_mru(self, file_name: str) -> None:
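"""Moves (or inserts) the given file name to the front of the recently-used list and persists the list in the application cache."""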
if file_name:
try:
if file_name in self.recent_data_files:
idx = self.recent_data_files.index(file_name)
del self.recent_data_files[idx]
self.recent_data_files.insert(0, file_name)
else:
self.recent_data_files.insert(0, file_name)
app_cache.set_value(CACHE_ITEM_DATA_FILE_MRU_LIST, self.recent_data_files)
except Exception as e:
logging.warning(str(e))
def update_mru_menu_items(self):
app_utils.update_mru_menu_items(self.recent_data_files, self.mnu_recent_files,
self.on_data_file_mru_action_triggered,
self.current_file_name,
self.on_act_clear_mru_items)
def on_act_clear_mru_items(self):
self.recent_data_files.clear()
app_cache.set_value(CACHE_ITEM_DATA_FILE_MRU_LIST, self.recent_data_files)
self.update_mru_menu_items()
def on_data_file_mru_action_triggered(self, file_name: str) -> None:
""" Triggered by clicking one of the subitems of the 'Open Recent' menu item. Each subitem is
related to one of recently openend data files.
:param file_name: A data file name accociated with the menu action clicked.
"""
self.read_from_file(file_name)
def get_tx_destination_data(self) -> List[TxOutputType]:
if self.validate_output_data():
if self.change_amount < 0.0:
raise Exception('Not enough funds!!!')
dest_data = []
for addr in self.recipients:
out = TxOutputType()
out.address = addr.get_address()
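# convert the amount from Dash to duffs (1 Dash = 1e8 duffs, the Dash equivalent of satoshis)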
out.satoshis = round(addr.get_value_amount() * 1e8)
dest_data.append(out)
return dest_data
else:
return []
def get_recipients_list(self) -> List[Tuple[str,]]:
"""
:return: List of recipient addresses, each as a one-element tuple:
List[Tuple[str <address>]]
"""
dest_data = []
for addr in self.recipients:
dest_addr = addr.get_address()
if dest_addr:
dest_data.append((dest_addr,))
return dest_data
def get_tx_fee(self) -> int:
if self.fee_amount + self.add_to_fee < 0.0:
raise Exception('Invalid fee value.')
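# the fee is returned in duffs (1 Dash = 1e8 duffs)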
return round((self.fee_amount + self.add_to_fee) * 1e8)
class WalletMnItemDelegate(QItemDelegate):
"""
"""
CellVerticalMargin = 3
CellHorizontalMargin = 6
CellLinesMargin = 2
def __init__(self, parent):
QItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
return None
def paint(self, painter, option: QStyleOptionViewItem, index: QModelIndex):
if index.isValid():
has_focus = self.parent().hasFocus()
mn = index.data()
painter.save()
painter.setPen(QPen(Qt.NoPen))
if option.state & QStyle.State_Selected:
if has_focus:
primary_color = option.palette.color(QPalette.Normal, option.palette.HighlightedText)
secondary_color = primary_color
painter.setBrush(QBrush(option.palette.color(QPalette.Active, option.palette.Highlight)))
else:
primary_color = option.palette.color(QPalette.Inactive, option.palette.HighlightedText)
secondary_color = primary_color
painter.setBrush(QBrush(option.palette.color(QPalette.Inactive, option.palette.Highlight)))
else:
painter.setBrush(QBrush(option.palette.color(QPalette.Normal, option.palette.Base)))
primary_color = option.palette.color(QPalette.Normal, option.palette.WindowText)
secondary_color = option.palette.color(QPalette.Disabled, option.palette.WindowText)
painter.drawRect(option.rect)
# draw the masternode description
option.font.setBold(True)
painter.setPen(QPen(primary_color))
painter.setFont(option.font)
r = option.rect
fm = option.fontMetrics
r.setLeft(r.left() + WalletMnItemDelegate.CellHorizontalMargin)
r.setTop(r.top() + WalletMnItemDelegate.CellVerticalMargin)
painter.drawText(r, Qt.AlignLeft, mn.masternode.name)
# draw the mn address balance below:
option.font.setBold(False)
option.font.setPointSize(option.font.pointSize() - 2)
painter.setPen(QPen(secondary_color))
painter.setFont(option.font)
r.setTop(r.top() + fm.height() + WalletMnItemDelegate.CellLinesMargin)
if mn.address.balance is not None:
balance_str = 'Balance: ' + app_utils.to_string(mn.address.balance / 1e8) + ' Dash'
else:
balance_str = 'Balance: unknown'
painter.drawText(r, Qt.AlignLeft, balance_str)
painter.restore()
def sizeHint(self, option, index):
sh = QItemDelegate.sizeHint(self, option, index)
fm = option.fontMetrics
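# reserve room for two text lines (masternode name and its balance) plus the cell margins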
h = WalletMnItemDelegate.CellVerticalMargin * 2 + WalletMnItemDelegate.CellLinesMargin
h += (fm.height() * 2) - 2
sh.setHeight(h)
return sh
class WalletAccountItemDelegate(QItemDelegate):
"""
"""
CellVerticalMargin = 2
CellHorizontalMargin = 2
CellLinesMargin = 2
def __init__(self, parent):
QItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
# don't provide an editor that would allow copying the address from here; for security
# reasons the address should be compared with the one read from the hardware wallet
return None
def paint(self, painter, option: QStyleOptionViewItem, index: QModelIndex):
if index.isValid():
has_focus = self.parent().hasFocus()
is_dark_theme = is_color_dark(option.palette.color(QPalette.Normal, option.palette.Window))
data = index.data()
painter.save()
painter.setPen(QPen(Qt.NoPen))
if option.state & QStyle.State_Selected:
if has_focus:
primary_color = option.palette.color(QPalette.Normal, option.palette.HighlightedText)
secondary_color = primary_color
painter.setBrush(QBrush(option.palette.color(QPalette.Normal, option.palette.Highlight)))
else:
primary_color = option.palette.color(QPalette.Inactive, option.palette.HighlightedText)
secondary_color = primary_color
painter.setBrush(QBrush(option.palette.color(QPalette.Inactive, option.palette.Highlight)))
else:
painter.setBrush(QBrush(option.palette.color(QPalette.Normal, option.palette.Base)))
primary_color = option.palette.color(QPalette.Normal, option.palette.WindowText)
secondary_color = option.palette.color(QPalette.Disabled, option.palette.WindowText)
painter.drawRect(option.rect)
r = option.rect
fm = option.fontMetrics
r.setLeft(r.left() + WalletMnItemDelegate.CellHorizontalMargin)
r.setTop(r.top() + WalletMnItemDelegate.CellVerticalMargin)
if isinstance(data, Bip44AccountType):
option.font.setBold(True)
painter.setPen(QPen(primary_color))
painter.setFont(option.font)
painter.drawText(r, Qt.AlignLeft, data.get_account_name())
option.font.setBold(False)
option.font.setPointSize(option.font.pointSize() - 2)
painter.setPen(QPen(secondary_color))
painter.setFont(option.font)
r.setTop(r.top() + fm.height() + WalletMnItemDelegate.CellLinesMargin)
if data.balance is not None:
balance_str = 'Balance: ' + app_utils.to_string(data.balance / 1e8) + ' Dash'
else:
balance_str = 'Balance: unknown'
painter.drawText(r, Qt.AlignLeft, balance_str)
elif isinstance(data, Bip44AddressType):
option.font.setPointSize(option.font.pointSize() - 2)
if option.state & QStyle.State_Selected:
if has_focus:
color = primary_color
else:
color = primary_color
else:
if data.balance > 0:
color = primary_color
else:
if data.received > 0:
color = secondary_color
else:
if is_dark_theme:
color = Qt.green
else:
color = Qt.darkGreen
painter.setPen(QPen(color))
painter.setFont(option.font)
fm = QFontMetrics(option.font)
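# prefix the address with its BIP44 chain/index: chain 0 for receiving addresses, chain 1 for change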
if not data.is_change:
idx_str = f'0/{data.address_index}: '
else:
idx_str = f'1/{data.address_index}: '
painter.drawText(r, Qt.AlignLeft, idx_str)
r.setLeft(r.left() + fm.width('1/000: '))
if data.label:
t = data.label
else:
t = data.address
painter.drawText(r, Qt.AlignLeft, t)
painter.restore()
def sizeHint(self, option, index):
sh = QItemDelegate.sizeHint(self, option, index)
if index.isValid():
data = index.data()
if isinstance(data, Bip44AccountType):
fm = option.fontMetrics
h = WalletMnItemDelegate.CellVerticalMargin * 2 + WalletMnItemDelegate.CellLinesMargin
h += (fm.height() * 2) - 2
sh.setHeight(h)
return sh
class TxSenderRecipientItemDelegate(QItemDelegate):
""" Displays a recipient data in the transactions table view. """
CellVerticalMargin = 2
CellHorizontalMargin = 2
CellLinesMargin = 2
def __init__(self, parent, is_sender):
QItemDelegate.__init__(self, parent)
self.is_sender = is_sender
def createEditor(self, parent, option, index):
e = None
if index.isValid():
tx = index.data()
if not tx:
return
if self.is_sender:
addr_list = tx.sender_addrs
else:
addr_list = tx.recipient_addrs
e = QLineEdit(parent)
e.setReadOnly(True)
addrs = []
for a in addr_list:
if isinstance(a, Bip44AddressType):
addrs.append(a.address)
else:
addrs.append(a)
e.setText(', '.join(addrs))
return e
def paint(self, painter, option: QStyleOptionViewItem, index: QModelIndex):
if index.isValid():
has_focus = self.parent().hasFocus()
is_dark_theme = is_color_dark(option.palette.color(QPalette.Normal, option.palette.Window))
tx = index.data()
if not tx:
return
if self.is_sender:
addr_list = tx.sender_addrs
else:
addr_list = tx.recipient_addrs
painter.save()
painter.setPen(QPen(Qt.NoPen))
if option.state & QStyle.State_Selected:
if has_focus:
painter.setBrush(QBrush(option.palette.color(QPalette.Active, option.palette.Highlight)))
else:
painter.setBrush(QBrush(option.palette.color(QPalette.Inactive, option.palette.Highlight)))
else:
painter.setBrush(QBrush(option.palette.color(QPalette.Normal, option.palette.Base)))
painter.drawRect(option.rect)
r = option.rect
r.setLeft(r.left() + TxSenderRecipientItemDelegate.CellHorizontalMargin)
r.setTop(r.top() + TxSenderRecipientItemDelegate.CellVerticalMargin)
if isinstance(addr_list, list):
painter.setFont(option.font)
fm = option.fontMetrics
if addr_list:
for addr in addr_list:
if option.state & QStyle.State_Selected:
if has_focus:
fg_color = option.palette.color(QPalette.Normal, option.palette.HighlightedText)
else:
fg_color = option.palette.color(QPalette.Inactive, option.palette.HighlightedText)
else:
if isinstance(addr, Bip44AddressType):
if is_dark_theme:
fg_color = Qt.green
else:
fg_color = Qt.darkGreen
else:
fg_color = option.palette.color(QPalette.Normal, option.palette.WindowText)
if isinstance(addr, Bip44AddressType):
text = addr.address
else:
text = addr
painter.setPen(QPen(fg_color))
painter.drawText(r, Qt.AlignLeft, text)
r.setTop(r.top() + fm.height() + WalletMnItemDelegate.CellLinesMargin)
else:
if self.is_sender and tx.is_coinbase:
if option.state & QStyle.State_Selected:
if has_focus:
fg_color = option.palette.color(QPalette.Normal, option.palette.HighlightedText)
else:
fg_color = option.palette.color(QPalette.Inactive, option.palette.HighlightedText)
else:
fg_color = Qt.darkGray
painter.setPen(QPen(fg_color))
painter.drawText(r, Qt.AlignLeft, '[New coins]')
painter.restore()
def sizeHint(self, option, index):
sh = QItemDelegate.sizeHint(self, option, index)
if index.isValid():
tx = index.data()
if not tx:
return
if self.is_sender:
addr_list = tx.sender_addrs
else:
addr_list = tx.recipient_addrs
if isinstance(addr_list, list):
w = 0
fm = option.fontMetrics
ln = -1
for ln, a in enumerate(addr_list):
if isinstance(a, Bip44AddressType):
w = max(w, fm.width(a.address))
else:
w = max(w, fm.width(a))
ln += 1
if ln:
h = WalletMnItemDelegate.CellVerticalMargin * 2 + WalletMnItemDelegate.CellLinesMargin
h += (fm.height() * ln) - 2
sh.setHeight(h)
sh.setWidth(w)
return sh
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.estimator.inputs.pandas_import import HAS_PANDAS
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
if HAS_PANDAS:
# pylint: disable=g-import-not-at-top
import pandas as pd
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
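"""Starts queue runners, evaluates a single batch produced by input_fn and stops the runners."""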
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
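# the batch size exceeds the 2-row frame, so both epochs are returned in a single batch of 4 elements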
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
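"""Asserts that the input pipeline yields exactly n batches before raising OutOfRangeError."""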
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from xml.parsers import expat
from oslo.utils import timeutils
from oslo_config import cfg
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import api as volume_api
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hosts')
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('service-status')
elem.set('service')
elem.set('zone')
elem.set('service-state')
elem.set('host_name')
elem.set('last-update')
return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('status')
return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
elem = xmlutil.make_flat_dict('resource', selector='host',
subselector='resource')
root.append(elem)
return xmlutil.MasterTemplate(root, 1)
class HostDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
node = utils.safe_minidom_parse_string(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
updates = {}
for child in node.childNodes[0].childNodes:
updates[child.tagName] = self.extract_text(child)
return dict(body=updates)
def _list_hosts(req, service=None):
"""Returns a summary list of hosts."""
curr_time = timeutils.utcnow()
context = req.environ['cinder.context']
services = db.service_get_all(context, False)
zone = ''
if 'zone' in req.GET:
zone = req.GET['zone']
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for host in services:
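# a service is reported as available if its last heartbeat (updated_at)
# is no older than CONF.service_down_time seconds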
delta = curr_time - (host['updated_at'] or host['created_at'])
alive = abs(delta.total_seconds()) <= CONF.service_down_time
status = (alive and "available") or "unavailable"
active = 'enabled'
if host['disabled']:
active = 'disabled'
LOG.debug('status, active and update: %s, %s, %s',
status, active, host['updated_at'])
hosts.append({'host_name': host['host'],
'service': host['topic'],
'zone': host['availability_zone'],
'service-status': status,
'service-state': active,
'last-update': host['updated_at']})
if service:
hosts = [host for host in hosts
if host["service"] == service]
return hosts
def check_host(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, service=None, *args, **kwargs):
listed_hosts = _list_hosts(req, service)
hosts = [h["host_name"] for h in listed_hosts]
if id in hosts:
return fn(self, req, id, *args, **kwargs)
else:
message = _("Host '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=message)
return wrapped
class HostController(wsgi.Controller):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = volume_api.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
authorize(req.environ['cinder.context'])
return {'hosts': _list_hosts(req)}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostDeserializer)
@check_host
def update(self, req, id, body):
authorize(req.environ['cinder.context'])
update_values = {}
for raw_key, raw_val in body.iteritems():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
if val in ("enable", "disable"):
update_values['status'] = val.startswith("enable")
else:
explanation = _("Invalid status: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
else:
explanation = _("Invalid update setting: '%s'") % raw_key
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_setters = {'status': self._set_enabled_status}
result = {}
for key, value in update_values.iteritems():
result.update(update_setters[key](req, id, value))
return result
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.info(_LI("Setting host %(host)s to %(state)s."),
{'host': host, 'state': state})
result = self.api.set_host_enabled(context,
host=host,
enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
return {"host": host, "status": result}
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the volume usage info given by hosts.
:param context: security context
:param host: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'volume_count': 1, 'total_volume_gb': 2048}
"""
host = id
context = req.environ['cinder.context']
if not context.is_admin:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
try:
host_ref = db.service_get_by_host_and_topic(context,
host,
CONF.volume_topic)
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
# Getting total available/used resource
# TODO(jdg): Add summary info for Snapshots
volume_refs = db.volume_get_all_by_host(context, host_ref['host'])
(count, sum) = db.volume_data_get_for_host(context,
host_ref['host'])
snap_count_total = 0
snap_sum_total = 0
resources = [{'resource': {'host': host, 'project': '(total)',
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count_total),
'total_snapshot_gb': str(snap_sum_total)}}]
project_ids = [v['project_id'] for v in volume_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
(count, sum) = db.volume_data_get_for_project(context, project_id)
(snap_count, snap_sum) = db.snapshot_data_get_for_project(
context,
project_id)
resources.append(
{'resource':
{'host': host,
'project': project_id,
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count),
'total_snapshot_gb': str(snap_sum)}})
snap_count_total += int(snap_count)
snap_sum_total += int(snap_sum)
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total)
return {"host": resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration."""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={
'update': 'PUT'},
member_actions={
'startup': 'GET',
'shutdown': 'GET',
'reboot': 'GET'})]
return resources
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import girder_client
import json
import mock
import os
import shutil
import six
from girder import config
from tests import base
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')
config.loadConfig() # Must reload config to pickup correct port
def setUpModule():
plugins = os.environ.get('ENABLED_PLUGINS', '')
if plugins:
base.enabledPlugins.extend(plugins.split())
base.startServer(False)
def tearDownModule():
base.stopServer()
class PythonClientTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
def writeFile(dirName):
filename = os.path.join(dirName, 'f')
f = open(filename, 'w')
f.write(filename)
f.close()
# make some temp dirs and files
self.libTestDir = os.path.join(os.path.dirname(__file__),
'_libTestDir')
os.mkdir(self.libTestDir)
writeFile(self.libTestDir)
for subDir in range(0, 3):
subDirName = os.path.join(self.libTestDir, 'sub'+str(subDir))
os.mkdir(subDirName)
writeFile(subDirName)
def tearDown(self):
shutil.rmtree(self.libTestDir, ignore_errors=True)
base.TestCase.tearDown(self)
def testRestCore(self):
client = girder_client.GirderClient(port=os.environ['GIRDER_PORT'])
# Register a user
user = client.createResource('user', params={
'firstName': 'First',
'lastName': 'Last',
'login': 'mylogin',
'password': 'password',
'email': '[email protected]'
})
self.assertTrue(user['admin'])
# Test authentication with bad args
flag = False
try:
client.authenticate()
except Exception:
flag = True
self.assertTrue(flag)
# Test authentication failure
flag = False
try:
client.authenticate(username=user['login'], password='wrong')
except girder_client.AuthenticationError:
flag = True
self.assertTrue(flag)
# Interactive login (successfully)
with mock.patch('six.moves.input', return_value=user['login']),\
mock.patch('getpass.getpass', return_value='password'):
client.authenticate(interactive=True)
# /user/me should now return our user info
user = client.getResource('user/me')
self.assertEqual(user['login'], 'mylogin')
# Test HTTP error case
flag = False
try:
client.getResource('user/badId')
except girder_client.HttpError as e:
self.assertEqual(e.status, 400)
self.assertEqual(e.method, 'GET')
resp = json.loads(e.responseText)
self.assertEqual(resp['type'], 'validation')
self.assertEqual(resp['field'], 'id')
self.assertEqual(resp['message'], 'Invalid ObjectId: badId')
flag = True
self.assertTrue(flag)
# Test some folder routes
folders = client.listFolder(
parentId=user['_id'], parentFolderType='user')
self.assertEqual(len(folders), 2)
privateFolder = publicFolder = None
for folder in folders:
if folder['name'] == 'Public':
publicFolder = folder
elif folder['name'] == 'Private':
privateFolder = folder
self.assertNotEqual(privateFolder, None)
self.assertNotEqual(publicFolder, None)
self.assertEqual(client.getFolder(privateFolder['_id']), privateFolder)
acl = client.getFolderAccess(privateFolder['_id'])
self.assertIn('users', acl)
self.assertIn('groups', acl)
client.setFolderAccess(privateFolder['_id'], json.dumps(acl),
public=False)
self.assertEqual(acl, client.getFolderAccess(privateFolder['_id']))
# Test recursive ACL propagation (not very robust test yet)
client.createFolder(privateFolder['_id'], name='Subfolder')
client.inheritAccessControlRecursive(privateFolder['_id'])
def testUploadCallbacks(self):
callbackUser = self.model('user').createUser(
firstName='Callback', lastName='Last', login='callback',
password='password', email='[email protected]')
callbackPublicFolder = six.next(self.model('folder').childFolders(
parentType='user', parent=callbackUser, user=None, limit=1))
callback_counts = {'folder': 0, 'item': 0}
folders = {}
items = {}
folders[self.libTestDir] = False
folder_count = 1 # 1 for self.libTestDir
item_count = 0
for root, dirs, files in os.walk(self.libTestDir):
for name in files:
items[os.path.join(root, name)] = False
item_count += 1
for name in dirs:
folders[os.path.join(root, name)] = False
folder_count += 1
def folder_callback(folder, filepath):
self.assertIn(filepath, six.viewkeys(folders))
folders[filepath] = True
callback_counts['folder'] += 1
def item_callback(item, filepath):
self.assertIn(filepath, six.viewkeys(items))
items[filepath] = True
callback_counts['item'] += 1
client = girder_client.GirderClient(port=os.environ['GIRDER_PORT'])
client.authenticate('callback', 'password')
client.add_folder_upload_callback(folder_callback)
client.add_item_upload_callback(item_callback)
client.upload(self.libTestDir, callbackPublicFolder['_id'])
# make sure counts are the same (callbacks not called more than once)
# and that all folders and files have callbacks called on them
self.assertEqual(folder_count, callback_counts['folder'])
self.assertEqual(item_count, callback_counts['item'])
self.assertTrue(all(six.viewvalues(items)))
self.assertTrue(all(six.viewvalues(folders)))
# Upload again with reuse_existing on
existingList = list(self.model('folder').childFolders(
parentType='folder', parent=callbackPublicFolder,
user=callbackUser, limit=0))
client.upload(self.libTestDir, callbackPublicFolder['_id'],
reuse_existing=True)
newList = list(self.model('folder').childFolders(
parentType='folder', parent=callbackPublicFolder,
user=callbackUser, limit=0))
self.assertEqual(existingList, newList)
self.assertEqual(len(newList), 1)
self.assertEqual([f['name'] for f in self.model('folder').childFolders(
parentType='folder', parent=newList[0],
user=callbackUser, limit=0)], ['sub0', 'sub1', 'sub2'])
# Test upload via a file-like object into a folder
callbacks = []
path = os.path.join(self.libTestDir, 'sub0', 'f')
size = os.path.getsize(path)
def progressCallback(info):
callbacks.append(info)
with open(path) as f:
with self.assertRaises(girder_client.IncorrectUploadLengthError):
try:
client.uploadFile(
callbackPublicFolder['_id'], stream=f, name='test',
size=size + 1, parentType='folder')
except girder_client.IncorrectUploadLengthError as exc:
self.assertEqual(
exc.upload['received'], exc.upload['size'] - 1)
upload = self.model('upload').load(exc.upload['_id'])
self.assertEqual(upload, None)
raise
with open(path) as f:
file = client.uploadFile(
callbackPublicFolder['_id'], stream=f, name='test', size=size,
parentType='folder', progressCallback=progressCallback)
self.assertEqual(len(callbacks), 1)
self.assertEqual(callbacks[0]['current'], size)
self.assertEqual(callbacks[0]['total'], size)
self.assertEqual(file['name'], 'test')
self.assertEqual(file['size'], size)
self.assertEqual(file['mimeType'], 'application/octet-stream')
items = list(
self.model('folder').childItems(folder=callbackPublicFolder))
self.assertEqual(len(items), 1)
self.assertEqual(items[0]['name'], 'test')
files = list(self.model('item').childFiles(items[0]))
self.assertEqual(len(files), 1)
|
|
from rest_framework import status
from hs_core.hydroshare import resource
from hs_core.hydroshare.utils import resource_post_create_actions
from .base import HSRESTTestCase
class TestResourceScienceMetadata(HSRESTTestCase):
def setUp(self):
super(TestResourceScienceMetadata, self).setUp()
self.rtype = 'GenericResource'
self.title = 'My Test resource'
res = resource.create_resource(self.rtype,
self.user,
self.title)
self.pid = res.short_id
self.resources_to_delete.append(self.pid)
# create another resource for testing relation metadata
another_res = resource.create_resource('GenericResource',
self.user,
'My another Test resource')
self.pid2 = another_res.short_id
self.resources_to_delete.append(self.pid2)
def test_get_scimeta(self):
# Get the resource system metadata
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(res_id=self.pid)
response = self.client.get(sysmeta_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# content = json.loads(response.content)
def test_put_scimeta_generic_resource(self):
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(res_id=self.pid)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": None,
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"relations": [
{
"type": "isCopiedFrom",
"value": "https://www.hydroshare.org/resource/{}/".format(self.pid2)
},
{
"type": "isExecutedBy",
"value": "https://www.hydroshare.org/resource/{}/".format(self.pid2)
}
],
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_put_scimeta_generic_resource_double_none(self):
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(res_id=self.pid)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": None,
"organization": "Org 2"
}],
"creators": [
{
"name": "Creator",
"organization": None
},
{
"name": None,
"organization": None
}
],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_put_scimeta_composite_resource_with_core_metadata(self):
# testing bulk metadata update that includes only core metadata
# create a composite resource
self._create_resource(resource_type="CompositeResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_composite_resource_with_core_metadata_failure(self):
# testing bulk metadata update with only core metadata that includes coverage metadata
# coverage metadata can't be updated for composite resource - this bulk update should fail
# create a composite resource
self._create_resource(resource_type="CompositeResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.resource.delete()
def test_put_scimeta_timeseries_resource_with_core_metadata(self):
# testing bulk metadata update that includes only core metadata
# create a time series resource
self._create_resource(resource_type="TimeSeriesResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_timeseries_resource_with_core_metadata_failure(self):
# testing bulk metadata update with only core metadata that includes coverage metadata
# coverage metadata can't be updated for time series resource - this bulk update should fail
# create a time series resource
self._create_resource(resource_type="TimeSeriesResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.resource.delete()
def test_put_scimeta_netcdf_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update
# create a netcdf resource
netcdf_file = 'hs_core/tests/data/netcdf_valid.nc'
file_to_upload = open(netcdf_file, "r")
self._create_resource(resource_type="GenericResource", file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"originalcoverage": {
"value": {
"northlimit": '12', "projection": "transverse_mercator",
"units": "meter", "southlimit": '10',
"eastlimit": '23', "westlimit": '2'
},
"projection_string_text": '+proj=tmerc +lon_0=-111.0 +lat_0=0.0 +x_0=500000.0 '
'+y_0=0.0 +k_0=0.9996',
"projection_string_type": 'Proj4 String'
},
"variables": [
{
"name": "SWE",
"type": "Float",
"shape": "y,x,time",
"unit": "m",
"missing_value": "-9999",
"descriptive_name": "Snow water equivalent",
"method": "model simulation of UEB"
},
{
"name": "x",
"unit": "Centimeter"
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_netcdf_resource_without_core_metadata(self):
# testing bulk metadata update that only updates resource specific metadata
# create a netcdf resource
netcdf_file = 'hs_core/tests/data/netcdf_valid.nc'
file_to_upload = open(netcdf_file, "r")
self._create_resource(resource_type="GenericResource", file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"originalcoverage": {
"value": {
"northlimit": '12', "projection": "transverse_mercator",
"units": "meter", "southlimit": '10',
"eastlimit": '23', "westlimit": '2'
},
"projection_string_text": '+proj=tmerc +lon_0=-111.0 +lat_0=0.0 +x_0=500000.0 '
'+y_0=0.0 +k_0=0.9996',
"projection_string_type": 'Proj4 String'
},
"variables": [
{
"name": "SWE",
"type": "Float",
"shape": "y,x,time",
"unit": "m",
"missing_value": "-9999",
"descriptive_name": "Snow water equivalent",
"method": "model simulation of UEB"
},
{
"name": "x",
"unit": "Centimeter"
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_raster_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update (Note: the only resource specific metadata element that can be updated
# is BandInformation)
# create a raster resource
raster_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(raster_file, "r")
self._create_resource(resource_type="RasterResource", file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"bandinformations": [
{'original_band_name': 'Band_1',
'name': 'Band_2',
'variableName': 'digital elevation',
'variableUnit': 'meter',
'method': 'this is method',
'comment': 'this is comment',
'maximumValue': 1000,
'minimumValue': 0,
'noDataValue': -9999
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_raster_resource_without_core_metadata(self):
# testing bulk metadata update that includes only resource specific
# metadata update (Note: the only resource specific metadata element that can be updated
# is BandInformation)
# create a raster resource
raster_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(raster_file, "r")
self._create_resource(resource_type="RasterResource", file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"bandinformations": [
{'original_band_name': 'Band_1',
'name': 'Band_2',
'variableName': 'digital elevation',
'variableUnit': 'meter',
'method': 'this is method',
'comment': 'this is comment',
'maximumValue': 1000,
'minimumValue': 0,
'noDataValue': -9999
}
]
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_modelprogram_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update
# create a model program resource
some_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(some_file, "r")
self._create_resource(resource_type="ModelProgramResource", file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"mpmetadata": {
"modelVersion": "5.1.011",
"modelProgramLanguage": "Fortran",
"modelOperatingSystem": "Windows",
"modelReleaseDate": "2016-10-24T21:05:00.315907+00:00",
"modelWebsite": "http://www.hydroshare.org",
"modelCodeRepository": "http://www.github.com",
"modelReleaseNotes": "releaseNote.pdf",
"modelDocumentation": "manual.pdf",
"modelSoftware": "utilities.exe",
"modelEngine": "sourceCode.zip"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_modelprogram_resource_without_core_metadata(self):
# testing bulk metadata update that only updates resource specific
# metadata
# create a model program resource
some_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(some_file, "r")
self._create_resource(resource_type="ModelProgramResource", file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"mpmetadata": {
"modelVersion": "5.1.011",
"modelProgramLanguage": "Fortran",
"modelOperatingSystem": "Windows",
"modelReleaseDate": "2016-10-24T21:05:00.315907+00:00",
"modelWebsite": "http://www.hydroshare.org",
"modelCodeRepository": "http://www.github.com",
"modelReleaseNotes": "releaseNote.pdf",
"modelDocumentation": "manual.pdf",
"modelSoftware": "utilities.exe",
"modelEngine": "sourceCode.zip"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_modelinstance_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update
# create a model instance resource
some_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(some_file, "r")
self._create_resource(resource_type="ModelInstanceResource", file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"modeloutput": {"includes_output": False},
"executedby": {"model_name": "id of a an existing model program resource"}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_modelinstance_resource_without_core_metadata(self):
# testing bulk metadata update that updates only resource specific metadata
# create a model instance resource
some_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(some_file, "r")
self._create_resource(resource_type="ModelInstanceResource", file_to_upload=file_to_upload)
# create a model program resource to link as executed by
model_program_resource = resource.create_resource(
resource_type="ModelProgramResource",
owner=self.user,
title="A model program resource",
files=(file_to_upload,)
)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"modeloutput": {"includes_output": True},
"executedby": {"model_name": model_program_resource.short_id}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
model_program_resource.delete()
def test_put_scimeta_modflowinstance_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update
# create a MODFLOW model instance resource
some_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(some_file, "r")
self._create_resource(resource_type="MODFLOWModelInstanceResource",
file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"modeloutput": {"includes_output": False},
"executedby": {"model_name": "id of a an existing model program resource"},
"studyarea": {
"totalLength": 1111,
"totalWidth": 2222,
"maximumElevation": 3333,
"minimumElevation": 4444
},
"griddimensions": {
"numberOfLayers": 5555,
"typeOfRows": "Irregular",
"numberOfRows": 6666,
"typeOfColumns": "Regular",
"numberOfColumns": 7777
},
"stressperiod": {
"stressPeriodType": "Steady and Transient",
"steadyStateValue": 8888,
"transientStateValueType": "Monthly",
"transientStateValue": 9999
},
"groundwaterflow": {
"flowPackage": "LPF",
"flowParameter": "Hydraulic Conductivity"
},
"boundarycondition": {
"specified_head_boundary_packages": ["CHD", "FHB"],
"specified_flux_boundary_packages": ["FHB", "WEL"],
"head_dependent_flux_boundary_packages": ["RIV", "MNW1"]
},
"modelcalibration": {
"calibratedParameter": "test parameter",
"observationType": "test observation type",
"observationProcessPackage": "GBOB",
"calibrationMethod": "test calibration method"
},
"modelinputs": [
{
"inputType": "test input type",
"inputSourceName": "test source name",
"inputSourceURL": "http://www.test.com"
}
],
"generalelements": {
"modelParameter": "test model parameter",
"modelSolver": "SIP",
"output_control_package": ["HYD", "OC"],
"subsidencePackage": "SWT"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_modflowinstance_resource_without_core_metadata(self):
# testing bulk metadata update that updates only the resource specific
# metadata
# create a MODFLOW model instance resource
some_file = 'hs_core/tests/data/cea.tif'
file_to_upload = open(some_file, "r")
self._create_resource(resource_type="MODFLOWModelInstanceResource",
file_to_upload=file_to_upload)
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"modeloutput": {"includes_output": False},
"executedby": {"model_name": "id of a an existing model program resource"},
"studyarea": {
"totalLength": 1111,
"totalWidth": 2222,
"maximumElevation": 3333,
"minimumElevation": 4444
},
"griddimensions": {
"numberOfLayers": 5555,
"typeOfRows": "Irregular",
"numberOfRows": 6666,
"typeOfColumns": "Regular",
"numberOfColumns": 7777
},
"stressperiod": {
"stressPeriodType": "Steady and Transient",
"steadyStateValue": 8888,
"transientStateValueType": "Monthly",
"transientStateValue": 9999
},
"groundwaterflow": {
"flowPackage": "LPF",
"flowParameter": "Hydraulic Conductivity"
},
"boundarycondition": {
"specified_head_boundary_packages": ["CHD", "FHB"],
"specified_flux_boundary_packages": ["FHB", "WEL"],
"head_dependent_flux_boundary_packages": ["RIV", "MNW1"]
},
"modelcalibration": {
"calibratedParameter": "test parameter",
"observationType": "test observation type",
"observationProcessPackage": "GBOB",
"calibrationMethod": "test calibration method"
},
"modelinputs": [
{
"inputType": "test input type-1",
"inputSourceName": "test source name-1",
"inputSourceURL": "http://www.test-1.com"
},
{
"inputType": "test input type-2",
"inputSourceName": "test source name-2",
"inputSourceURL": "http://www.test-2.com"
}
],
"generalelements": {
"modelParameter": "test model parameter",
"modelSolver": "SIP",
"output_control_package": ["HYD", "OC"],
"subsidencePackage": "SWT"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_script_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update
# create a script resource
self._create_resource(resource_type="ScriptResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"scriptspecificmetadata": {
"scriptLanguage": "R",
"languageVersion": "3.5",
"scriptVersion": "1.0",
"scriptDependencies": "None",
"scriptReleaseDate": "2015-12-01 00:00",
"scriptCodeRepository": "http://www.google.com"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_script_resource_without_core_metadata(self):
# testing bulk metadata update for resource specific
# metadata only
# create a script resource
self._create_resource(resource_type="ScriptResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"scriptspecificmetadata": {
"scriptLanguage": "R",
"languageVersion": "3.5",
"scriptVersion": "1.0",
"scriptDependencies": "None",
"scriptReleaseDate": "2015-12-01 00:00",
"scriptCodeRepository": "http://www.google.com"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_SWATModelInstance_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update
# create a SWAT model resource
self._create_resource(resource_type="SWATModelInstanceResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"modeloutput": {"includes_output": False},
"executedby": {"model_name": "id of a an existing model program resource"},
"modelobjective": {
"swat_model_objectives": ["BMPs", "Hydrology", "Water quality"],
"other_objectives": "some other objectives"
},
"simulationtype": {
"simulation_type_name": "Normal Simulation"
},
"modelmethod": {
"runoffCalculationMethod": "A test calculation method",
"flowRoutingMethod": "A test flow routing method",
"petEstimationMethod": "A test estimation method"
},
"modelparameter": {
"model_parameters": ["Crop rotation", "Tillage operation"],
"other_parameters": "some other model parameters"
},
"modelinput": {
"warmupPeriodValue": 10,
"rainfallTimeStepType": "Daily",
"rainfallTimeStepValue": 5,
"routingTimeStepType": "Daily",
"routingTimeStepValue": 2,
"simulationTimeStepType": "Hourly",
"simulationTimeStepValue": 1,
"watershedArea": 1000,
"numberOfSubbasins": 200,
"numberOfHRUs": 10000,
"demResolution": 30,
"demSourceName": "Unknown",
"demSourceURL": "http://dem-source.org",
"landUseDataSourceName": "Unknown",
"landUseDataSourceURL": "http://land-data.org",
"soilDataSourceName": "Unknown",
"soilDataSourceURL": "http://soil-data.org"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_scimeta_SWATModelInstance_resource_without_core_metadata(self):
# testing bulk metadata update that includes only resource specific
# metadata update
# create a SWAT model resource
self._create_resource(resource_type="SWATModelInstanceResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"modeloutput": {"includes_output": False},
"executedby": {"model_name": "id of a an existing model program resource"},
"modelobjective": {
"swat_model_objectives": ["BMPs", "Hydrology", "Water quality"],
"other_objectives": "some other objectives"
},
"simulationtype": {
"simulation_type_name": "Normal Simulation"
},
"modelmethod": {
"runoffCalculationMethod": "A test calculation method",
"flowRoutingMethod": "A test flow routing method",
"petEstimationMethod": "A test estimation method"
},
"modelparameter": {
"model_parameters": ["Crop rotation", "Tillage operation"],
"other_parameters": "some other model parameters"
},
"modelinput": {
"warmupPeriodValue": 10,
"rainfallTimeStepType": "Daily",
"rainfallTimeStepValue": 5,
"routingTimeStepType": "Daily",
"routingTimeStepValue": 2,
"simulationTimeStepType": "Hourly",
"simulationTimeStepValue": 1,
"watershedArea": 1000,
"numberOfSubbasins": 200,
"numberOfHRUs": 10000,
"demResolution": 30,
"demSourceName": "Unknown",
"demSourceURL": "http://dem-source.org",
"landUseDataSourceName": "Unknown",
"landUseDataSourceURL": "http://land-data.org",
"soilDataSourceName": "Unknown",
"soilDataSourceURL": "http://soil-data.org"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_web_app_resource_with_core_metadata(self):
# testing bulk metadata update that includes both core metadata and resource specific
# metadata update
# create a web app resource
self._create_resource(resource_type="ToolResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"title": "New Title",
"description": "New Description",
"subjects": [
{"value": "subject1"},
{"value": "subject2"},
{"value": "subject3"}
],
"contributors": [{
"name": "Test Name 1",
"organization": "Org 1"
}, {
"name": "Test Name 2",
"organization": "Org 2"
}],
"creators": [{
"name": "Creator",
"organization": None
}],
"coverages": [{
"type": "box",
"value": {
"northlimit": 43.19716728247476,
"projection": "WGS 84 EPSG:4326",
"name": "A whole bunch of the atlantic ocean",
"units": "Decimal degrees",
"southlimit": 23.8858376999,
"eastlimit": -19.16015625,
"westlimit": -62.75390625
}
}],
"dates": [
{
"type": "valid",
"start_date": "2016-12-07T00:00:00Z",
"end_date": "2018-12-07T00:00:00Z"
}
],
"language": "fre",
"rights": "CCC",
"sources": [
{
"derived_from": "Source 3"
},
{
"derived_from": "Source 2"
}
],
"requesturlbase": {
"value": "https://www.google.com"
},
"toolversion": {
"value": "1.12"
},
"supportedrestypes": {
"supported_res_types": ["GenericResource"]
},
"supportedsharingstatuses": {
"sharing_status": ["Public", "Discoverable"]
},
"toolicon": {
"value": "https://www.hydroshare.org/static/img/logo-sm.png"
},
"apphomepageurl": {
"value": "https://mywebapp.com"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def test_put_web_app_resource_without_core_metadata(self):
# testing bulk metadata update that includes only resource specific
# metadata update
# create a web app resource
self._create_resource(resource_type="ToolResource")
sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
res_id=self.resource.short_id)
put_data = {
"requesturlbase": {
"value": "https://www.google.com"
},
"toolversion": {
"value": "1.12"
},
"supportedrestypes": {
"supported_res_types": ["GenericResource"]
},
"supportedsharingstatuses": {
"sharing_status": ["Public", "Discoverable"]
},
"toolicon": {
"value": "https://www.hydroshare.org/static/img/logo-sm.png"
},
"apphomepageurl": {
"value": "https://mywebapp.com"
}
}
response = self.client.put(sysmeta_url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.resource.delete()
def _create_resource(self, resource_type, file_to_upload=None):
files = ()
if file_to_upload is not None:
files = (file_to_upload,)
self.resource = resource.create_resource(
resource_type=resource_type,
owner=self.user,
title="Testing bulk metadata update for resource type - {}".format(resource_type),
files=files
)
resource_post_create_actions(resource=self.resource, user=self.user,
metadata=self.resource.metadata)
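    # Editor's sketch (not part of the original test class): every test above repeats the
    # same PUT-and-assert step, which could be factored into a helper like this one. All
    # names used here (self.client, self.resource, status) already appear in the tests above.
    def _put_scimeta(self, put_data):
        sysmeta_url = "/hydroshare/hsapi/resource/{res_id}/scimeta/elements/".format(
            res_id=self.resource.short_id)
        response = self.client.put(sysmeta_url, put_data, format='json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)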
|
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qcore.asserts import assert_eq, assert_is, assert_is_instance, AssertRaises
from asynq import asynq, async_proxy, is_pure_async_fn, async_call, ConstFuture
from asynq.decorators import (
lazy,
get_async_fn,
get_async_or_sync_fn,
make_async_decorator,
AsyncDecorator,
)
import pickle
def double_return_value(fun):
@asynq(pure=True)
def wrapper_fn(*args, **kwargs):
value = yield fun.asynq(*args, **kwargs)
return value * 2
return make_async_decorator(fun, wrapper_fn, "double_return_value")
@double_return_value
@asynq()
def square(x):
return x * x
class MyClass(object):
@asynq()
def method(self, number):
assert type(self) is MyClass
cls, one = self.get_cls_and_args(number)
assert cls is MyClass
assert one == number
cls, one = yield self.get_cls_and_args.asynq(number)
assert cls is MyClass
assert one == number
one = yield self.static(number)
assert one == number
one = yield self.static_ac.asynq(number)
assert one == number
cls, proxied = yield self.async_proxy_classmethod.asynq(number)
assert cls is MyClass
assert proxied == number
return self
@async_proxy()
@classmethod
def async_proxy_classmethod(cls, number):
return cls.get_cls_and_args.asynq(number)
@asynq()
@classmethod
def get_cls_and_args(cls, number):
print("get_cls_and_args")
assert cls.get_cls_ac() is cls
assert (yield cls.get_cls_ac.asynq()) is cls
assert cls.get_cls().value() is cls
assert (yield cls.get_cls()) is cls
return (cls, number)
@asynq()
@classmethod
def get_cls_ac(cls):
print("get_cls_ac")
return cls
@asynq(pure=True)
@classmethod
def get_cls(cls):
print("get_cls")
return cls
@asynq()
@staticmethod
def static_ac(number):
print("static_ac")
return number
@staticmethod
@asynq(pure=True)
def static(number):
print("static")
return number
@staticmethod
def sync_staticmethod():
return "sync_staticmethod"
@asynq(sync_fn=sync_staticmethod)
@staticmethod
def async_staticmethod():
return "async_staticmethod"
@classmethod
def sync_classmethod(cls):
return "sync_classmethod"
@asynq(sync_fn=sync_classmethod)
@classmethod
def async_classmethod(cls):
return "async_classmethod"
def sync_method(self):
return "sync_method"
@asynq(sync_fn=sync_method)
def async_method(self):
return "async_method"
@double_return_value
@asynq()
@classmethod
def square(cls, x):
return x * x
def sync_fn():
return "sync_fn"
@asynq(sync_fn=sync_fn)
def async_fn():
return "async_fn"
@asynq(pure=True)
def pure_async_fn():
return "pure_async_fn"
def sync_proxied_fn():
return "sync_proxied_fn"
@async_proxy(sync_fn=sync_proxied_fn)
def async_proxied_fn():
return ConstFuture("async_proxied_fn")
@lazy
def lazy_fn(a, b):
return a + b
def test_lazy():
future = lazy_fn(1, 2)
assert not future.is_computed()
assert_eq(3, future.value())
assert future.is_computed()
class DisallowSetting(object):
def fn(self):
return False
def __setattr__(self, attr, value):
raise AttributeError("cannot set attribute %s" % attr)
def test_is_pure_async_fn():
assert is_pure_async_fn(lazy_fn)
assert not is_pure_async_fn(test_lazy)
assert not is_pure_async_fn(async_fn)
assert is_pure_async_fn(pure_async_fn)
assert not is_pure_async_fn(DisallowSetting())
assert is_pure_async_fn(MyClass.get_cls)
assert not is_pure_async_fn(MyClass.get_cls_ac)
assert not is_pure_async_fn(AsyncDecorator)
def test_get_async_fn():
assert_eq(async_fn.asynq, get_async_fn(async_fn))
assert_eq(lazy_fn, get_async_fn(lazy_fn))
assert_is(None, get_async_fn(sync_fn))
wrapper = get_async_fn(sync_fn, wrap_if_none=True)
assert is_pure_async_fn(wrapper)
result = wrapper()
assert_is_instance(result, ConstFuture)
assert_eq("sync_fn", result.value())
def test_get_async_or_sync_fn():
assert_is(sync_fn, get_async_or_sync_fn(sync_fn))
assert_eq(async_fn.asynq, get_async_or_sync_fn(async_fn))
def test_async_proxy():
assert_eq("sync_proxied_fn", sync_proxied_fn())
assert_eq("sync_proxied_fn", async_proxied_fn())
result = async_proxied_fn.asynq()
assert_is_instance(result, ConstFuture)
assert_eq("async_proxied_fn", result.value())
with AssertRaises(AssertionError):
@async_proxy(pure=True, sync_fn=sync_proxied_fn)
def this_doesnt_make_sense():
pass
def test():
obj = MyClass()
assert obj is obj.method(1)
def test_staticmethod_sync_fn():
assert_eq("sync_staticmethod", MyClass.async_staticmethod())
assert_eq("async_staticmethod", MyClass.async_staticmethod.asynq().value())
def test_classmethod_sync_fn():
assert_eq("async_classmethod", MyClass.async_classmethod.asynq().value())
assert_eq("sync_classmethod", MyClass.async_classmethod())
def test_method_sync_fn():
instance = MyClass()
assert_eq("sync_method", instance.async_method())
assert_eq("async_method", instance.async_method.asynq().value())
def test_pickling():
pickled = pickle.dumps(async_fn)
unpickled = pickle.loads(pickled)
assert_eq("sync_fn", unpickled())
def test_async_call():
@asynq()
def f1(arg, kw=1):
return arg, kw
@asynq(pure=True)
def f2(arg, kw=1):
return arg, kw
def f3(arg, kw=1):
return arg, kw
for f in [f1, f2, f3]:
assert_eq((10, 1), async_call.asynq(f, 10).value())
assert_eq((10, 5), async_call.asynq(f, 10, 5).value())
assert_eq((10, 7), async_call.asynq(f, 10, kw=7).value())
def test_make_async_decorator():
assert_eq(18, square(3))
assert_eq(18, MyClass.square(3))
assert_eq(18, square.asynq(3).value())
assert_eq(18, MyClass.square.asynq(3).value())
assert not is_pure_async_fn(square)
assert_eq("@double_return_value()", square.name())
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_workspace_request(
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
workspace_name: str,
key_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"keyName": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id: str,
resource_group_name: str,
workspace_name: str,
key_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"keyName": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
subscription_id: str,
resource_group_name: str,
workspace_name: str,
key_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"keyName": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class KeysOperations(object):
"""KeysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_workspace(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> Iterable["_models.KeyInfoListResult"]:
"""Returns a list of keys in a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyInfoListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.KeyInfoListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyInfoListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_workspace_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_by_workspace.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_workspace_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("KeyInfoListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
workspace_name: str,
key_name: str,
**kwargs: Any
) -> "_models.Key":
"""Gets a workspace key.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param key_name: The name of the workspace key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Key, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.Key
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Key"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
key_name=key_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Key', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
key_name: str,
key_properties: "_models.Key",
**kwargs: Any
) -> "_models.Key":
"""Creates or updates a workspace key.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param key_name: The name of the workspace key.
:type key_name: str
:param key_properties: Key put request properties.
:type key_properties: ~azure.mgmt.synapse.models.Key
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Key, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.Key
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Key"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(key_properties, 'Key')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
key_name=key_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Key', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
workspace_name: str,
key_name: str,
**kwargs: Any
) -> Optional["_models.Key"]:
"""Deletes a workspace key.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param key_name: The name of the workspace key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Key, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.Key or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Key"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
key_name=key_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Key', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/keys/{keyName}'} # type: ignore
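# Usage sketch (editor's addition, not part of the generated client): per the class docstring,
# KeysOperations is reached through the management client rather than instantiated directly.
# The client and credential come from azure-mgmt-synapse and azure-identity; the ``keys``
# attribute name is an assumption based on this operation group.
def _example_list_workspace_keys(subscription_id, resource_group_name, workspace_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.synapse import SynapseManagementClient

    client = SynapseManagementClient(DefaultAzureCredential(), subscription_id)
    # list_by_workspace returns an ItemPaged iterator that transparently follows next links.
    return [key.name for key in client.keys.list_by_workspace(resource_group_name, workspace_name)]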
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a globalboostyd or globalboost-y-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BSTY values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
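# Editor's worked example (sketch, not in the original script): bitcoin.conf has no section
# headers, so FakeSecHead injects a fake "[all]" section and strips inline "#" comments
# before SafeConfigParser sees the file.
def _example_read_bitcoin_config(dbdir):
    conf = os.path.join(dbdir, "bitcoin.conf")
    with open(conf, "w") as fp:
        fp.write("rpcuser=alice\nrpcpassword=secret  # inline comment\ntestnet=1\n")
    settings = read_bitcoin_config(dbdir)
    assert settings == {"rpcuser": "alice", "rpcpassword": "secret", "testnet": "1"}
    return settings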
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the globalboostyd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(globalboostyd):
info = globalboostyd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
globalboostyd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = globalboostyd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(globalboostyd):
address_summary = dict()
address_to_account = dict()
for info in globalboostyd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = globalboostyd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = globalboostyd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-globalboost-y-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
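# Editor's worked example (sketch): with unspent outputs of 0.4 and 0.7 BSTY and 1.0 needed,
# the greedy loop above selects both outputs and reports 0.1 change.
def _example_select_coins():
    inputs = [{"txid": "aa", "vout": 0, "amount": Decimal("0.4")},
              {"txid": "bb", "vout": 1, "amount": Decimal("0.7")}]
    outputs, change = select_coins(Decimal("1.0"), inputs)
    assert len(outputs) == 2 and change == Decimal("0.1")
    return outputs, change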
def create_tx(globalboostyd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(globalboostyd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BSTY available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to globalboostyd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = globalboostyd.createrawtransaction(inputs, outputs)
signed_rawtx = globalboostyd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(globalboostyd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = globalboostyd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(globalboostyd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = globalboostyd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(globalboostyd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
fee = total_in - total_out
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
globalboostyd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(globalboostyd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(globalboostyd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(globalboostyd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(globalboostyd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = globalboostyd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, asarray, cos, exp, floor, pi, sign, sin, sqrt, sum,
size, tril, isnan, atleast_2d, repeat)
from numpy.testing import assert_almost_equal
from .go_benchmark import Benchmark
class CarromTable(Benchmark):
"""
CarromTable objective function.
The CarromTable [1]_ global optimization problem is a multimodal
minimization problem defined as follows:
.. math::
f_{\text{CarromTable}}(x) = - \frac{1}{30}\left(\cos(x_1)
cos(x_2) e^{\left|1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right)^2
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -24.15681551650653` for :math:`x_i = \pm
9.646157266348881` for :math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
self.global_optimum = [(9.646157266348881, 9.646134286497169),
(-9.646157266348881, 9.646134286497169),
(9.646157266348881, -9.646134286497169),
(-9.646157266348881, -9.646134286497169)]
self.fglob = -24.15681551650653
def fun(self, x, *args):
self.nfev += 1
u = cos(x[0]) * cos(x[1])
v = sqrt(x[0] ** 2 + x[1] ** 2)
return -((u * exp(abs(1 - v / pi))) ** 2) / 30.
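# Editor's usage sketch (not part of the benchmark suite): each class in this module follows
# the same Benchmark API -- instantiate, then call ``fun`` on a point; at an entry of
# ``global_optimum`` the returned value reproduces ``fglob``.
def _example_carromtable_optimum():
    bench = CarromTable()
    # asarray and assert_almost_equal are already imported at the top of this module;
    # the loose tolerance allows for the finite precision of the listed optimum.
    assert_almost_equal(bench.fun(asarray(bench.global_optimum[0])), bench.fglob, decimal=4)
    return bench.fglob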
class Chichinadze(Benchmark):
"""
Chichinadze objective function.
This class defines the Chichinadze [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Chichinadze}}(x) = x_{1}^{2} - 12 x_{1}
+ 8 \sin\left(\frac{5}{2} \pi x_{1}\right)
+ 10 \cos\left(\frac{1}{2} \pi x_{1}\right) + 11
- 0.2 \frac{\sqrt{5}}{e^{\frac{1}{2} \left(x_{2} -0.5 \right)^{2}}}
with :math:`x_i \in [-30, 30]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -42.94438701899098` for :math:`x =
[6.189866586965680, 0.5]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: Jamil#33 has a dividing factor of 2 in the sin term. However, f(x)
for the given solution does not give the global minimum. i.e. the equation
is at odds with the solution.
Only by removing the dividing factor of 2, i.e. `8 * sin(5 * pi * x[0])`
does the given solution result in the given global minimum.
Do we keep the result or equation?
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-30.0] * self.N, [30.0] * self.N)
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [[6.189866586965680, 0.5]]
self.fglob = -42.94438701899098
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 - 12 * x[0] + 11 + 10 * cos(pi * x[0] / 2)
+ 8 * sin(5 * pi * x[0] / 2)
- 1.0 / sqrt(5) * exp(-((x[1] - 0.5) ** 2) / 2))
class Cigar(Benchmark):
"""
Cigar objective function.
This class defines the Cigar [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Cigar}}(x) = x_1^2 + 10^6\sum_{i=2}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-100, 100]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-100.0] * self.N,
[100.0] * self.N)
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return x[0] ** 2 + 1e6 * sum(x[1:] ** 2)
class Cola(Benchmark):
"""
Cola objective function.
This class defines the Cola global optimization problem. The 17-dimensional
function computes indirectly the formula :math:`f(n, u)` by setting
:math:`x_0 = y_0, x_1 = u_0, x_i = u_{2(i-2)}, y_i = u_{2(i-2)+1}`:
.. math::
f_{\text{Cola}}(x) = \sum_{i<j}^{n} \left (r_{i,j} - d_{i,j} \right )^2
Where :math:`r_{i, j}` is given by:
.. math::
r_{i, j} = \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
And :math:`d` is a symmetric matrix given by:
.. math::
\{d} = \left [ d_{ij} \right ] = \begin{pmatrix}
1.27 & & & & & & & & \\
1.69 & 1.43 & & & & & & & \\
2.04 & 2.35 & 2.43 & & & & & & \\
3.09 & 3.18 & 3.26 & 2.85 & & & & & \\
3.20 & 3.22 & 3.27 & 2.88 & 1.55 & & & & \\
2.86 & 2.56 & 2.58 & 2.59 & 3.12 & 3.06 & & & \\
3.17 & 3.18 & 3.18 & 3.12 & 1.31 & 1.64 & 3.00 & \\
3.21 & 3.18 & 3.18 & 3.17 & 1.70 & 1.36 & 2.95 & 1.32 & \\
2.38 & 2.31 & 2.42 & 1.94 & 2.85 & 2.81 & 2.56 & 2.91 & 2.97
\end{pmatrix}
This function has bounds :math:`x_0 \in [0, 4]` and :math:`x_i \in [-4, 4]`
for :math:`i = 1, ..., n-1`.
*Global optimum*: :math:`f(x) = 11.7464`.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=17):
Benchmark.__init__(self, dimensions)
self._bounds = [[0.0, 4.0]] + list(zip([-4.0] * (self.N - 1),
[4.0] * (self.N - 1)))
self.global_optimum = [[0.651906, 1.30194, 0.099242, -0.883791,
-0.8796, 0.204651, -3.28414, 0.851188,
-3.46245, 2.53245, -0.895246, 1.40992,
-3.07367, 1.96257, -2.97872, -0.807849,
-1.68978]]
self.fglob = 11.7464
self.d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.27, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.69, 1.43, 0, 0, 0, 0, 0, 0, 0, 0],
[2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0, 0],
[3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0, 0],
[3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0, 0],
[2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0, 0],
[3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0, 0],
[3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0, 0],
[2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97, 0.]])
def fun(self, x, *args):
self.nfev += 1
xi = atleast_2d(asarray([0.0, x[0]] + list(x[1::2])))
xj = repeat(xi, size(xi, 1), axis=0)
xi = xi.T
yi = atleast_2d(asarray([0.0, 0.0] + list(x[2::2])))
yj = repeat(yi, size(yi, 1), axis=0)
yi = yi.T
inner = (sqrt(((xi - xj) ** 2 + (yi - yj) ** 2)) - self.d) ** 2
inner = tril(inner, -1)
return sum(sum(inner, axis=1))
class Colville(Benchmark):
"""
Colville objective function.
This class defines the Colville global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Colville}}(x) = \left(x_{1} -1\right)^{2}
+ 100 \left(x_{1}^{2} - x_{2}\right)^{2}
+ 10.1 \left(x_{2} -1\right)^{2} + \left(x_{3} -1\right)^{2}
+ 90 \left(x_{3}^{2} - x_{4}\right)^{2}
+ 10.1 \left(x_{4} -1\right)^{2} + 19.8 \frac{x_{4} -1}{x_{2}}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
:math:`i = 1, ..., 4`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO docstring equation is wrong use Jamil#36
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (100 * (x[0] - x[1] ** 2) ** 2
+ (1 - x[0]) ** 2 + (1 - x[2]) ** 2
+ 90 * (x[3] - x[2] ** 2) ** 2
+ 10.1 * ((x[1] - 1) ** 2 + (x[3] - 1) ** 2)
+ 19.8 * (x[1] - 1) * (x[3] - 1))
class Corana(Benchmark):
"""
Corana objective function.
This class defines the Corana [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Corana}}(x) = \begin{cases} \sum_{i=1}^n 0.15 d_i
[z_i - 0.05\textrm{sgn}(z_i)]^2 & \textrm{if }|x_i-z_i| < 0.05 \\
d_ix_i^2 & \textrm{otherwise}\end{cases}
Where, in this exercise:
.. math::
z_i = 0.2 \lfloor |x_i/s_i|+0.49999\rfloor\textrm{sgn}(x_i),
d_i=(1,1000,10,100, ...)
with :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., 4`
    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO, retrieved 2015
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-5.0] * self.N, [5.0] * self.N)
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
d = [1., 1000., 10., 100.]
r = 0
for j in range(4):
zj = floor(abs(x[j] / 0.2) + 0.49999) * sign(x[j]) * 0.2
if abs(x[j] - zj) < 0.05:
r += 0.15 * ((zj - 0.05 * sign(zj)) ** 2) * d[j]
else:
r += d[j] * x[j] * x[j]
return r
class CosineMixture(Benchmark):
"""
Cosine Mixture objective function.
This class defines the Cosine Mixture global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{CosineMixture}}(x) = -0.1 \sum_{i=1}^n \cos(5 \pi x_i)
- \sum_{i=1}^n x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-1, 1]` for :math:`i = 1, ..., N`.
    *Global optimum*: :math:`f(x) = -0.9N` for :math:`x_i = -1` for
    :math:`i = 1, ..., N`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
    .. note:: Jamil #38 reports a different minimiser and ``fglob``. For this
       implementation the minimum lies on the bound at :math:`x_i = -1`, where
       :math:`\cos(5 \pi x_i) = -1` and :math:`-x_i^2 = -1`, giving
       :math:`f = 0.1N - N = -0.9N`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self._bounds = zip([-1.0] * self.N, [1.0] * self.N)
self.global_optimum = [[-1. for _ in range(self.N)]]
self.fglob = -0.9 * self.N
def fun(self, x, *args):
self.nfev += 1
return -0.1 * sum(cos(5.0 * pi * x)) - sum(x ** 2.0)
class CrossInTray(Benchmark):
"""
Cross-in-Tray objective function.
This class defines the Cross-in-Tray [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{CrossInTray}}(x) = - 0.0001 \left(\left|{e^{\left|{100
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\right|}
\sin\left(x_{1}\right) \sin\left(x_{2}\right)}\right| + 1\right)^{0.1}
    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -2.062611870822739` for :math:`x_i =
\pm 1.349406608602084` for :math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
self.global_optimum = [(1.349406685353340, 1.349406608602084),
(-1.349406685353340, 1.349406608602084),
(1.349406685353340, -1.349406608602084),
(-1.349406685353340, -1.349406608602084)]
self.fglob = -2.062611870822739
def fun(self, x, *args):
self.nfev += 1
return (-0.0001 * (abs(sin(x[0]) * sin(x[1])
* exp(abs(100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi)))
+ 1) ** (0.1))
class CrossLegTable(Benchmark):
"""
Cross-Leg-Table objective function.
This class defines the Cross-Leg-Table [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{CrossLegTable}}(x) = - \frac{1}{\left(\left|{e^{\left|{100
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\right|}
\sin\left(x_{1}\right) \sin\left(x_{2}\right)}\right| + 1\right)^{0.1}}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -1`. The global minimum is found on the
planes :math:`x_1 = 0` and :math:`x_2 = 0`
    .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle
Swarm Methods: Evaluation on Some Benchmark Functions Munich University,
2006
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
self.global_optimum = [[0., 0.]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
u = 100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi
v = sin(x[0]) * sin(x[1])
return -(abs(v * exp(abs(u))) + 1) ** (-0.1)
class CrownedCross(Benchmark):
"""
Crowned Cross objective function.
This class defines the Crowned Cross [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{CrownedCross}}(x) = 0.0001 \left(\left|{e^{\left|{100
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\right|}
\sin\left(x_{1}\right) \sin\left(x_{2}\right)}\right| + 1\right)^{0.1}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x_i) = 0.0001`. The global minimum is found on
the planes :math:`x_1 = 0` and :math:`x_2 = 0`
    .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle
Swarm Methods: Evaluation on Some Benchmark Functions Munich University,
2006
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
self.global_optimum = [[0, 0]]
self.fglob = 0.0001
def fun(self, x, *args):
self.nfev += 1
u = 100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi
v = sin(x[0]) * sin(x[1])
return 0.0001 * (abs(v * exp(abs(u))) + 1) ** (0.1)
class Csendes(Benchmark):
"""
Csendes objective function.
This class defines the Csendes [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Csendes}}(x) = \sum_{i=1}^n x_i^6 \left[ 2 + \sin
\left( \frac{1}{x_i} \right ) \right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-1, 1]` for :math:`i = 1, ..., N`.
*Global optimum*: :math:`f(x) = 0.0` for :math:`x_i = 0` for
:math:`i = 1, ..., N`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self._bounds = zip([-1.0] * self.N, [1.0] * self.N)
self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = np.nan  # f(0) cannot be evaluated (sin(1/x) at x = 0); see success()
def fun(self, x, *args):
self.nfev += 1
try:
return sum((x ** 6.0) * (2.0 + sin(1.0 / x)))
except ZeroDivisionError:
return np.nan
except FloatingPointError:
return np.nan
    def success(self, x):
        """Is a candidate solution at the global minimum"""
        val = self.fun(asarray(x))
        if isnan(val):
            # The objective cannot be evaluated exactly at the optimum
            # (sin(1 / x) at x = 0), so a nan value counts as a success.
            return True
        try:
            assert_almost_equal(val, 0., 4)
            return True
        except AssertionError:
            return False
class Cube(Benchmark):
"""
Cube objective function.
This class defines the Cube global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\text{Cube}}(x) = 100(x_2 - x_1^3)^2 + (1 - x_1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i=1,...,N`.
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x = [1, 1]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: jamil#41 has the wrong solution.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
self.custom_bounds = ([0, 2], [0, 2])
self.global_optimum = [[1.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return 100.0 * (x[1] - x[0] ** 3.0) ** 2.0 + (1.0 - x[0]) ** 2.0
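# ----------------------------------------------------------------------------
# A minimal self-check sketch, not part of the benchmark suite itself. It
# assumes the Benchmark base class provides ``self.N`` and ``self.nfev`` as
# the methods above imply, and that NumPy is installed.
if __name__ == '__main__':
    import numpy as np
    corana = Corana()
    # Documented optimum: f(0, 0, 0, 0) = 0.
    assert abs(corana.fun(np.zeros(corana.N)) - corana.fglob) < 1e-12
    mix = CosineMixture()
    # f(-1, ..., -1) = -0.1 * (-N) - N = 0.1 * N - N = -0.9 * N.
    assert abs(mix.fun(-np.ones(mix.N)) - mix.fglob) < 1e-12
    print('benchmark sanity checks passed')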
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Ftrl')
class Ftrl(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the FTRL algorithm.
See Algorithm 1 of this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
This version has support for both online L2 (the L2 penalty given in the paper
above) and shrinkage-type L2 (which is the addition of an L2 penalty to the
loss function).
Initialization:
$$t = 0$$
$$n_{0} = 0$$
$$\sigma_{0} = 0$$
$$z_{0} = 0$$
Update ($$i$$ is variable index):
$$t = t + 1$$
$$n_{t,i} = n_{t-1,i} + g_{t,i}^{2}$$
$$\sigma_{t,i} = (\sqrt{n_{t,i}} - \sqrt{n_{t-1,i}}) / \alpha$$
$$z_{t,i} = z_{t-1,i} + g_{t,i} - \sigma_{t,i} * w_{t,i}$$
  $$w_{t,i} = - ((\beta+\sqrt{n_{t,i}}) / \alpha + \lambda_{2})^{-1} * (z_{i} -
     sgn(z_{i}) * \lambda_{1}) if |z_{i}| > \lambda_{1} else 0$$
  Check the documentation for the l2_shrinkage_regularization_strength
  parameter for more details on the case when shrinkage is enabled, in which
  gradient is replaced with gradient_with_shrinkage.
"""
def __init__(self,
learning_rate=0.001,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
name='Ftrl',
l2_shrinkage_regularization_strength=0.0,
**kwargs):
r"""Construct a new FTRL optimizer.
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.
      learning_rate_power: A float value, must be less than or equal to zero.
Controls how the learning rate decreases during training. Use zero for
a fixed learning rate.
initial_accumulator_value: The starting value for accumulators.
Only zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
l2_shrinkage_regularization_strength: A float value, must be greater than
or equal to zero. This differs from L2 above in that the L2 above is a
stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
The FTRL formulation can be written as:
w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
\hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
function w.r.t. the weights w.
Specifically, in the absence of L1 regularization, it is equivalent to
the following update rule:
w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
where lr_t is the learning rate at t.
        When input is sparse, shrinkage will only happen on the active weights.
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
Raises:
ValueError: If one of the arguments is invalid.
References
See [paper]
(https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)
"""
super(Ftrl, self).__init__(name, **kwargs)
if initial_accumulator_value < 0.0:
raise ValueError(
'initial_accumulator_value %f needs to be positive or zero' %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError('learning_rate_power %f needs to be negative or zero' %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
'l1_regularization_strength %f needs to be positive or zero' %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
'l2_regularization_strength %f needs to be positive or zero' %
l2_regularization_strength)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
'l2_shrinkage_regularization_strength %f needs to be positive'
' or zero' % l2_shrinkage_regularization_strength)
self._set_hyper('learning_rate', learning_rate)
self._set_hyper('decay', self._initial_decay)
self._set_hyper('learning_rate_power', learning_rate_power)
self._set_hyper('l1_regularization_strength', l1_regularization_strength)
self._set_hyper('l2_regularization_strength', l2_regularization_strength)
self._initial_accumulator_value = initial_accumulator_value
self._l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength)
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for var in var_list:
dtype = var.dtype.base_dtype
init = init_ops.constant_initializer(
self._initial_accumulator_value, dtype=dtype)
self.add_slot(var, 'accumulator', init)
self.add_slot(var, 'linear')
def _prepare_local(self, var_device, var_dtype, apply_state):
super(Ftrl, self)._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)].update(dict(
learning_rate_power=array_ops.identity(
self._get_hyper('learning_rate_power', var_dtype)),
l1_regularization_strength=array_ops.identity(
self._get_hyper('l1_regularization_strength', var_dtype)),
l2_regularization_strength=array_ops.identity(
self._get_hyper('l2_regularization_strength', var_dtype)),
l2_shrinkage_regularization_strength=math_ops.cast(
self._l2_shrinkage_regularization_strength, var_dtype)
))
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
accum = self.get_slot(var, 'accumulator')
linear = self.get_slot(var, 'linear')
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
coefficients['lr_t'],
coefficients['l1_regularization_strength'],
coefficients['l2_regularization_strength'],
coefficients['learning_rate_power'],
use_locking=self._use_locking)
else:
return training_ops.resource_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
coefficients['lr_t'],
coefficients['l1_regularization_strength'],
coefficients['l2_regularization_strength'],
coefficients['l2_shrinkage_regularization_strength'],
coefficients['learning_rate_power'],
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
accum = self.get_slot(var, 'accumulator')
linear = self.get_slot(var, 'linear')
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_sparse_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
coefficients['lr_t'],
coefficients['l1_regularization_strength'],
coefficients['l2_regularization_strength'],
coefficients['learning_rate_power'],
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
coefficients['lr_t'],
coefficients['l1_regularization_strength'],
coefficients['l2_regularization_strength'],
coefficients['l2_shrinkage_regularization_strength'],
coefficients['learning_rate_power'],
use_locking=self._use_locking)
def get_config(self):
config = super(Ftrl, self).get_config()
config.update({
'learning_rate':
self._serialize_hyperparameter('learning_rate'),
'decay':
self._serialize_hyperparameter('decay'),
'initial_accumulator_value':
self._initial_accumulator_value,
'learning_rate_power':
self._serialize_hyperparameter('learning_rate_power'),
'l1_regularization_strength':
self._serialize_hyperparameter('l1_regularization_strength'),
'l2_regularization_strength':
self._serialize_hyperparameter('l2_regularization_strength'),
'l2_shrinkage_regularization_strength':
self._l2_shrinkage_regularization_strength,
})
return config
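# ------------------------------------------------------------------------------
# A standalone NumPy sketch of the dense FTRL-proximal update documented in the
# class docstring above (learning_rate_power fixed at -0.5). It only illustrates
# the math; the optimizer itself calls the fused training_ops kernels. Names
# (z, n, alpha, beta, l1, l2, l2_shrinkage) mirror the docstring symbols and the
# constructor arguments; the helper itself is not part of the Keras API.
import numpy as np
def ftrl_dense_step(w, z, n, grad, alpha=0.001, beta=1.0, l1=0.0, l2=0.0,
                    l2_shrinkage=0.0):
  """Apply one FTRL update and return the new (w, z, n)."""
  # With shrinkage, the linear term accumulates g + 2 * L2_shrinkage * w.
  grad_with_shrinkage = grad + 2.0 * l2_shrinkage * w
  n_new = n + grad ** 2
  sigma = (np.sqrt(n_new) - np.sqrt(n)) / alpha
  z_new = z + grad_with_shrinkage - sigma * w
  # Closed-form proximal step: coordinates with |z| <= l1 are set to zero.
  w_new = np.where(
      np.abs(z_new) > l1,
      -(z_new - np.sign(z_new) * l1) / ((beta + np.sqrt(n_new)) / alpha + l2),
      0.0)
  return w_new, z_new, n_new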
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base code for plugins support."""
import abc
from oslo_config import cfg
from oslo_log import log
import stevedore
from ironic_inspector.common.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class ProcessingHook(object, metaclass=abc.ABCMeta): # pragma: no cover
"""Abstract base class for introspection data processing hooks."""
dependencies = []
"""An ordered list of hooks that must be enabled before this one.
The items here should be entry point names, not classes.
"""
def before_processing(self, introspection_data, **kwargs):
"""Hook to run before any other data processing.
This hook is run even before sanity checks.
:param introspection_data: raw information sent by the ramdisk,
may be modified by the hook.
:param kwargs: used for extensibility without breaking existing hooks
:returns: nothing.
"""
def before_update(self, introspection_data, node_info, **kwargs):
"""Hook to run before Ironic node update.
This hook is run after node is found and ports are created,
just before the node is updated with the data.
:param introspection_data: processed data from the ramdisk.
:param node_info: NodeInfo instance.
:param kwargs: used for extensibility without breaking existing hooks.
:returns: nothing.
"""
class WithValidation(object):
REQUIRED_PARAMS = set()
"""Set with names of required parameters."""
OPTIONAL_PARAMS = set()
"""Set with names of optional parameters."""
def validate(self, params, **kwargs):
"""Validate params passed during creation.
Default implementation checks for presence of fields from
REQUIRED_PARAMS and fails for unexpected fields (not from
REQUIRED_PARAMS + OPTIONAL_PARAMS).
:param params: params as a dictionary
:param kwargs: used for extensibility without breaking existing plugins
:raises: ValueError on validation failure
"""
passed = {k for k, v in params.items() if v is not None}
missing = self.REQUIRED_PARAMS - passed
unexpected = passed - self.REQUIRED_PARAMS - self.OPTIONAL_PARAMS
msg = []
if missing:
msg.append(_('missing required parameter(s): %s')
% ', '.join(missing))
if unexpected:
msg.append(_('unexpected parameter(s): %s')
% ', '.join(unexpected))
if msg:
raise ValueError('; '.join(msg))
class RuleConditionPlugin(WithValidation, metaclass=abc.ABCMeta): # pragma: no cover # noqa
"""Abstract base class for rule condition plugins."""
REQUIRED_PARAMS = {'value'}
ALLOW_NONE = False
"""Whether this condition accepts None when field is not found."""
@abc.abstractmethod
def check(self, node_info, field, params, **kwargs):
"""Check if condition holds for a given field.
:param node_info: NodeInfo object
:param field: field value
:param params: parameters as a dictionary, changing it here will change
what will be stored in database
:param kwargs: used for extensibility without breaking existing plugins
:raises ValueError: on unacceptable field value
:returns: True if check succeeded, otherwise False
"""
class RuleActionPlugin(WithValidation, metaclass=abc.ABCMeta): # pragma: no cover # noqa
"""Abstract base class for rule action plugins."""
FORMATTED_PARAMS = []
"""List of params will be formatted with python format."""
@abc.abstractmethod
def apply(self, node_info, params, **kwargs):
"""Run action on successful rule match.
:param node_info: NodeInfo object
:param params: parameters as a dictionary
:param kwargs: used for extensibility without breaking existing plugins
:raises: utils.Error on failure
"""
_HOOKS_MGR = None
_NOT_FOUND_HOOK_MGR = None
_CONDITIONS_MGR = None
_ACTIONS_MGR = None
_INTROSPECTION_DATA_MGR = None
def reset():
"""Reset cached managers."""
global _HOOKS_MGR
global _NOT_FOUND_HOOK_MGR
global _CONDITIONS_MGR
global _ACTIONS_MGR
global _INTROSPECTION_DATA_MGR
_HOOKS_MGR = None
_NOT_FOUND_HOOK_MGR = None
_CONDITIONS_MGR = None
_ACTIONS_MGR = None
_INTROSPECTION_DATA_MGR = None
def missing_entrypoints_callback(names):
"""Raise MissingHookError with comma-separated list of missing hooks"""
error = _('The following hook(s) are missing or failed to load: %s')
raise RuntimeError(error % ', '.join(names))
def processing_hooks_manager(*args):
"""Create a Stevedore extension manager for processing hooks.
:param args: arguments to pass to the hooks constructor.
"""
global _HOOKS_MGR
if _HOOKS_MGR is None:
names = [x.strip()
for x in CONF.processing.processing_hooks.split(',')
if x.strip()]
_HOOKS_MGR = stevedore.NamedExtensionManager(
'ironic_inspector.hooks.processing',
names=names,
invoke_on_load=True,
invoke_args=args,
on_missing_entrypoints_callback=missing_entrypoints_callback,
name_order=True)
return _HOOKS_MGR
def validate_processing_hooks():
"""Validate the enabled processing hooks.
    :raises: RuntimeError on missing or failed-to-load hooks, or on
        dependency validation failure
:returns: the list of hooks passed validation
"""
hooks = [ext for ext in processing_hooks_manager()]
enabled = set()
errors = []
for hook in hooks:
deps = getattr(hook.obj, 'dependencies', ())
missing = [d for d in deps if d not in enabled]
if missing:
errors.append('Hook %(hook)s requires the following hooks to be '
'enabled before it: %(deps)s. The following hooks '
'are missing: %(missing)s.' %
{'hook': hook.name,
'deps': ', '.join(deps),
'missing': ', '.join(missing)})
enabled.add(hook.name)
if errors:
raise RuntimeError("Some hooks failed to load due to dependency "
"problems:\n%s" % "\n".join(errors))
return hooks
def node_not_found_hook_manager(*args):
global _NOT_FOUND_HOOK_MGR
if _NOT_FOUND_HOOK_MGR is None:
name = CONF.processing.node_not_found_hook
if name:
_NOT_FOUND_HOOK_MGR = stevedore.DriverManager(
'ironic_inspector.hooks.node_not_found',
name=name)
return _NOT_FOUND_HOOK_MGR
def rule_conditions_manager():
"""Create a Stevedore extension manager for conditions in rules."""
global _CONDITIONS_MGR
if _CONDITIONS_MGR is None:
_CONDITIONS_MGR = stevedore.ExtensionManager(
'ironic_inspector.rules.conditions',
invoke_on_load=True)
return _CONDITIONS_MGR
def rule_actions_manager():
"""Create a Stevedore extension manager for actions in rules."""
global _ACTIONS_MGR
if _ACTIONS_MGR is None:
_ACTIONS_MGR = stevedore.ExtensionManager(
'ironic_inspector.rules.actions',
invoke_on_load=True)
return _ACTIONS_MGR
def introspection_data_manager():
global _INTROSPECTION_DATA_MGR
if _INTROSPECTION_DATA_MGR is None:
_INTROSPECTION_DATA_MGR = stevedore.ExtensionManager(
'ironic_inspector.introspection_data.store',
invoke_on_load=True)
return _INTROSPECTION_DATA_MGR
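# A minimal sketch of how the plugin base classes above are meant to be
# subclassed. ``EqCondition`` is a hypothetical example, not a condition
# shipped with ironic-inspector; real plugins are registered under the
# 'ironic_inspector.rules.conditions' entry point that
# rule_conditions_manager() loads.
class EqCondition(RuleConditionPlugin):
    """Check that a field equals the 'value' parameter."""
    def check(self, node_info, field, params, **kwargs):
        return field == params['value']
# Typical flow: validate the user-supplied params first, then evaluate:
#     cond = EqCondition()
#     cond.validate({'value': 42})          # raises ValueError if params are bad
#     cond.check(None, 42, {'value': 42})   # -> True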
|
|
import os
import sys
import tempfile
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.files.temp import NamedTemporaryFile
from django.core.management import CommandError
from django.core.management.commands.dumpdata import ProxyModelWarning
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Article, Category, CircularA, CircularB, NaturalKeyThing,
PrimaryKeyUUIDModel, ProxySpy, Spy, Tag, Visa,
)
try:
import bz2 # NOQA
HAS_BZ2 = True
except ImportError:
HAS_BZ2 = False
try:
import lzma # NOQA
HAS_LZMA = True
except ImportError:
HAS_LZMA = False
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def test_class_fixtures(self):
"Test case has installed 3 fixture objects"
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
"""
Make sure that subclasses can remove fixtures from parent class (#21089).
"""
fixtures = []
def test_class_fixtures(self):
"There were no fixture objects installed"
self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin:
def _dumpdata_assert(self, args, output, format='json', filename=None,
natural_foreign_keys=False, natural_primary_keys=False,
use_base_manager=False, exclude_list=[], primary_keys=''):
new_io = StringIO()
filename = filename and os.path.join(tempfile.gettempdir(), filename)
management.call_command(
'dumpdata',
*args,
format=format,
stdout=new_io,
stderr=new_io,
output=filename,
use_natural_foreign_keys=natural_foreign_keys,
use_natural_primary_keys=natural_primary_keys,
use_base_manager=use_base_manager,
exclude=exclude_list,
primary_keys=primary_keys,
)
if filename:
with open(filename) as f:
command_output = f.read()
os.remove(filename)
else:
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
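# A minimal standalone sketch of what the mixin above automates: invoke the
# dumpdata management command and capture its serialized output as a string.
# It assumes a configured test project with the ``fixtures`` app installed;
# the helper name is illustrative and not part of Django itself.
def example_dumpdata_to_string(app_labels=('fixtures',), fmt='json'):
    out = StringIO()
    management.call_command('dumpdata', *app_labels, format=fmt, stdout=out)
    return out.getvalue().strip()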
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_loading_and_dumping(self):
apps.clear_cache()
Site.objects.all().delete()
# Load fixture 1. Single JSON file, with two objects.
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Try just dumping the contents of fixtures.Category
self._dumpdata_assert(
['fixtures.Category'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}]'
)
# ...and just fixtures.Article
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# ...and both
self._dumpdata_assert(
['fixtures.Category', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has '
'no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", '
'"fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a specific model twice
self._dumpdata_assert(
['fixtures.Article', 'fixtures.Article'],
(
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
)
# Specify a dump that specifies Article both explicitly and implicitly
self._dumpdata_assert(
['fixtures.Article', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a dump that specifies Article both explicitly and implicitly,
# but lists the app first (#22025).
self._dumpdata_assert(
['fixtures', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Same again, but specify in the reverse order
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no '
'place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields":'
' {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify one model from one application, and an entire other application.
self._dumpdata_assert(
['fixtures.Category', 'sites'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": '
'"example.com"}}]'
)
# Load fixture 2. JSON file imported by default. Overwrites some existing objects
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
# Load fixture 3, XML format.
management.call_command('loaddata', 'fixture3.xml', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture6.json', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "law">',
], ordered=False)
# Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture7.xml', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
'<Tag: <Article: Django conquers world!> tagged "django">',
'<Tag: <Article: Django conquers world!> tagged "world domination">',
], ordered=False)
# Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture8.json', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user>',
'<Visa: Prince >'
], ordered=False)
# Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture9.xml', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user, Can delete user>',
'<Visa: Artist formerly known as "Prince" Can change user>'
], ordered=False)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# By default, you get raw keys on dumpdata
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]'
)
# But you can get natural keys if you ask for them and they are available
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# You can also omit the primary keys for models that we can get later with natural keys.
self._dumpdata_assert(
['fixtures.person'],
'[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane '
'Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as '
'\\"Prince\\""}, "model": "fixtures.person"}]',
natural_primary_keys=True
)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is '
'great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, '
'"model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": '
'"2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML '
'identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", '
'"article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": '
'{"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, '
'"model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": '
'"fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", '
'"fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", '
'"fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], '
'["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": '
'"fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", '
'"user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person":'
' ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, '
'{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field '
'type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it '
'is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object '
'pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!'
'</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object '
'pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading '
'cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field>'
'</object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field>'
'<field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures'
'</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3'
'</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal'
'</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>'
'fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" '
'name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" '
'name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" '
'rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field '
'type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag">'
'<field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" '
'name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field>'
'<field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" '
'model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object>'
'<object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli'
'</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field '
'to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field>'
'<field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user'
'</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" '
'model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane'
' Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel">'
'<object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object>'
'<natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field>'
'</object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" '
'rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field '
'to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural>'
'<natural>auth</natural><natural>user</natural></object></field></object><object pk="1" '
'model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field '
'to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as '
'"Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object>'
'</django-objects>',
format='xml', natural_foreign_keys=True
)
def test_dumpdata_with_excludes(self):
# Load fixture1 which has a site, two articles, and a category
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1.json', verbosity=0)
# Excluding fixtures app should only leave sites
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
exclude_list=['fixtures'],
)
# Excluding fixtures.Article/Book should leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding fixtures and fixtures.Article/Book should be a no-op
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding sites and fixtures.Article/Book should only leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']
)
# Excluding a bogus app should throw an error
with self.assertRaisesMessage(management.CommandError, "No installed app with label 'foo_app'."):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
# Excluding a bogus model should throw an error
with self.assertRaisesMessage(management.CommandError, "Unknown model: fixtures.FooModel"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
@unittest.skipIf(sys.platform == 'win32', "Windows doesn't support '?' in filenames.")
def test_load_fixture_with_special_characters(self):
management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
self.assertQuerysetEqual(Spy.objects.all(),
['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk
)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": '
'"fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk),
use_base_manager=True
)
def test_dumpdata_with_pks(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}]',
primary_keys='2'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
'',
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures.Article', 'fixtures.category'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
def test_dumpdata_with_uuid_pks(self):
m1 = PrimaryKeyUUIDModel.objects.create()
m2 = PrimaryKeyUUIDModel.objects.create()
output = StringIO()
management.call_command(
'dumpdata', 'fixtures.PrimaryKeyUUIDModel', '--pks', ', '.join([str(m1.id), str(m2.id)]),
stdout=output,
)
result = output.getvalue()
self.assertIn('"pk": "%s"' % m1.id, result)
self.assertIn('"pk": "%s"' % m2.id, result)
def test_dumpdata_with_file_output(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
filename='dumpdata.json'
)
def test_dumpdata_progressbar(self):
"""
Dumpdata shows a progress bar on the command line when --output is set,
stdout is a tty, and verbosity > 0.
"""
management.call_command('loaddata', 'fixture1.json', verbosity=0)
new_io = StringIO()
new_io.isatty = lambda: True
with NamedTemporaryFile() as file:
options = {
'format': 'json',
'stdout': new_io,
'stderr': new_io,
'output': file.name,
}
management.call_command('dumpdata', 'fixtures', **options)
self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
# Test no progress bar when verbosity = 0
options['verbosity'] = 0
new_io = StringIO()
new_io.isatty = lambda: True
options.update({'stdout': new_io, 'stderr': new_io})
management.call_command('dumpdata', 'fixtures', **options)
self.assertEqual(new_io.getvalue(), '')
def test_dumpdata_proxy_without_concrete(self):
"""
A warning is displayed if a proxy model is dumped without its concrete
parent.
"""
ProxySpy.objects.create(name='Paul')
msg = "fixtures.ProxySpy is a proxy model and won't be serialized."
with self.assertWarnsMessage(ProxyModelWarning, msg):
self._dumpdata_assert(['fixtures.ProxySpy'], '[]')
def test_dumpdata_proxy_with_concrete(self):
"""
A warning isn't displayed if a proxy model is dumped with its concrete
parent.
"""
spy = ProxySpy.objects.create(name='Paul')
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
self._dumpdata_assert(
['fixtures.ProxySpy', 'fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy.pk
)
self.assertEqual(len(warning_list), 0)
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading_gzip(self):
management.call_command('loaddata', 'fixture5.json.gz', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
@unittest.skipUnless(HAS_BZ2, 'No bz2 library detected.')
def test_compressed_loading_bz2(self):
management.call_command('loaddata', 'fixture5.json.bz2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
@unittest.skipUnless(HAS_LZMA, 'No lzma library detected.')
def test_compressed_loading_lzma(self):
management.call_command('loaddata', 'fixture5.json.lzma', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
@unittest.skipUnless(HAS_LZMA, 'No lzma library detected.')
def test_compressed_loading_xz(self):
management.call_command('loaddata', 'fixture5.json.xz', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambiguous, so loading raises an error.
msg = "Multiple fixtures named 'fixture5'"
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture5', verbosity=0)
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_loaddata_error_message(self):
"""
Loading a fixture which contains an invalid object outputs an error
message which contains the pk of the object that triggered the error.
"""
# MySQL needs a little prodding to reject invalid data.
# This won't affect other tests because the database connection
# is closed at the end of each test.
if connection.vendor == 'mysql':
with connection.cursor() as cursor:
cursor.execute("SET sql_mode = 'TRADITIONAL'")
msg = 'Could not load fixtures.Article(pk=1):'
with self.assertRaisesMessage(IntegrityError, msg):
management.call_command('loaddata', 'invalid.json', verbosity=0)
@unittest.skipUnless(connection.vendor == 'postgresql', 'psycopg2 prohibits null characters in data.')
def test_loaddata_null_characters_on_postgresql(self):
msg = (
'Could not load fixtures.Article(pk=2): '
'A string literal cannot contain NUL (0x00) characters.'
)
with self.assertRaisesMessage(ValueError, msg):
management.call_command('loaddata', 'null_character_in_field_value.json')
def test_loaddata_app_option(self):
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_1' found."):
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
self.assertQuerysetEqual(Article.objects.all(), [])
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
])
def test_loaddata_verbosity_three(self):
output = StringIO()
management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
command_output = output.getvalue()
self.assertIn(
"\rProcessed 1 object(s).\rProcessed 2 object(s)."
"\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
command_output
)
def test_loading_using(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0, database='default')
management.call_command('loaddata', 'db_fixture_2', verbosity=0, database='default')
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_unmatched_identifier_loading(self):
# Try to load db fixture 3. This won't load because the database identifier doesn't match
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0)
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0, database='default')
self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
# Load back in fixture 1, we need the articles from it
management.call_command('loaddata', 'fixture1', verbosity=0)
# Try to load fixture 6 using format discovery
management.call_command('loaddata', 'fixture6', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Time to reform copyright> tagged "copyright">',
'<Tag: <Article: Time to reform copyright> tagged "law">'
], ordered=False)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django '
'Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, '
'{"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" '
'model="fixtures.tag"><field type="CharField" name="name">copyright</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt'
'</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane '
'Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Prince</field></object></django-objects>',
format='xml', natural_foreign_keys=True
)
def test_loading_with_exclude_app(self):
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1', exclude=['fixtures'], verbosity=0)
self.assertFalse(Article.objects.exists())
self.assertFalse(Category.objects.exists())
self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_loading_with_exclude_model(self):
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1', exclude=['fixtures.Article'], verbosity=0)
self.assertFalse(Article.objects.exists())
self.assertQuerysetEqual(Category.objects.all(), ['<Category: News Stories>'])
self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_exclude_option_errors(self):
"""Excluding a bogus app or model should raise an error."""
msg = "No installed app with label 'foo_app'."
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture1', exclude=['foo_app'], verbosity=0)
msg = "Unknown model: fixtures.FooModel"
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture1', exclude=['fixtures.FooModel'], verbosity=0)
def test_stdin_without_format(self):
"""Reading from stdin raises an error if format isn't specified."""
msg = '--format must be specified when reading from stdin.'
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', '-', verbosity=0)
def test_loading_stdin(self):
"""Loading fixtures from stdin with json and xml."""
tests_dir = os.path.dirname(__file__)
fixture_json = os.path.join(tests_dir, 'fixtures', 'fixture1.json')
fixture_xml = os.path.join(tests_dir, 'fixtures', 'fixture3.xml')
with mock.patch('django.core.management.commands.loaddata.sys.stdin', open(fixture_json)):
management.call_command('loaddata', '--format=json', '-', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
with mock.patch('django.core.management.commands.loaddata.sys.stdin', open(fixture_xml)):
management.call_command('loaddata', '--format=xml', '-', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Time to reform copyright>',
'<Article: Poker on TV is great!>',
])
class NonexistentFixtureTests(TestCase):
"""
Custom class to limit fixture dirs.
"""
def test_loaddata_not_existent_fixture_file(self):
stdout_output = StringIO()
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', stdout=stdout_output)
@mock.patch('django.db.connection.enable_constraint_checking')
@mock.patch('django.db.connection.disable_constraint_checking')
def test_nonexistent_fixture_no_constraint_checking(
self, disable_constraint_checking, enable_constraint_checking):
"""
If no fixtures match the loaddata command, constraints checks on the
database shouldn't be disabled. This is performance critical on MSSQL.
"""
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', verbosity=0)
disable_constraint_checking.assert_not_called()
enable_constraint_checking.assert_not_called()
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
available_apps = [
'fixtures',
'django.contrib.sites',
]
@skipUnlessDBFeature('supports_forward_references')
def test_format_discovery(self):
# Load fixture 1 again, using format discovery
management.call_command('loaddata', 'fixture1', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Try to load fixture 2 using format discovery; this will fail
# because there are two fixture2's in the fixtures directory
msg = "Multiple fixtures named 'fixture2'"
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture2', verbosity=0)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Load fixture 4 (compressed), using format discovery
management.call_command('loaddata', 'fixture4', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
class ForwardReferenceTests(DumpDataAssertMixin, TestCase):
def test_forward_reference_fk(self):
management.call_command('loaddata', 'forward_reference_fk.json', verbosity=0)
t1, t2 = NaturalKeyThing.objects.all()
self.assertEqual(t1.other_thing, t2)
self.assertEqual(t2.other_thing, t1)
self._dumpdata_assert(
['fixtures'],
'[{"model": "fixtures.naturalkeything", "pk": 1, '
'"fields": {"key": "t1", "other_thing": 2, "other_things": []}}, '
'{"model": "fixtures.naturalkeything", "pk": 2, '
'"fields": {"key": "t2", "other_thing": 1, "other_things": []}}]',
)
def test_forward_reference_fk_natural_key(self):
management.call_command(
'loaddata',
'forward_reference_fk_natural_key.json',
verbosity=0,
)
t1, t2 = NaturalKeyThing.objects.all()
self.assertEqual(t1.other_thing, t2)
self.assertEqual(t2.other_thing, t1)
self._dumpdata_assert(
['fixtures'],
'[{"model": "fixtures.naturalkeything", '
'"fields": {"key": "t1", "other_thing": ["t2"], "other_things": []}}, '
'{"model": "fixtures.naturalkeything", '
'"fields": {"key": "t2", "other_thing": ["t1"], "other_things": []}}]',
natural_primary_keys=True,
natural_foreign_keys=True,
)
def test_forward_reference_m2m(self):
management.call_command('loaddata', 'forward_reference_m2m.json', verbosity=0)
self.assertEqual(NaturalKeyThing.objects.count(), 3)
t1 = NaturalKeyThing.objects.get_by_natural_key('t1')
self.assertQuerysetEqual(
t1.other_things.order_by('key'),
['<NaturalKeyThing: t2>', '<NaturalKeyThing: t3>']
)
self._dumpdata_assert(
['fixtures'],
'[{"model": "fixtures.naturalkeything", "pk": 1, '
'"fields": {"key": "t1", "other_thing": null, "other_things": [2, 3]}}, '
'{"model": "fixtures.naturalkeything", "pk": 2, '
'"fields": {"key": "t2", "other_thing": null, "other_things": []}}, '
'{"model": "fixtures.naturalkeything", "pk": 3, '
'"fields": {"key": "t3", "other_thing": null, "other_things": []}}]',
)
def test_forward_reference_m2m_natural_key(self):
management.call_command(
'loaddata',
'forward_reference_m2m_natural_key.json',
verbosity=0,
)
self.assertEqual(NaturalKeyThing.objects.count(), 3)
t1 = NaturalKeyThing.objects.get_by_natural_key('t1')
self.assertQuerysetEqual(
t1.other_things.order_by('key'),
['<NaturalKeyThing: t2>', '<NaturalKeyThing: t3>']
)
self._dumpdata_assert(
['fixtures'],
'[{"model": "fixtures.naturalkeything", '
'"fields": {"key": "t1", "other_thing": null, "other_things": [["t2"], ["t3"]]}}, '
'{"model": "fixtures.naturalkeything", '
'"fields": {"key": "t2", "other_thing": null, "other_things": []}}, '
'{"model": "fixtures.naturalkeything", '
'"fields": {"key": "t3", "other_thing": null, "other_things": []}}]',
natural_primary_keys=True,
natural_foreign_keys=True,
)
class CircularReferenceTests(DumpDataAssertMixin, TestCase):
def test_circular_reference(self):
management.call_command('loaddata', 'circular_reference.json', verbosity=0)
obj_a = CircularA.objects.get()
obj_b = CircularB.objects.get()
self.assertEqual(obj_a.obj, obj_b)
self.assertEqual(obj_b.obj, obj_a)
self._dumpdata_assert(
['fixtures'],
'[{"model": "fixtures.circulara", "pk": 1, '
'"fields": {"key": "x", "obj": 1}}, '
'{"model": "fixtures.circularb", "pk": 1, '
'"fields": {"key": "y", "obj": 1}}]',
)
def test_circular_reference_natural_key(self):
management.call_command(
'loaddata',
'circular_reference_natural_key.json',
verbosity=0,
)
obj_a = CircularA.objects.get()
obj_b = CircularB.objects.get()
self.assertEqual(obj_a.obj, obj_b)
self.assertEqual(obj_b.obj, obj_a)
self._dumpdata_assert(
['fixtures'],
'[{"model": "fixtures.circulara", '
'"fields": {"key": "x", "obj": ["y"]}}, '
'{"model": "fixtures.circularb", '
'"fields": {"key": "y", "obj": ["x"]}}]',
natural_primary_keys=True,
natural_foreign_keys=True,
)
|
|
##
#######################################################################################################################
#
# Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#######################################################################################################################
#!/usr/bin/python
import collections
import csv
import glob
import os
import re
import sys
try:
dict.iteritems
except AttributeError:
# Python 3
def itervalues(d):
return iter(d.values())
def iteritems(d):
return iter(d.items())
else:
# Python 2
def itervalues(d):
return d.itervalues()
def iteritems(d):
return d.iteritems()
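# On either Python version, iteritems(d) yields (key, value) pairs and
# itervalues(d) yields values without building an intermediate list.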
QueueCallCol = 0
CmdBufIndexCol = QueueCallCol + 1
CmdBufCallCol = CmdBufIndexCol + 1
SubQueueIdxCol = CmdBufCallCol + 1
StartClockCol = SubQueueIdxCol + 1
EndClockCol = StartClockCol + 1
TimeCol = EndClockCol + 1
PipelineHashCol = TimeCol + 1
CompilerHashCol = PipelineHashCol + 1
VsCsCol = CompilerHashCol + 1
HsCol = VsCsCol + 1
DsCol = HsCol + 1
GsCol = DsCol + 1
PsCol = GsCol + 1
VertsThdGrpsCol = PsCol + 1
InstancesCol = VertsThdGrpsCol + 1
CommentsCol = InstancesCol + 1
def isValidHash(string):
# A valid hash is a non-empty string that represents a non-zero hex value.
return string and (int(string, 16) != 0)
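# Illustrative examples: isValidHash("") and isValidHash("0x0") are False,
# while isValidHash("0x3a7f") is True.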
def DeterminePipelineType(row):
if not row[CompilerHashCol]:
return "No Pipeline (BLT, Barrier, etc.)"
else:
if re.search("Dispatch", row[CmdBufCallCol]):
return "Cs"
elif isValidHash(row[HsCol]) and isValidHash(row[GsCol]):
return "VsHsDsGsPs"
elif isValidHash(row[HsCol]):
return "VsHsDsPs"
elif isValidHash(row[GsCol]):
return "VsGsPs"
else:
return "VsPs"
enPrintAllPipelines = False
if len(sys.argv) > 3 or len(sys.argv) < 2:
sys.exit("Usage: timingReport.py <full path to log folder> [-all].")
elif len(sys.argv) == 3:
if sys.argv[2] == "-all":
enPrintAllPipelines = True
else:
sys.exit("Usage: timingReport.py <full path to log folder>. [-all]")
gpuFrameTime = 0
os.chdir(sys.argv[1])
files = glob.glob("frame*.csv")
if (len(files) == 0):
sys.exit("ERROR: Looking at directory <{0}> but cannot find any files that match the \"frame*.csv\" pattern.".format(os.getcwd()))
frames = { } # Frame num -> [ tsFreq, cmdBufClockPairs, total barrier time ]
perCallTable = { } # Device -> Engine -> QueueId -> Call -> [ count, totalTime ]
perPipelineTypeTable = { } # PipelineType -> [ count, totalTime ]
perPipelineTable = { } # Pipeline Hash -> [ type, count, totalTime, vs/csHash, hsHash, dsHash, gsHash, psHash ]
perPsTable = { } # PS Hash -> [ count, totalTime ]
pipelineRangeTable = { } # Frame num -> EngineType -> PipelineHash -> [(startClock1, endClock1, time1), (startClock2, endClock2, time2), ...]
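# For example (hypothetical values), a perPipelineTable entry might look like:
#   perPipelineTable["0xA1B2C3D4"] = ["VsPs", 120, 4500.0, "0x11", "", "", "", "0x22"]
# i.e. a VsPs pipeline recorded 120 times for 4500us total, with VS and PS hashes set.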
frameCount = 0
submitCount = 0
cmdBufCount = 0
filesProcessedSoFar = 0 # For printing parsing progress.
for file in files:
if sys.stdout.isatty():
sys.stdout.write("Parsing input files. {0:.0f}% Complete.\r".format((filesProcessedSoFar / float(len(files))) * 100))
filesProcessedSoFar += 1
# Decode file name.
searchObj = re.search("frame([0-9]*)Dev([0-9]*)Eng(\D*)([0-9]*)-([0-9]*)\.csv", file)
frameNum = int(searchObj.group(1))
deviceNum = int(searchObj.group(2))
engineType = searchObj.group(3)
engineId = int(searchObj.group(4))
queueId = int(searchObj.group(5))
# Track the fact we've never seen this frame before:
# - Zero out the time spent in barriers for it.
# - Place some empty maps in the pipelineRangeTable.
if not frameNum in frames:
frames[frameNum] = [0, [], 0]
pipelineRangeTable[frameNum] = { "Ace" : {}, "Dma" : {}, "Gfx" : {} }
# Expand Ace/Dma/Gfx to the full engine type name.
if engineType == "Ace":
engineKey = "Compute"
elif engineType == "Dma":
engineKey = "DMA"
elif engineType == "Gfx":
engineKey = "Universal"
else:
continue
# Create readable keys for perCallTable; they will be displayed later.
deviceKey = "Device " + str(deviceNum)
engineKey = engineKey + " Engine " + str(engineId)
queueKey = "Queue " + str(queueId)
if not deviceKey in perCallTable.keys():
perCallTable[deviceKey] = { }
if not engineKey in perCallTable[deviceKey].keys():
perCallTable[deviceKey][engineKey] = { }
if not queueKey in perCallTable[deviceKey][engineKey].keys():
perCallTable[deviceKey][engineKey][queueKey] = { }
with open(file) as csvFile:
reader = csv.reader(csvFile, skipinitialspace=True)
headers = next(reader)
tsFreqSearch = re.search(".*Frequency: (\d+).*", headers[TimeCol])
frames[frameNum][0] = int(tsFreqSearch.group(1))
for row in reader:
if row[QueueCallCol] == "Submit()":
submitCount += 1
if row[CmdBufCallCol] == "Begin()" and row[StartClockCol]:
frames[frameNum][1].append((int(row[StartClockCol]), int(row[EndClockCol])))
cmdBufCount += 1
if row[TimeCol]:
if row[CmdBufCallCol] in perCallTable[deviceKey][engineKey][queueKey].keys():
perCallTable[deviceKey][engineKey][queueKey][row[CmdBufCallCol]][0] += 1
perCallTable[deviceKey][engineKey][queueKey][row[CmdBufCallCol]][1] += float(row[TimeCol])
else:
perCallTable[deviceKey][engineKey][queueKey][row[CmdBufCallCol]] = [ 1, float(row[TimeCol]) ]
pipelineType = DeterminePipelineType(row)
if pipelineType in perPipelineTypeTable:
perPipelineTypeTable[pipelineType][0] += 1
perPipelineTypeTable[pipelineType][1] += float(row[TimeCol])
else:
perPipelineTypeTable[pipelineType] = [ 1, float(row[TimeCol]) ]
if row[CompilerHashCol]:
# Update the perPipelineTable totals.
# Note that in practice the compiler hash is most useful because it's in all of the pipeline dumps.
if row[CompilerHashCol] in perPipelineTable:
perPipelineTable[row[CompilerHashCol]][1] += 1
perPipelineTable[row[CompilerHashCol]][2] += float(row[TimeCol])
else:
perPipelineTable[row[CompilerHashCol]] = [ pipelineType, 1, float(row[TimeCol]), row[VsCsCol], row[HsCol], row[DsCol], row[GsCol], row[PsCol] ]
# Record the start and end clocks and the time of this shader work in the pipelineRangeTable.
# Note that we may divide by zero later unless we exclude rows with identical start and end clocks.
startClock = int(row[StartClockCol])
endClock = int(row[EndClockCol])
if endClock - startClock > 0:
if row[CompilerHashCol] in pipelineRangeTable[frameNum][engineType]:
pipelineRangeTable[frameNum][engineType][row[CompilerHashCol]].append((startClock, endClock, float(row[TimeCol])))
else:
pipelineRangeTable[frameNum][engineType][row[CompilerHashCol]] = [(startClock, endClock, float(row[TimeCol]))]
if row[PsCol]:
if row[PsCol] in perPsTable:
perPsTable[row[PsCol]][0] += 1
perPsTable[row[PsCol]][1] += float(row[TimeCol])
else:
perPsTable[row[PsCol]] = [ 1, float(row[TimeCol]) ]
if row[CmdBufCallCol] == "CmdBarrier()":
frames[frameNum][2] += float(row[TimeCol])
# The file is closed automatically when the 'with' block exits.
# Compute the sum of all GPU frame times, where the time of a single frame is the amount of time the GPU spent being busy.
# We can do this by creating a list of all GPU clock ranges when the GPU was busy from the list of all command buffer clock ranges like so:
# - For the current frame, sort the list of top-level command buffer (begin, end) clocks by increasing begin time.
# - Pop the top (begin, end) pair and use it to start a new GPU busy range.
# - While the top pair overlaps with the busy range, update the busy range with the latest ending time and pop the top pair.
# - Once there are no more overlapping ranges, push the current range as a complete busy range and repeat.
# Once that is done we can simply sum the busy ranges to get the amount of time the GPU was busy for the current frame.
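# Illustrative example (hypothetical clocks): command buffer ranges
# [(0, 10), (5, 20), (30, 40)] merge into busy ranges [(0, 20), (30, 40)],
# i.e. 30 clocks of GPU busy time for that frame.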
gpuFrameTime = 0
for frame in frames.keys():
tsFreq = frames[frame][0]
orderedRanges = sorted(frames[frame][1], key=lambda x: x[0])
busyRanges = []
while orderedRanges:
(curBegin, curEnd) = orderedRanges.pop(0)
while orderedRanges and orderedRanges[0][0] <= curEnd:
curEnd = max(curEnd, orderedRanges[0][1])
orderedRanges.pop(0)
busyRanges.append((curBegin, curEnd))
for (begin, end) in busyRanges:
gpuFrameTime += ((1000000 * (end - begin)) / tsFreq)
frameCount = int(len(frames))
gpuFrameTime /= frameCount
print("Average GPU busy time per frame: {0:.3f}ms ({1:,d} frames)".format(gpuFrameTime / 1000.0, frameCount))
print("Average submits per frame: " + str(submitCount / frameCount))
print("Average command buffers per frame: " + str(cmdBufCount / frameCount))
print("")
for deviceKey in iter(sorted(perCallTable)):
print("== Frame Breakdown By Command Buffer Call =======================================================================================================\n")
if len(perCallTable) > 1:
print(" + " + deviceKey + ":")
for engineKey in iter(sorted(perCallTable[deviceKey])):
for queueKey in iter(sorted(perCallTable[deviceKey][engineKey])):
print(" {0:37s}| Avg. Call Count | Avg. GPU Time [us] | Avg. Frame % ".format(engineKey + " (" + queueKey + ") Calls"))
print(" --------------------------------------+-----------------+--------------------+--------------")
totalQueueCount = 0
totalQueueTime = 0
for callId in collections.OrderedDict(sorted(perCallTable[deviceKey][engineKey][queueKey].items(), key=lambda x: x[1][1], reverse=True)):
count = perCallTable[deviceKey][engineKey][queueKey][callId][0] / frameCount
totalQueueCount += count
time = perCallTable[deviceKey][engineKey][queueKey][callId][1] / frameCount
totalQueueTime += time
print(" {0:38s}| {1:12,.2f} | {2:12,.2f} | {3:5.2f} %".
format(callId, count, time, (time / gpuFrameTime) * 100))
print(" --------------------------------------+-----------------+--------------------+--------------")
print(" Total | {0:12,.2f} | {1:>12,.2f} | {2:5.2f} %\n\n".
format(totalQueueCount, totalQueueTime, (totalQueueTime / gpuFrameTime) * 100))
print("== Frame Breakdown By Pipeline Type =============================================================================================================\n")
print(" Pipeline Type | Avg. Call Count | Avg. GPU Time [us] | Avg. Frame %")
print(" --------------------------------------+-----------------+--------------------|--------------")
for pipelineType in collections.OrderedDict(sorted(perPipelineTypeTable.items(), key=lambda x: x[1][1], reverse=True)):
timePerFrame = perPipelineTypeTable[pipelineType][1] / frameCount
pctOfFrame = (timePerFrame / gpuFrameTime) * 100
print(" {0:37s} | {1:12,.2f} | {2:>12,.2f} | {3:5.2f} %".
format(pipelineType,
perPipelineTypeTable[pipelineType][0] / frameCount,
timePerFrame,
pctOfFrame))
print("\n")
print("== Top Pipelines (>= 1%) ========================================================================================================================\n")
pipelineNum = 0
hidden = 0
print(" Compiler Hash | Type | Avg. Call Count | Avg. GPU Time [us] | Avg. Frame %")
print(" -----------------------+--------------+-----------------+--------------------|--------------")
for pipeline in collections.OrderedDict(sorted(perPipelineTable.items(), key=lambda x: x[1][2], reverse=True)):
pipelineNum += 1
timePerFrame = perPipelineTable[pipeline][2] / frameCount
pctOfFrame = (timePerFrame / gpuFrameTime) * 100
if pctOfFrame < 1.0 and not enPrintAllPipelines:
hidden += 1
else:
print(" {0:2d}. {1:s} | {2:10s} | {3:12,.2f} | {4:>12,.2f} | {5:5.2f} %".
format(pipelineNum,
pipeline,
perPipelineTable[pipeline][0],
perPipelineTable[pipeline][1] / frameCount,
timePerFrame,
pctOfFrame))
if hidden > 0:
print("\n + {0:d} pipelines not shown (< 1%).".format(hidden))
print("\n")
print("== Top Pipeline/Shader Hashes (>= 1%) ===========================================================================================================\n")
pipelineNum = 0
hidden = 0
print(" Compiler Hash | Type | VS/CS Hash | HS Hash | DS Hash | GS Hash | PS Hash ")
print(" -----------------------+------------+------------------------------------+------------------------------------+------------------------------------+------------------------------------+------------------------------------")
for pipeline in collections.OrderedDict(sorted(perPipelineTable.items(), key=lambda x: x[1][2], reverse=True)):
pipelineNum += 1
timePerFrame = perPipelineTable[pipeline][2] / frameCount
pctOfFrame = (timePerFrame / gpuFrameTime) * 100
if pctOfFrame < 1.0 and not enPrintAllPipelines:
hidden += 1
else:
pipelineHashes = perPipelineTable[pipeline]
vsCsHash = pipelineHashes[3] if isValidHash(pipelineHashes[3]) else ""
hsHash = pipelineHashes[4] if isValidHash(pipelineHashes[4]) else ""
dsHash = pipelineHashes[5] if isValidHash(pipelineHashes[5]) else ""
gsHash = pipelineHashes[6] if isValidHash(pipelineHashes[6]) else ""
psHash = pipelineHashes[7] if isValidHash(pipelineHashes[7]) else ""
print(" {0:2d}. {1:18s} | {2:10s} | {3:34s} | {4:34s} | {5:34s} | {6:34s} | {7:34s} ".
format(pipelineNum,
pipeline,
pipelineHashes[0],
vsCsHash, hsHash, dsHash, gsHash, psHash))
if hidden > 0:
print("\n + {0:d} pipelines not shown (< 1%).".format(hidden))
print("\n")
print("== Top Pixel Shaders (>= 1%) ====================================================================================================================\n")
psNum = 0
hidden = 0
print(" PS Hash | Avg. Call Count | Avg. GPU Time [us] | Avg. Frame %")
print(" ----------------------------------------+-----------------+--------------------|--------------")
for ps in collections.OrderedDict(sorted(perPsTable.items(), key=lambda x: x[1][1], reverse=True)):
psNum += 1
timePerFrame = perPsTable[ps][1] / frameCount
pctOfFrame = (timePerFrame / gpuFrameTime) * 100
if pctOfFrame < 1.0 and not enPrintAllPipelines:
hidden += 1
else:
print(" {0:2d}. {1:36s}| {2:12,.2f} | {3:>12,.2f} | {4:5.2f} %".
format(psNum,
ps,
perPsTable[ps][0] / frameCount,
timePerFrame,
pctOfFrame))
if hidden > 0:
print("\n + {0:d} pixel shaders not shown (< 1%).".format(hidden))
print("\n")
# Identify frame with median time spent in barriers.
medianBarrierFrame = list(collections.OrderedDict(sorted(list(iteritems(frames)), key=lambda x: x[1][2])).keys())[int(frameCount / 2)]
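# (The frames are ordered by their total barrier time, stored at index 2 of
# each value, and the key of the middle entry is taken as the median frame.)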
barrierTime = 0
barrierReportTable = [ ] # [time, [desc, ...] ]
for file in files:
# Decode file name.
searchObj = re.search("frame([0-9]*)Dev([0-9]*)Eng(\D*)([0-9]*)-([0-9]*)\.csv", file)
frameNum = int(searchObj.group(1))
engineType = searchObj.group(3)
if not (engineType == "Ace" or engineType == "Dma" or engineType == "Gfx"):
continue
if frameNum == medianBarrierFrame:
with open(file) as csvFile:
reader = csv.reader(csvFile, skipinitialspace=True)
next(reader)
for row in reader:
if row[CmdBufCallCol] == "CmdBarrier()":
barrierTime += float(row[TimeCol])
entry = [float(row[TimeCol]), [ ] ]
if row[CommentsCol] == "":
entry[1].append(["-", "", 0, 0])
else:
actionList = row[CommentsCol].split("\n")
for action in actionList:
if ('CacheMask' not in action) and ('OldLayout' not in action) and ('NewLayout' not in action):
searchObj = re.search("(.*): ([0-9]*)x([0-9]*) (.*)", action)
if searchObj != None:
actionType = searchObj.group(1)
width = int(searchObj.group(2))
height = int(searchObj.group(3))
format = searchObj.group(4)
entry[1].append([actionType, format, width, height])
else:
entry[1].append([action, "", 0, 0])
barrierReportTable.append(entry)
# The file is closed automatically when the 'with' block exits.
print("== Median Frame Top CmdBarrier() Calls (>= 10us): ===============================================================================================\n")
print("Frame #{0:d} total barrier time: {1:,.2f} us\n".format(medianBarrierFrame, barrierTime))
print(" Layout Transition(s) | Format | Dimensions | Time [us]")
print(" ----------------------------------------------------------------+------------------------------+-------------+-----------")
barrierNum = 0
hidden = 0
for barrier in sorted(barrierReportTable, key=lambda x: x[0], reverse=True):
barrierNum +=1
if barrier[0] < 10:
hidden += 1
else:
firstLine = True
actions = sorted(barrier[1], key=lambda x: x[3], reverse=True)
for action in actions:
dimensions = "{0:4d} x {1:4d}".format(action[2], action[3]) if action[2] != 0 and action[3] != 0 else " "
if firstLine:
print(" {0:2d}. {2:58s} | {3:26s} | {4:s} | {1:>8,.2f}".
format(barrierNum, barrier[0], action[0], action[1], dimensions))
else:
print(" {0:58s} | {1:26s} | {2:s} |".
format(action[0], action[1], dimensions))
firstLine = False
if hidden > 0:
print("\n + {0:d} CmdBarrier() calls not shown (< 10us).\n".format(hidden))
print("\n")
asyncOverlapTable = { } # computePipelineHash -> [totalOverlapTime, universalPipelineHash -> overlapTime]
for frameNum in iter(sorted(pipelineRangeTable)):
# Check for overlap between all pairs of compute and universal executions in this frame.
for cPipeline, cClocks in pipelineRangeTable[frameNum]["Ace"].items():
if cPipeline not in asyncOverlapTable:
asyncOverlapTable[cPipeline] = [0, { }]
for cStart, cEnd, cTime in cClocks:
for uPipeline, uClocks in pipelineRangeTable[frameNum]["Gfx"].items():
for uStart, uEnd, uTime in uClocks:
# If these clock ranges intersect, compute the portion of the compute time that overlaps with the universal work.
# Note that we treat the clocks as dimensionless numbers, so we never need to know the clock frequency.
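# Worked example (hypothetical clocks): cStart=100, cEnd=200, cTime=50.0us and
# uStart=150, uEnd=300 overlap for 50 of the compute work's 100 clocks, so
# overlapTime = 50.0 * 50 / 100 = 25.0us is attributed to that pair.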
if uStart < cEnd and uEnd > cStart:
overlapTime = cTime * (min(cEnd, uEnd) - max(cStart, uStart)) / (cEnd - cStart)
asyncOverlapTable[cPipeline][0] += overlapTime
if uPipeline in asyncOverlapTable[cPipeline][1]:
asyncOverlapTable[cPipeline][1][uPipeline] += overlapTime
else:
asyncOverlapTable[cPipeline][1][uPipeline] = overlapTime
if len(asyncOverlapTable.keys()) > 0:
print("== Async Compute Overlap ========================================================================================================================\n")
print(" Async Compiler Hash | Gfx Compiler Hash | Avg. GPU Time [us] | Avg. Frame %")
print(" ------------------------+----------------------+--------------------+--------------")
pipelineNum = 0
for cPipeline in collections.OrderedDict(sorted(asyncOverlapTable.items(), key=lambda x: x[1][0], reverse=True)):
pipelineNum += 1
timePerFrame = asyncOverlapTable[cPipeline][0] / frameCount
pctOfFrame = (timePerFrame / gpuFrameTime) * 100
print(" {0:2d}. {1:s} | Total | {2:>12,.2f} | {3:5.2f} %".
format(pipelineNum, cPipeline, timePerFrame, pctOfFrame))
numTrailing = 0
trailingTimeTotal = 0
for uPipeline in collections.OrderedDict(sorted(asyncOverlapTable[cPipeline][1].items(), key=lambda x: x[1], reverse=True)):
timePerFrame = asyncOverlapTable[cPipeline][1][uPipeline] / frameCount
pctOfFrame = (timePerFrame / gpuFrameTime) * 100
if pctOfFrame < 0.10:
numTrailing += 1
trailingTimeTotal += timePerFrame
else:
print(" | {0:s} | {1:>12,.2f} | {2:5.2f} %".
format(uPipeline, timePerFrame, pctOfFrame))
pctOfFrame = (trailingTimeTotal / gpuFrameTime) * 100
print(" | Num Hidden: {0:<6d} | {1:>12,.2f} | {2:5.2f} %".
format(numTrailing, trailingTimeTotal, pctOfFrame))
timePerFrame = (perPipelineTable[cPipeline][2] - asyncOverlapTable[cPipeline][0]) / frameCount
pctOfFrame = (timePerFrame / gpuFrameTime) * 100
print(" | No Overlap | {0:>12,.2f} | {1:5.2f} %".
format(timePerFrame, pctOfFrame))
print(" ------------------------+----------------------+--------------------+--------------")
|
|
import datetime
import mock
import unittest
from fakes import FakeReading
from usage.fields import field_function
from usage.fields.item import billing_entity
from usage.fields.item import currency_code
from usage.fields.item import description
from usage.fields.item import item_rate
from usage.fields.item import line_item_type
from usage.fields.item import meter_name
from usage.fields.item import operation
from usage.fields.item import product_code
from usage.fields.item import product_name
from usage.fields.item import usage_type
from usage.fields.reading import availability_zone
from usage.fields.reading import billing_period_end_date
from usage.fields.reading import billing_period_start_date
from usage.fields.reading import cost
from usage.fields.reading import display_name
from usage.fields.reading import hours
from usage.fields.reading import image_metadata_field
from usage.fields.reading import instance_type
from usage.fields.reading import metadata_field
from usage.fields.reading import payer_account_id
from usage.fields.reading import project_id
from usage.fields.reading import resource_id
from usage.fields.reading import timeinterval
from usage.fields.reading import usage_account_id
from usage.fields.reading import usage_amount
from usage.fields.reading import usage_end_date
from usage.fields.reading import usage_start_date
from usage.fields.report import invoice_id
from usage.exc import UnknownFieldFunctionError
def broken_field(d, i, r):
raise Exception("I am broken.")
return 'worked'
class TestFieldFunction(unittest.TestCase):
"""Tests the conversion plugin loaded."""
def test_unknown_field_function(self):
with self.assertRaises(UnknownFieldFunctionError):
field_function('doesntexist', 'd', 'i', 'r')
@mock.patch(
'usage.fields.FIELD_FUNCTIONS',
{'broken_field': broken_field}
)
def test_broken_field_function(self):
self.assertTrue(
field_function('broken_field', 'd', 'i', 'r') is None
)
class TestMetadataField(unittest.TestCase):
"""Tests the metadata field function."""
key = 'metadata:test'
def test_metadata_field_not_present(self):
r = FakeReading(metadata={})
self.assertTrue(metadata_field(self.key, r) is None)
def test_nova_metadata(self):
metadata = {'metadata.test': 'nova'}
r = FakeReading(metadata=metadata)
self.assertEquals(metadata_field(self.key, r), 'nova')
# Test insensitive metadata
metadata = {'metadata.TeSt': 'nova'}
r = FakeReading(metadata=metadata)
self.assertEquals(metadata_field(self.key, r), 'nova')
# Test insensitive key
self.assertEquals(metadata_field('metadata:TEst', r), 'nova')
def test_glance_metadata(self):
metadata = {'properties.test': 'glance'}
r = FakeReading(metadata=metadata)
self.assertEquals(metadata_field(self.key, r), 'glance')
# Test insensitive metadata
metadata = {'properties.TeST': 'glance'}
r = FakeReading(metadata=metadata)
self.assertEquals(metadata_field(self.key, r), 'glance')
# Test insensitive key
self.assertEquals(metadata_field('metadata:tESt', r), 'glance')
def test_cinder_metadata(self):
metadata = {
'metadata': unicode("[{'key': 'test', 'value': 'cinder'}]")
}
r = FakeReading(metadata=metadata)
self.assertEquals(metadata_field(self.key, r), 'cinder')
# Test insensitive metadata
metadata = {
'metadata': unicode("[{'key': 'TeSt', 'value': 'cinder'}]")
}
r = FakeReading(metadata=metadata)
self.assertEquals(metadata_field(self.key, r), 'cinder')
# Test insensitive key
self.assertEquals(metadata_field('metadata:tEsT', r), 'cinder')
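# Taken together, these tests exercise the three metadata layouts the
# metadata_field helper understands: nova-style 'metadata.<key>' keys,
# glance-style 'properties.<key>' keys, and cinder-style serialized
# [{'key': ..., 'value': ...}] lists, all matched case-insensitively.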
class TestImageMetadataField(unittest.TestCase):
"""Tests the image metadata field function."""
key = 'image_metadata:test'
def test_image_metadata(self):
metadata = {'image_meta.test': 'value'}
r = FakeReading(metadata=metadata)
self.assertEquals(image_metadata_field(self.key, r), 'value')
# Test case insensitivity
metadata = {'image_meta.TeST': 'value'}
r = FakeReading(metadata=metadata)
self.assertEquals(
image_metadata_field('image_metadata:tEsT', r),
'value'
)
# Test missing metadata
metadata = {}
r = FakeReading(metadata=metadata)
assert(image_metadata_field(self.key, r) is None)
class TestResourceId(unittest.TestCase):
"""Tests the resource_id field function."""
def test_resource_id(self):
r = FakeReading()
self.assertEquals(resource_id(None, None, r), 'resource_id')
class TestPayerAccountId(unittest.TestCase):
"""Tests the payer_account_id field function."""
def test_payer_account_id(self):
r = FakeReading()
self.assertEquals(payer_account_id(None, None, r), 'project_id')
class TestProjectId(unittest.TestCase):
"""Tests the project_id field function."""
def test_project_id(self):
r = FakeReading()
self.assertEquals(project_id(None, None, r), 'project_id')
class TestTimeInterval(unittest.TestCase):
"""Tests the timeinterval field function."""
def test_timeinterval(self):
stop = datetime.datetime.utcnow()
start = stop - datetime.timedelta(hours=1)
r = FakeReading(start=start, stop=stop)
expected = '{}/{}'.format(start.isoformat(), stop.isoformat())
self.assertEquals(timeinterval(None, None, r), expected)
class TestInvoiceId(unittest.TestCase):
"""Tests the invoice id field function."""
def test_invoice_id(self):
# This function only returns the empty string right now.
self.assertEquals(invoice_id(None, None, None), '')
class TestBillingEntity(unittest.TestCase):
"""Tests the billing entity field function."""
def test_billing_entity(self):
i = {'billing_entity': 'from_item'}
d = {'billing_entity': 'from_definition'}
self.assertEquals(billing_entity(d, i, None), 'from_item')
self.assertEquals(billing_entity(d, {}, None), 'from_definition')
class TestBillingPeriodStartDate(unittest.TestCase):
"""Tests the billing period start date field function."""
def test_billing_period_start_date(self):
start = datetime.datetime.utcnow()
r = FakeReading(start=start)
self.assertEquals(
billing_period_start_date(None, None, r),
start.isoformat()
)
class TestBillingPeriodEndDate(unittest.TestCase):
"""Tests the billing period end date field function."""
def test_billing_period_end_date(self):
stop = datetime.datetime.utcnow()
r = FakeReading(stop=stop)
self.assertEquals(
billing_period_end_date(None, None, r),
stop.isoformat()
)
class TestCost(unittest.TestCase):
"""Tests the cost field function."""
def test_cost(self):
item = {'item_rate': 1.0}
r = FakeReading(value='1.2345')
# Test default {:.2f}
self.assertEquals(cost({}, item, r), '1.23')
# Test other
d = {'cost_format': '{:.1f}'}
self.assertEquals(cost(d, item, r), '1.2')
class TestDisplayName(unittest.TestCase):
"""Tests the display name field function."""
def test_display_name(self):
r = FakeReading()
self.assertTrue(display_name(None, None, r) is None)
r = FakeReading(metadata={'display_name': 'display_name'})
self.assertEquals(display_name(None, None, r), 'display_name')
class TestHours(unittest.TestCase):
"""Tests the hours field function."""
def test_hours(self):
stop = datetime.datetime.utcnow()
start = stop - datetime.timedelta(hours=1)
r = FakeReading(start=start, stop=stop)
self.assertEquals(hours(None, None, r), 1)
stop = datetime.datetime.utcnow()
start = stop - datetime.timedelta(hours=0.5)
r = FakeReading(start=start, stop=stop)
self.assertEquals(hours(None, None, r), 0.5)
class TestInstanceType(unittest.TestCase):
"""Tests the instance type field function."""
def test_instance_type(self):
r = FakeReading()
self.assertTrue(instance_type(None, None, r) is None)
r = FakeReading(metadata={'instance_type': 'instance_type'})
self.assertEquals(instance_type(None, None, r), 'instance_type')
class TestUsageAccountId(unittest.TestCase):
"""Tests the usage account id field function."""
def test_usage_account_id(self):
r = FakeReading()
self.assertEquals(usage_account_id(None, None, r), 'project_id')
class TestLineItemType(unittest.TestCase):
"""Tests the line item type field function."""
def test_line_item_type(self):
item = {'line_item_type': 'line_item_type'}
self.assertEquals(line_item_type(None, {}, None), '')
self.assertEquals(line_item_type(None, item, None), 'line_item_type')
class TestMeterName(unittest.TestCase):
"""Tests the meter name field function."""
def test_meter_name(self):
item = {}
self.assertTrue(meter_name(None, item, None) is None)
item['meter_name'] = 'test'
self.assertEquals(meter_name(None, item, None), 'test')
class TestProductCode(unittest.TestCase):
"""Tests the product code field function."""
def test_product_code(self):
item = {'product_code': 'product_code'}
self.assertEquals(product_code(None, {}, None), '')
self.assertEquals(product_code(None, item, None), 'product_code')
class TestProductName(unittest.TestCase):
"""Tests the product name field function."""
def test_product_name(self):
item = {'product_name': 'product_name'}
self.assertEquals(product_name(None, {}, None), '')
self.assertEquals(product_name(None, item, None), 'product_name')
class TestUsageType(unittest.TestCase):
"""Tests the usage type field function."""
def test_usage_type(self):
item = {'usage_type': 'usage_type'}
self.assertEquals(usage_type(None, {}, None), '')
self.assertEquals(usage_type(None, item, None), 'usage_type')
class TestOperation(unittest.TestCase):
"""Tests the operation field function."""
def test_operation(self):
item = {'operation': 'operation'}
self.assertEquals(operation(None, {}, None), '')
self.assertEquals(operation(None, item, None), 'operation')
class TestUsageStartDate(unittest.TestCase):
"""Tests the usage start date field function."""
def test_usage_start_date(self):
start = datetime.datetime.utcnow()
r = FakeReading(start=start)
self.assertEquals(usage_start_date(None, None, r), start.isoformat())
class TestUsageEndDate(unittest.TestCase):
"""Tests the usage end date field function."""
def test_usage_end_date(self):
stop = datetime.datetime.utcnow()
r = FakeReading(stop=stop)
self.assertEquals(usage_end_date(None, None, r), stop.isoformat())
class TestAvailabilityZone(unittest.TestCase):
"""Tests the availability zone field function."""
def test_availability_zone(self):
metadata = {}
r = FakeReading(metadata=metadata)
self.assertEquals(availability_zone(None, None, r), '')
metadata = {'availability_zone': 'availability_zone'}
r = FakeReading(metadata=metadata)
self.assertEquals(
availability_zone(None, None, r),
'availability_zone'
)
class TestUsageAmount(unittest.TestCase):
"""Tests the usage amount field function."""
def test_usage_amount(self):
r = FakeReading()
self.assertEquals(usage_amount(None, None, r), 'value')
class TestCurrencyCode(unittest.TestCase):
"""Tests the currency code field function."""
def test_currency_code(self):
i = {'currency_code': 'from_item'}
d = {'currency_code': 'from_definition'}
self.assertEquals(currency_code({}, {}, None), '')
self.assertEquals(currency_code(d, i, None), 'from_item')
self.assertEquals(currency_code(d, {}, None), 'from_definition')
class TestItemRate(unittest.TestCase):
"""Tests the item rate field function."""
def test_item_rate(self):
self.assertEquals(item_rate(None, {}, None), 0.0)
i = {'item_rate': 'item_rate'}
self.assertEquals(item_rate(None, i, None), 'item_rate')
class TestDescription(unittest.TestCase):
"""Tests the description field function."""
def test_description(self):
i = {'description': 'description'}
self.assertEquals(description(None, {}, None), '')
self.assertEquals(description(None, i, None), 'description')
|
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend management
"""
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log
import retrying
import six
import six.moves.urllib.parse as urlparse
from stevedore import driver
from ceilometer import utils
LOG = log.getLogger(__name__)
OLD_OPTS = [
cfg.StrOpt('database_connection',
secret=True,
help='DEPRECATED - Database connection string.',
),
]
cfg.CONF.register_opts(OLD_OPTS)
OPTS = [
cfg.IntOpt('metering_time_to_live',
default=-1,
help="Number of seconds that samples are kept "
"in the database for (<= 0 means forever).",
deprecated_opts=[cfg.DeprecatedOpt('time_to_live',
'database')]),
cfg.IntOpt('event_time_to_live',
default=-1,
help=("Number of seconds that events are kept "
"in the database for (<= 0 means forever).")),
cfg.StrOpt('metering_connection',
secret=True,
default=None,
help='The connection string used to connect to the metering '
'database. (if unset, connection is used)'),
cfg.StrOpt('alarm_connection',
secret=True,
default=None,
deprecated_for_removal=True,
help='The connection string used to connect to the alarm '
'database. (if unset, connection is used)'),
cfg.IntOpt('alarm_history_time_to_live',
default=-1,
deprecated_for_removal=True,
help=("Number of seconds that alarm histories are kept "
"in the database for (<= 0 means forever).")),
cfg.StrOpt('event_connection',
secret=True,
default=None,
help='The connection string used to connect to the event '
'database. (if unset, connection is used)'),
cfg.IntOpt('db2nosql_resource_id_maxlen',
default=512,
help="The max length of resources id in DB2 nosql, "
"the value should be larger than len(hostname) * 2 "
"as compute node's resource id is <hostname>_<nodename>."),
# Deprecated in liberty
cfg.StrOpt('mongodb_replica_set',
deprecated_for_removal=True,
default='',
help=('The name of the replica set which is used to connect to '
'MongoDB database. Add "?replicaSet=myreplicatset" in '
'your connection URI instead.')),
]
cfg.CONF.register_opts(OPTS, group='database')
CLI_OPTS = [
cfg.BoolOpt('sql-expire-samples-only',
default=False,
help="Indicates if expirer expires only samples. If set true,"
" expired samples will be deleted, but residual"
" resource and meter definition data will remain.",
),
]
cfg.CONF.register_cli_opts(CLI_OPTS)
db_options.set_defaults(cfg.CONF)
class StorageUnknownWriteError(Exception):
"""Error raised when an unknown error occurs while recording."""
class StorageBadVersion(Exception):
"""Error raised when the storage backend version is not good enough."""
class StorageBadAggregate(Exception):
"""Error raised when an aggregate is unacceptable to storage backend."""
code = 400
def get_connection_from_config(conf, purpose='metering'):
retries = conf.database.max_retries
# Convert retry_interval secs to msecs for retry decorator
@retrying.retry(wait_fixed=conf.database.retry_interval * 1000,
stop_max_attempt_number=retries if retries >= 0 else None)
def _inner():
if conf.database_connection:
conf.set_override('connection', conf.database_connection,
group='database')
namespace = 'ceilometer.%s.storage' % purpose
url = (getattr(conf.database, '%s_connection' % purpose) or
conf.database.connection)
return get_connection(url, namespace)
return _inner()
def get_connection(url, namespace):
"""Return an open connection to the database."""
connection_scheme = urlparse.urlparse(url).scheme
# SQLAlchemy connection URLs may specify a 'dialect' or
# 'dialect+driver'. Handle the case where a driver is specified.
engine_name = connection_scheme.split('+')[0]
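# For example, a url of 'mysql+pymysql://user:pass@host/ceilometer' has the
# scheme 'mysql+pymysql', which resolves to the engine_name 'mysql'.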
if engine_name == 'db2':
import warnings
warnings.simplefilter("always")
import debtcollector
debtcollector.deprecate("The DB2nosql driver is no longer supported",
version="Liberty", removal_version="N*-cycle")
# NOTE: translation not applied bug #1446983
LOG.debug('looking for %(name)r driver in %(namespace)r',
{'name': engine_name, 'namespace': namespace})
mgr = driver.DriverManager(namespace, engine_name)
return mgr.driver(url)
class SampleFilter(object):
"""Holds the properties for building a query from a meter/sample filter.
:param user: The sample owner.
:param project: The sample project.
:param start_timestamp: Earliest time point in the request.
:param start_timestamp_op: Earliest timestamp operation in the request.
:param end_timestamp: Latest time point in the request.
:param end_timestamp_op: Latest timestamp operation in the request.
:param resource: Optional filter for resource id.
:param meter: Optional filter for meter type using the meter name.
:param source: Optional source filter.
:param message_id: Optional sample_id filter.
:param metaquery: Optional filter on the metadata
"""
def __init__(self, user=None, project=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
resource=None, meter=None,
source=None, message_id=None,
metaquery=None):
self.user = user
self.project = project
self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
self.start_timestamp_op = start_timestamp_op
self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
self.end_timestamp_op = end_timestamp_op
self.resource = resource
self.meter = meter
self.source = source
self.metaquery = metaquery or {}
self.message_id = message_id
def __repr__(self):
return ("<SampleFilter(user: %s,"
" project: %s,"
" start_timestamp: %s,"
" start_timestamp_op: %s,"
" end_timestamp: %s,"
" end_timestamp_op: %s,"
" resource: %s,"
" meter: %s,"
" source: %s,"
" metaquery: %s,"
" message_id: %s)>" %
(self.user,
self.project,
self.start_timestamp,
self.start_timestamp_op,
self.end_timestamp,
self.end_timestamp_op,
self.resource,
self.meter,
self.source,
self.metaquery,
self.message_id))
class EventFilter(object):
"""Properties for building an Event query.
:param start_timestamp: UTC start datetime (mandatory)
:param end_timestamp: UTC end datetime (mandatory)
:param event_type: the name of the event. None for all.
:param message_id: the message_id of the event. None for all.
:param admin_proj: the project_id of admin role. None if non-admin user.
:param traits_filter: the trait filter dicts, all of which are optional.
This parameter is a list of dictionaries that specify trait values:
.. code-block:: python
{'key': <key>,
'string': <value>,
'integer': <value>,
'datetime': <value>,
'float': <value>,
'op': <eq, lt, le, ne, gt or ge> }
"""
def __init__(self, start_timestamp=None, end_timestamp=None,
event_type=None, message_id=None, traits_filter=None,
admin_proj=None):
self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
self.message_id = message_id
self.event_type = event_type
self.traits_filter = traits_filter or []
self.admin_proj = admin_proj
def __repr__(self):
return ("<EventFilter(start_timestamp: %s,"
" end_timestamp: %s,"
" event_type: %s,"
" traits: %s)>" %
(self.start_timestamp,
self.end_timestamp,
self.event_type,
six.text_type(self.traits_filter)))
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.devtools.clouderrorreporting.v1beta1 ErrorStatsService API."""
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.errorreporting.v1beta1 import enums
from google.devtools.clouderrorreporting.v1beta1 import error_stats_service_pb2
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
_PageDesc = google.gax.PageDescriptor
class ErrorStatsServiceApi(object):
"""
An API for retrieving and managing error statistics as well as data for
individual events.
"""
SERVICE_ADDRESS = 'clouderrorreporting.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_CODE_GEN_NAME_VERSION = 'gapic/0.1.0'
_GAX_VERSION = pkg_resources.get_distribution('google-gax').version
_PAGE_DESCRIPTORS = {
'list_group_stats': _PageDesc('page_token', 'next_page_token',
'error_group_stats'),
'list_events': _PageDesc('page_token', 'next_page_token',
'error_events')
}
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
_PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return cls._PROJECT_PATH_TEMPLATE.render({'project': project, })
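# For example, project_path('my-project-123') returns
# 'projects/my-project-123', per the _PROJECT_PATH_TEMPLATE above.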
@classmethod
def match_project_from_project_name(cls, project_name):
"""Parses the project from a project resource.
Args:
project_name (string): A fully-qualified path representing a project
resource.
Returns:
A string representing the project.
"""
return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
metadata_transformer=None,
ssl_creds=None,
scopes=None,
client_config=None,
app_name='gax',
app_version=_GAX_VERSION):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
ssl_creds (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
metadata_transformer (Callable[[], list]): A function that creates
the metadata for requests.
app_name (string): The codename of the calling service.
app_version (string): The version of the calling service.
Returns:
A ErrorStatsServiceApi object.
"""
if scopes is None:
scopes = self._ALL_SCOPES
if client_config is None:
client_config = {}
goog_api_client = '{}/{} {} gax/{} python/{}'.format(
app_name, app_version, self._CODE_GEN_NAME_VERSION,
self._GAX_VERSION, platform.python_version())
metadata = [('x-goog-api-client', goog_api_client)]
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'error_stats_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.devtools.clouderrorreporting.v1beta1.ErrorStatsService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
kwargs={'metadata': metadata},
page_descriptors=self._PAGE_DESCRIPTORS)
self.error_stats_service_stub = config.create_stub(
error_stats_service_pb2.ErrorStatsServiceStub,
service_path,
port,
ssl_creds=ssl_creds,
channel=channel,
metadata_transformer=metadata_transformer,
scopes=scopes)
self._list_group_stats = api_callable.create_api_call(
self.error_stats_service_stub.ListGroupStats,
settings=defaults['list_group_stats'])
self._list_events = api_callable.create_api_call(
self.error_stats_service_stub.ListEvents,
settings=defaults['list_events'])
self._delete_events = api_callable.create_api_call(
self.error_stats_service_stub.DeleteEvents,
settings=defaults['delete_events'])
# Service calls
def list_group_stats(self,
project_name,
time_range,
group_id=None,
service_filter=None,
timed_count_duration=None,
alignment=None,
alignment_time=None,
order=None,
page_size=0,
options=None):
"""
Lists the specified groups.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_stats_service_api
>>> from google.devtools.clouderrorreporting.v1beta1 import error_stats_service_pb2
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = error_stats_service_api.ErrorStatsServiceApi()
>>> project_name = api.project_path('[PROJECT]')
>>> time_range = error_stats_service_pb2.QueryTimeRange()
>>>
>>> # Iterate over all results
>>> for element in api.list_group_stats(project_name, time_range):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_group_stats(project_name, time_range, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
project_name (string): [Required] The resource name of the Google Cloud Platform project. Written
as <code>projects/</code> plus the
<a href=\"https://support.google.com/cloud/answer/6158840\">Google Cloud
Platform project ID</a>.
Example: <code>projects/my-project-123</code>.
group_id (list[string]): [Optional] List all <code>ErrorGroupStats</code> with these IDs.
service_filter (:class:`google.devtools.clouderrorreporting.v1beta1.error_stats_service_pb2.ServiceContextFilter`): [Optional] List only <code>ErrorGroupStats</code> which belong to a service
context that matches the filter.
Data for all service contexts is returned if this field is not specified.
time_range (:class:`google.devtools.clouderrorreporting.v1beta1.error_stats_service_pb2.QueryTimeRange`): [Required] List data for the given time range.
Only <code>ErrorGroupStats</code> with a non-zero count in the given time
range are returned, unless the request contains an explicit group_id list.
If a group_id list is given, also <code>ErrorGroupStats</code> with zero
occurrences are returned.
timed_count_duration (:class:`google.protobuf.duration_pb2.Duration`): [Optional] The preferred duration for a single returned ``TimedCount``.
If not set, no timed counts are returned.
alignment (enum :class:`google.cloud.gapic.errorreporting.v1beta1.enums.TimedCountAlignment`): [Optional] The alignment of the timed counts to be returned.
Default is ``ALIGNMENT_EQUAL_AT_END``.
alignment_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): [Optional] Time where the timed counts shall be aligned if rounded
alignment is chosen. Default is 00:00 UTC.
order (enum :class:`google.cloud.gapic.errorreporting.v1beta1.enums.ErrorGroupOrder`): [Optional] The sort order in which the results are returned.
Default is ``COUNT_DESC``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.devtools.clouderrorreporting.v1beta1.error_stats_service_pb2.ErrorGroupStats` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
if group_id is None:
group_id = []
if service_filter is None:
service_filter = error_stats_service_pb2.ServiceContextFilter()
if timed_count_duration is None:
timed_count_duration = duration_pb2.Duration()
if alignment is None:
alignment = enums.TimedCountAlignment.ERROR_COUNT_ALIGNMENT_UNSPECIFIED
if alignment_time is None:
alignment_time = timestamp_pb2.Timestamp()
if order is None:
order = enums.ErrorGroupOrder.GROUP_ORDER_UNSPECIFIED
request = error_stats_service_pb2.ListGroupStatsRequest(
project_name=project_name,
time_range=time_range,
group_id=group_id,
service_filter=service_filter,
timed_count_duration=timed_count_duration,
alignment=alignment,
alignment_time=alignment_time,
order=order,
page_size=page_size)
return self._list_group_stats(request, options)
def list_events(self,
project_name,
group_id,
service_filter=None,
time_range=None,
page_size=0,
options=None):
"""
Lists the specified events.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_stats_service_api
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = error_stats_service_api.ErrorStatsServiceApi()
>>> project_name = api.project_path('[PROJECT]')
>>> group_id = ''
>>>
>>> # Iterate over all results
>>> for element in api.list_events(project_name, group_id):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_events(project_name, group_id, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
project_name (string): [Required] The resource name of the Google Cloud Platform project. Written
as ``projects/`` plus the
`Google Cloud Platform project ID <https://support.google.com/cloud/answer/6158840>`_.
Example: ``projects/my-project-123``.
group_id (string): [Required] The group for which events shall be returned.
service_filter (:class:`google.devtools.clouderrorreporting.v1beta1.error_stats_service_pb2.ServiceContextFilter`): [Optional] List only ErrorGroups which belong to a service context that
matches the filter.
Data for all service contexts is returned if this field is not specified.
time_range (:class:`google.devtools.clouderrorreporting.v1beta1.error_stats_service_pb2.QueryTimeRange`): [Optional] List only data for the given time range.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g. timeout, retries, etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorEvent` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
if service_filter is None:
service_filter = error_stats_service_pb2.ServiceContextFilter()
if time_range is None:
time_range = error_stats_service_pb2.QueryTimeRange()
request = error_stats_service_pb2.ListEventsRequest(
project_name=project_name,
group_id=group_id,
service_filter=service_filter,
time_range=time_range,
page_size=page_size)
return self._list_events(request, options)
def delete_events(self, project_name, options=None):
"""
Deletes all error events of a given project.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_stats_service_api
>>> api = error_stats_service_api.ErrorStatsServiceApi()
>>> project_name = api.project_path('[PROJECT]')
>>> response = api.delete_events(project_name)
Args:
project_name (string): [Required] The resource name of the Google Cloud Platform project. Written
as ``projects/`` plus the
`Google Cloud Platform project ID <https://support.google.com/cloud/answer/6158840>`_.
Example: ``projects/my-project-123``.
options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g. timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = error_stats_service_pb2.DeleteEventsRequest(
project_name=project_name)
self._delete_events(request, options)
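# Illustrative usage sketch (not part of the generated client). It mirrors the
# docstring examples above; the ``period`` field and ``PERIOD_1_DAY`` enum on
# QueryTimeRange, and the ``group``/``count`` fields on ErrorGroupStats, are
# assumptions based on the v1beta1 proto rather than code in this module.
#
#     from google.cloud.gapic.errorreporting.v1beta1 import error_stats_service_api
#     from google.devtools.clouderrorreporting.v1beta1 import error_stats_service_pb2
#
#     api = error_stats_service_api.ErrorStatsServiceApi()
#     project_name = api.project_path('[PROJECT]')
#     time_range = error_stats_service_pb2.QueryTimeRange(
#         period=error_stats_service_pb2.QueryTimeRange.PERIOD_1_DAY)
#     for group_stats in api.list_group_stats(project_name, time_range):
#         print(group_stats.group.group_id, group_stats.count)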
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides Guru Meditation Report
This module defines the actual OpenStack Guru Meditation
Report class.
This can be used in the OpenStack command definition files.
For example, in an openapp command module (under openapp/cmd):
.. code-block:: python
:emphasize-lines: 8,9,10
CONF = cfg.CONF
# maybe import some options here...
def main():
config.parse_args(sys.argv)
logging.setup('blah')
TextGuruMeditation.register_section('Some Special Section',
special_section_generator)
TextGuruMeditation.setup_autorun(version_object)
server = service.Service.create(binary='some-service',
topic=CONF.some_service_topic)
service.serve(server)
service.wait()
Then, you can do
.. code-block:: bash
$ kill -USR1 $SERVICE_PID
and get a Guru Meditation Report in the file or terminal
where stderr is logged for that given service.
"""
from __future__ import print_function
import inspect
import os
import signal
import sys
from oslo_utils import timeutils
from openapp.openstack.common.report.generators import conf as cgen
from openapp.openstack.common.report.generators import process as prgen
from openapp.openstack.common.report.generators import threading as tgen
from openapp.openstack.common.report.generators import version as pgen
from openapp.openstack.common.report import report
class GuruMeditation(object):
"""A Guru Meditation Report Mixin/Base Class
This class is a base class for Guru Meditation Reports.
It provides facilities for registering sections and
setting up functionality to auto-run the report on
a certain signal.
This class should always be used in conjunction with
a Report class via multiple inheritance. It should
always come first in the class list to ensure the
MRO is correct.
"""
timestamp_fmt = "%Y%m%d%H%M%S"
def __init__(self, version_obj, sig_handler_tb=None, *args, **kwargs):
self.version_obj = version_obj
self.traceback = sig_handler_tb
super(GuruMeditation, self).__init__(*args, **kwargs)
self.start_section_index = len(self.sections)
@classmethod
def register_section(cls, section_title, generator):
"""Register a New Section
This method registers a persistent section for the current
class.
:param str section_title: the title of the section
:param generator: the generator for the section
"""
try:
cls.persistent_sections.append([section_title, generator])
except AttributeError:
cls.persistent_sections = [[section_title, generator]]
@classmethod
def setup_autorun(cls, version, service_name=None,
log_dir=None, signum=None):
"""Set Up Auto-Run
This method sets up the Guru Meditation Report to automatically
get dumped to stderr or a file in a given dir when the given signal
is received.
:param version: the version object for the current product
        :param service_name: the name of this program, used to construct the
            log file name
        :param log_dir: path to a log directory in which to create the file
:param signum: the signal to associate with running the report
"""
if not signum and hasattr(signal, 'SIGUSR1'):
# SIGUSR1 is not supported on all platforms
signum = signal.SIGUSR1
if signum:
signal.signal(signum,
lambda sn, tb: cls.handle_signal(
version, service_name, log_dir, tb))
@classmethod
def handle_signal(cls, version, service_name, log_dir, traceback):
"""The Signal Handler
This method (indirectly) handles receiving a registered signal and
dumping the Guru Meditation Report to stderr or a file in a given dir.
        If a log dir is given, the report will be dumped to a file named
        $service_name_gurumeditation_$current_time in that directory;
        service_name defaults to the name of the running program.
        This method is designed to be curried into a proper signal handler
        by currying out the version parameter.
:param version: the version object for the current product
        :param service_name: the name of this program, used to construct the
            log file name
        :param log_dir: path to a log directory in which to create the file
:param traceback: the traceback provided to the signal handler
"""
try:
res = cls(version, traceback).run()
except Exception:
print("Unable to run Guru Meditation Report!",
file=sys.stderr)
else:
if log_dir:
service_name = service_name or os.path.basename(
inspect.stack()[-1][1])
filename = "%s_gurumeditation_%s" % (
service_name, timeutils.strtime(fmt=cls.timestamp_fmt))
filepath = os.path.join(log_dir, filename)
try:
with open(filepath, "w") as dumpfile:
dumpfile.write(res)
except Exception:
print("Unable to dump Guru Meditation Report to file %s" %
(filepath,), file=sys.stderr)
else:
print(res, file=sys.stderr)
def _readd_sections(self):
del self.sections[self.start_section_index:]
self.add_section('Package',
pgen.PackageReportGenerator(self.version_obj))
self.add_section('Threads',
tgen.ThreadReportGenerator(self.traceback))
self.add_section('Green Threads',
tgen.GreenThreadReportGenerator())
self.add_section('Processes',
prgen.ProcessReportGenerator())
self.add_section('Configuration',
cgen.ConfigReportGenerator())
try:
for section_title, generator in self.persistent_sections:
self.add_section(section_title, generator)
except AttributeError:
pass
def run(self):
self._readd_sections()
return super(GuruMeditation, self).run()
# GuruMeditation must come first to get the correct MRO
class TextGuruMeditation(GuruMeditation, report.TextReport):
"""A Text Guru Meditation Report
    This report is the basic human-readable Guru Meditation Report.
It contains the following sections by default
(in addition to any registered persistent sections):
- Package Information
- Threads List
- Green Threads List
- Process List
- Configuration Options
:param version_obj: the version object for the current product
:param traceback: an (optional) frame object providing the actual
traceback for the current thread
"""
def __init__(self, version_obj, traceback=None):
super(TextGuruMeditation, self).__init__(version_obj, traceback,
'Guru Meditation')
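# Illustrative sketch (not part of the original module): besides the
# signal-based autorun shown in the module docstring, a report can be built
# and rendered directly, mirroring what GuruMeditation.handle_signal() does
# internally. ``version_obj`` stands for whatever version object the product
# would normally pass to setup_autorun().
#
#     report_text = TextGuruMeditation(version_obj).run()
#     print(report_text)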
|
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning warnings are always displayed, not just once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
    ----------
    dist : ndarray
        The input distances
    weights : {'uniform', 'distance' or a callable}
        The kind of weighting used
    Returns
    -------
    weights_arr : array of the same shape as ``dist``
        if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, **kwargs):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_kwds = kwargs
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric in ['wminkowski', 'minkowski']:
self.metric_kwds['p'] = p
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
self.effective_metric_ = self.metric
self.effective_metric_kwds_ = self.metric_kwds
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
self.effective_metric_kwds_ = self.metric_kwds.copy()
p = self.effective_metric_kwds_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_kwds_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_kwds_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_kwds_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
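# Illustrative note (not part of scikit-learn): with algorithm='auto' the code
# above prefers a KD-tree when the effective metric supports it and n_neighbors
# is small relative to the number of samples, and falls back to a ball tree
# (or to brute force for very large n_neighbors) otherwise. For example:
#
#     >>> from sklearn.neighbors import NearestNeighbors
#     >>> X = [[float(i)] for i in range(10)]
#     >>> NearestNeighbors(n_neighbors=2).fit(X)._fit_method
#     'kd_tree'
#
# A BallTree-only metric such as 'haversine' would resolve to 'ball_tree'
# instead.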
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
        Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
            Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_kwds_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
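            # query one extra neighbor and drop the nearest one below, which
            # is typically each sample itself (at distance zero) when X is
            # the same data the estimator was fitted on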
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_kwds_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
|
|
from collections import defaultdict
from enum import Enum
from sklearn.base import ClassifierMixin, RegressorMixin, BaseEstimator
from sklearn_pmml import pmml
from sklearn_pmml.convert.utils import pmml_row, assert_equal
from sklearn_pmml.convert.features import *
from pyxb.utils.domutils import BindingDOMSupport as bds
import numpy as np
import pandas as pd
class TransformationContext(object):
"""
Context holder object
"""
def __init__(self, schemas=None):
"""
:type schemas: dict[Schema, list[Feature]] | None
"""
if schemas is None:
schemas = {}
self.schemas = schemas
class ModelMode(Enum):
CLASSIFICATION = 'classification'
REGRESSION = 'regression'
class Schema(Enum):
INPUT = ('input', True, True)
"""
Schema used to define input variables. Short names allowed
"""
OUTPUT = ('output', True, True)
"""
Schema used to define output variables. Short names allowed. For the categorical variables the continuous
probability variables will be automatically created as <feature_name>.<feature_value>
"""
DERIVED = ('derived', False, False)
"""
Schema used to define derived features. Short names not allowed due to potential overlap with input variables.
"""
NUMERIC = ('numeric', False, False)
"""
Schema used to encode categorical features as numbers. Short names not allowed due to their overlap with
input variables
"""
MODEL = ('model', True, False)
"""
Schema used to define features fed into the sklearn estimator.
Short names allowed because these variables are not going into PMML.
"""
INTERNAL = ('internal', False, True)
"""
This schema may be used by complex converters to hide the variables used for internal needs
(e.g. the raw predictions of GBRT)
"""
CATEGORIES = ('categories', True, False)
"""
This schema is used to extend categorical outputs with probabilities of categories
"""
    def __init__(self, name, short_names_allowed, data_dict_eligible):
        self._name = name
        self._short_names_allowed = short_names_allowed
        self._data_dict_eligible = data_dict_eligible
@property
def namespace(self):
"""
The namespace corresponding to the schema
"""
return self._name
@property
def short_names_allowed(self):
"""
The schema allows usage of short names instead of fully-qualified names
"""
return self._short_names_allowed
@property
def eligible_for_data_dictionary(self):
"""
The variables defined in the schema should appear in the DataDictionary
"""
        return self._data_dict_eligible
def extract_feature_name(self, f):
"""
Extract the printed name of the feature.
:param f: feature to work with
:type f: Feature|str
"""
if self.short_names_allowed:
if isinstance(f, str):
return f
else:
return f.full_name
else:
return "{}.{}".format(self.namespace, f if isinstance(f, str) else f.full_name)
class EstimatorConverter(object):
"""
A new base class for the estimator converters
"""
EPSILON = 0.00001
SCHEMAS_IN_MINING_MODEL = {Schema.INPUT, Schema.INTERNAL}
def __init__(self, estimator, context, mode):
self.model_function = mode
self.estimator = estimator
self.context = context
assert not any(isinstance(_, DerivedFeature) for _ in context.schemas[Schema.INPUT]), \
'Input schema represents the input fields only'
assert all(isinstance(_, DerivedFeature) for _ in context.schemas[Schema.DERIVED]), \
'Derived schema represents the set of automatically generated fields'
assert not any(isinstance(_, DerivedFeature) for _ in context.schemas[Schema.OUTPUT]), \
'Only regular features allowed in output schema; use Output transformation if you want to transform values'
# create a new schema for categories probabilities
categories = []
for feature in context.schemas[Schema.OUTPUT]:
if isinstance(feature, CategoricalFeature):
for value in feature.value_list:
categories.append(RealNumericFeature(
name=value,
namespace=feature.name
))
context.schemas[Schema.CATEGORIES] = categories
def data_dictionary(self):
"""
Build a data dictionary and return a DataDictionary element.
DataDictionary contains feature types for all variables used in the PMML,
except the ones defined as Derived Features
"""
dd = pmml.DataDictionary()
for schema, fields in sorted(self.context.schemas.items(), key=lambda x: x[0].name):
assert isinstance(schema, Schema)
if schema.eligible_for_data_dictionary:
for f in fields:
data_field = pmml.DataField(
dataType=f.data_type.value,
name=schema.extract_feature_name(f),
optype=f.optype.value)
dd.DataField.append(data_field)
if isinstance(f, CategoricalFeature):
for v in f.value_list:
data_field.append(pmml.Value(value_=v))
return dd
def output(self):
"""
Output section of PMML contains all model outputs.
:return: pmml.Output
"""
output = pmml.Output()
# the response variables
for feature in self.context.schemas[Schema.OUTPUT]:
output_field = pmml.OutputField(
name=Schema.OUTPUT.extract_feature_name(feature),
feature='predictedValue'
)
output.append(output_field)
return output
def transformation_dictionary(self):
"""
Build a transformation dictionary and return a TransformationDictionary element
"""
td = pmml.TransformationDictionary()
# define a schema with all variables available for a model
encoded_schema = []
self.context.schemas[Schema.NUMERIC] = encoded_schema
idx = {}
# First, populate transformation dictionary for _all_ derived fields, because they can be requested later
for f in self.context.schemas[Schema.DERIVED]:
ef = RealNumericFeature(name=f.name)
df = pmml.DerivedField(
name=ef.full_name,
optype=ef.optype.value,
dataType=ef.data_type.value
)
df.append(f.transformation)
td.append(df)
assert f.name not in idx, 'Duplicate field definition: {}'.format(f.name)
idx[f.name] = ef
# second, define the numeric transformations for the categorical variables
for f in self.context.schemas[Schema.INPUT]:
assert f.name not in idx, 'Duplicate field definition: {}'.format(f.name)
if isinstance(f, CategoricalFeature):
ef = RealNumericFeature(name=f.name, namespace=Schema.NUMERIC.namespace)
# create a record in transformation dictionary with mapping from raw values into numbers
df = pmml.DerivedField(
name=ef.full_name,
optype=ef.optype.value,
dataType=ef.data_type.value
)
mv = pmml.MapValues(outputColumn='output', dataType=ef.data_type.value)
mv.append(pmml.FieldColumnPair(field=f.full_name, column='input'))
it = pmml.InlineTable()
for i, v in enumerate(f.value_list):
it.append(pmml_row(input=v, output=i))
td.append(df.append(mv.append(it)))
idx[f.name] = ef
else:
idx[f.name] = f
# now we can build a mirror of model schema into the numeric schema
self.context.schemas[Schema.NUMERIC] = [idx[f.name] for f in self.context.schemas[Schema.MODEL]]
return td
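    # Illustrative note (not part of sklearn_pmml): for a categorical input
    # feature with value_list ['no', 'yes'], the loop above emits a derived
    # field in the 'numeric' namespace whose MapValues/InlineTable maps each
    # raw value to its index in value_list ('no' -> 0, 'yes' -> 1), which is
    # how Schema.NUMERIC can mirror Schema.MODEL using numbers only.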
def model(self, verification_data=None):
"""
Build a mining model and return one of the MODEL-ELEMENTs
"""
pass
def model_verification(self, verification_data):
"""
Use the input verification_data, apply the transformations, evaluate the model response and produce the
ModelVerification element
:param verification_data: list of dictionaries or data frame
:type verification_data: dict[str, object]|pd.DataFrame
:return: ModelVerification element
"""
verification_data = pd.DataFrame(verification_data)
assert len(verification_data) > 0, 'Verification data can not be empty'
verification_input = pd.DataFrame(index=verification_data.index)
verification_model_input = pd.DataFrame(index=verification_data.index)
for key in self.context.schemas[Schema.INPUT]:
# all input features MUST be present in the verification_data
assert key.full_name in verification_data.columns, 'Missing input field "{}"'.format(key.full_name)
verification_input[Schema.INPUT.extract_feature_name(key)] = verification_data[key.full_name]
if isinstance(key, CategoricalFeature):
verification_model_input[Schema.INPUT.extract_feature_name(key)] = np.vectorize(key.to_number)(verification_data[key.full_name])
else:
verification_model_input[Schema.INPUT.extract_feature_name(key)] = verification_data[key.full_name]
for key in self.context.schemas[Schema.DERIVED]:
assert isinstance(key, DerivedFeature), 'Only DerivedFeatures are allowed in the DERIVED schema'
verification_model_input[key.full_name] = key.apply(verification_input)
# at this point we can check that MODEL schema contains only known features
for key in self.context.schemas[Schema.MODEL]:
assert Schema.MODEL.extract_feature_name(key) in verification_model_input.columns, \
'Unknown feature "{}" in the MODEL schema'.format(key.full_name)
# TODO: we can actually support multiple columns, but need to figure out the way to extract the data
# TODO: from the estimator properly
# building model results
assert len(self.context.schemas[Schema.OUTPUT]) == 1, 'Only one output is currently supported'
key = self.context.schemas[Schema.OUTPUT][0]
model_input = verification_model_input[list(map(Schema.MODEL.extract_feature_name, self.context.schemas[Schema.MODEL]))].values
model_results = np.vectorize(key.from_number)(self.estimator.predict(X=model_input))
verification_data_output_result = np.vectorize(key.from_number)(verification_data[key.full_name].values)
if key.full_name in verification_data:
# make sure that if results are provided, the expected and actual values are equal
assert_equal(key, model_results, verification_data_output_result)
verification_input[Schema.OUTPUT.extract_feature_name(key)] = model_results
if isinstance(key, CategoricalFeature):
probabilities = self.estimator.predict_proba(X=model_input)
for i, key in enumerate(self.context.schemas[Schema.CATEGORIES]):
verification_input[Schema.CATEGORIES.extract_feature_name(key)] = probabilities[:, i]
fields = []
field_names = []
for s in [Schema.INPUT, Schema.OUTPUT, Schema.CATEGORIES]:
fields += self.context.schemas[s]
field_names += list(map(s.extract_feature_name, self.context.schemas[s]))
mv = pmml.ModelVerification(recordCount=len(verification_input), fieldCount=len(fields))
# step one: build verification schema
verification_fields = pmml.VerificationFields()
for key in fields:
if isinstance(key, NumericFeature):
vf = pmml.VerificationField(field=key.name, column=key.name, precision=self.EPSILON)
else:
vf = pmml.VerificationField(field=key.name, column=key.name)
verification_fields.append(vf)
mv.append(verification_fields)
# step two: build data table
it = pmml.InlineTable()
for data in verification_input.iterrows():
data = data[1]
row = pmml.row()
row_empty = True
for key in field_names:
if verification_input[key].dtype == object or not np.isnan(data[key]):
col = bds().createChildElement(key)
bds().appendTextChild(data[key], col)
row.append(col)
row_empty = False
if not row_empty:
it.append(row)
mv.append(it)
return mv
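    # Illustrative sketch (not part of sklearn_pmml): verification data is a
    # list of dicts (or a DataFrame) keyed by the full names of the INPUT
    # features, optionally including the OUTPUT feature so that expected and
    # predicted values can be cross-checked. The feature names below are made
    # up for illustration.
    #
    #     converter.pmml(verification_data=[
    #         {'age': 33.0, 'smoker': 'no', 'risk': 'low'},
    #         {'age': 61.0, 'smoker': 'yes', 'risk': 'high'},
    #     ])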
def mining_schema(self):
"""
Mining schema contains the model input features.
NOTE: In order to avoid duplicates, I've decided to remove output features from MiningSchema
NOTE: We don't need to specify any DERIVED/NUMERIC fields here, because PMML interpreter will create them
in a lazy manner.
"""
ms = pmml.MiningSchema()
if Schema.INPUT in self.SCHEMAS_IN_MINING_MODEL:
for f in sorted(self.context.schemas[Schema.INPUT], key=lambda _: _.full_name):
ms.append(pmml.MiningField(invalidValueTreatment=f.invalid_value_treatment.value, name=f.full_name))
for s in [Schema.OUTPUT, Schema.INTERNAL]:
if s in self.SCHEMAS_IN_MINING_MODEL:
for f in self.context.schemas.get(s, []):
ms.append(pmml.MiningField(
name=s.extract_feature_name(f),
usageType="predicted"
))
return ms
def header(self):
"""
Build and return Header element
"""
return pmml.Header()
def pmml(self, verification_data=None):
"""
Build PMML from the context and estimator.
Returns PMML element
"""
p = pmml.PMML(version="4.2")
p.append(self.header())
p.append(self.data_dictionary())
p.append(self.transformation_dictionary())
p.append(self.model(verification_data))
return p
class ClassifierConverter(EstimatorConverter):
"""
Base class for classifier converters.
It is required that the output schema contains only categorical features.
The serializer will output result labels as output::feature_name and probabilities for each value of result feature
as output::feature_name::feature_value.
"""
def __init__(self, estimator, context):
"""
:param estimator: Estimator to convert
:type estimator: BaseEstimator
:param context: context to work with
:type context: TransformationContext
"""
super(ClassifierConverter, self).__init__(estimator, context, ModelMode.CLASSIFICATION)
assert isinstance(estimator, ClassifierMixin), 'Classifier converter should only be applied to the classification models'
for f in context.schemas[Schema.OUTPUT]:
assert isinstance(f, CategoricalFeature), 'Only categorical outputs are supported for classification task'
# create hidden variables for each categorical output
internal_schema = list(filter(lambda x: isinstance(x, CategoricalFeature), self.context.schemas[Schema.OUTPUT]))
self.context.schemas[Schema.INTERNAL] = internal_schema
def output(self):
"""
Output section of PMML contains all model outputs.
Classification tree output contains output variable as a label,
and <variable>.<value> as a probability of a value for a variable
:return: pmml.Output
"""
output = pmml.Output()
# the response variables
for feature in self.context.schemas[Schema.OUTPUT]:
output_field = pmml.OutputField(
name=Schema.OUTPUT.extract_feature_name(feature),
feature='predictedValue',
optype=feature.optype.value,
dataType=feature.data_type.value
)
output.append(output_field)
# the probabilities for categories; should only be populated for classification jobs
for feature in self.context.schemas[Schema.CATEGORIES]:
output_field = pmml.OutputField(
name=Schema.CATEGORIES.extract_feature_name(feature),
optype=feature.optype.value,
dataType=feature.data_type.value,
feature='probability',
targetField=Schema.INTERNAL.extract_feature_name(feature.namespace),
value_=feature.name
)
output.append(output_field)
return output
class RegressionConverter(EstimatorConverter):
def __init__(self, estimator, context):
super(RegressionConverter, self).__init__(estimator, context, ModelMode.REGRESSION)
|
|
"""
This module assumes that OpenCanary has been installed and is running.
In particular it assumes that OpenCanary is logging to /var/tmp/opencanary.log
and that the services it's testing are enabled.
It would be much better to set up tests to start the services needed and provide
the configuration files so that tests can be run without needing to reinstall
and start the service before each test. It would also be better to be able to
test the code directly rather than relying on the output of the logs.
Still this is a start.
"""
import time
import json
from ftplib import FTP, error_perm
import unittest
import socket
import warnings # Used in the TestSSHModule (see comment there)
# These libraries are only needed by the test suite and so aren't in the
# OpenCanary requirements; there is a requirements.txt file in the tests folder.
# Simply run `pip install -r opencanary/test/requirements.txt`
import requests
import paramiko
import pymysql
import git
def get_last_log():
"""
Gets the last line from `/var/tmp/opencanary.log` as a dictionary
"""
with open('/var/tmp/opencanary.log', 'r') as log_file:
return json.loads(log_file.readlines()[-1])
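# Illustrative shape of a log entry (not copied from a real OpenCanary log;
# only the fields that the tests below rely on are shown):
#
#     {"dst_port": 21,
#      "logdata": {"USERNAME": "anonymous", "PASSWORD": "anonymous@"}}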
class TestFTPModule(unittest.TestCase):
"""
Tests the cases for the FTP module.
The FTP server should not allow logins and should log each attempt.
"""
def setUp(self):
self.ftp = FTP('localhost')
def test_anonymous_ftp(self):
"""
Try to connect to the FTP service with no username or password.
"""
self.assertRaises(error_perm, self.ftp.login)
log = get_last_log()
self.assertEqual(log['dst_port'], 21)
self.assertEqual(log['logdata']['USERNAME'], "anonymous")
self.assertEqual(log['logdata']['PASSWORD'], "anonymous@")
def test_authenticated_ftp(self):
"""
Connect to the FTP service with a test username and password.
"""
self.assertRaises(error_perm,
self.ftp.login,
user='test_user',
passwd='test_pass')
last_log = get_last_log()
self.assertEqual(last_log['dst_port'], 21)
self.assertEqual(last_log['logdata']['USERNAME'], "test_user")
self.assertEqual(last_log['logdata']['PASSWORD'], "test_pass")
def tearDown(self):
self.ftp.close()
class TestGitModule(unittest.TestCase):
"""
Tests the Git Module by trying to clone a repository from localhost.
"""
def setUp(self):
self.repository = git.Repo
def test_clone_a_repository(self):
self.assertRaises(git.exc.GitCommandError,
self.repository.clone_from,
'git://localhost/test.git',
'/tmp/git_test')
def test_log_git_clone(self):
"""
Check that the git clone attempt was logged
"""
        # This test must be run after test_clone_a_repository, unless we add
        # an attempt to clone into this test or into setUp().
last_log = get_last_log()
self.assertEqual(last_log['logdata']['HOST'], "localhost")
self.assertEqual(last_log['logdata']['REPO'], "test.git")
class TestHTTPModule(unittest.TestCase):
"""
Tests the cases for the HTTP module.
    The HTTP server should look like a NAS and present a login box; any
interaction with the server (GET, POST) should be logged.
"""
def test_get_http_home_page(self):
"""
Simply get the home page.
"""
request = requests.get('http://localhost/')
self.assertEqual(request.status_code, 200)
self.assertIn('Synology RackStation', request.text)
last_log = get_last_log()
self.assertEqual(last_log['dst_port'], 80)
self.assertEqual(last_log['logdata']['HOSTNAME'], "localhost")
self.assertEqual(last_log['logdata']['PATH'], "/index.html")
self.assertIn('python-requests', last_log['logdata']['USERAGENT'])
def test_log_in_to_http_with_basic_auth(self):
"""
Try to log into the site with basic auth.
"""
request = requests.post('http://localhost/', auth=('user', 'pass'))
# Currently the web server returns 200, but in future it should return
        # a 403 status code.
self.assertEqual(request.status_code, 200)
self.assertIn('Synology RackStation', request.text)
last_log = get_last_log()
self.assertEqual(last_log['dst_port'], 80)
self.assertEqual(last_log['logdata']['HOSTNAME'], "localhost")
self.assertEqual(last_log['logdata']['PATH'], "/index.html")
self.assertIn('python-requests', last_log['logdata']['USERAGENT'])
# OpenCanary doesn't currently record credentials from basic auth.
def test_log_in_to_http_with_parameters(self):
"""
Try to log into the site by posting the parameters
"""
login_data = {
'username': 'test_user',
'password': 'test_pass',
'OTPcode': '',
'rememberme': '',
'__cIpHeRtExt': '',
'isIframeLogin': 'yes'}
request = requests.post('http://localhost/index.html', data=login_data)
# Currently the web server returns 200, but in future it should return
# a 403 status code.
self.assertEqual(request.status_code, 200)
self.assertIn('Synology RackStation', request.text)
last_log = get_last_log()
self.assertEqual(last_log['dst_port'], 80)
self.assertEqual(last_log['logdata']['HOSTNAME'], "localhost")
self.assertEqual(last_log['logdata']['PATH'], "/index.html")
self.assertIn('python-requests', last_log['logdata']['USERAGENT'])
self.assertEqual(last_log['logdata']['USERNAME'], "test_user")
self.assertEqual(last_log['logdata']['PASSWORD'], "test_pass")
def test_get_directory_listing(self):
"""
        Trying to get a directory listing should result in a 403 Forbidden message.
"""
request = requests.get('http://localhost/css/')
self.assertEqual(request.status_code, 403)
self.assertIn('Forbidden', request.text)
        # These requests are not logged at the moment. Maybe they should be.
def test_get_non_existent_file(self):
"""
        Trying to get a file that doesn't exist should give a 404 error message.
"""
request = requests.get('http://localhost/this/file/doesnt_exist.txt')
self.assertEqual(request.status_code, 404)
self.assertIn('Not Found', request.text)
        # These requests are not logged at the moment. Maybe they should be.
def test_get_supporting_image_file(self):
"""
Try to download a supporting image file
"""
request = requests.get('http://localhost/img/synohdpack/images/Components/checkbox.png')
# Just an arbitrary image
self.assertEqual(request.status_code, 200)
class TestSSHModule(unittest.TestCase):
"""
Tests the cases for the SSH server
"""
def setUp(self):
self.connection = paramiko.SSHClient()
self.connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def test_ssh_with_basic_login(self):
"""
Try to log into the SSH server
"""
# FIXME: At the time of this writing, paramiko calls cryptography
        # which throws a deprecation warning. It looks like this has been
# fixed https://github.com/paramiko/paramiko/issues/1369 but the fix
# hasn't been pushed to pypi. When the fix is pushed we can update
# and remove the import warnings and the warnings.catch.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(paramiko.ssh_exception.AuthenticationException,
self.connection.connect,
hostname="localhost",
port=22,
username="test_user",
password="test_pass")
last_log = get_last_log()
self.assertEqual(last_log['dst_port'], 22)
self.assertIn('paramiko', last_log['logdata']['REMOTEVERSION'])
self.assertEqual(last_log['logdata']['USERNAME'], "test_user")
self.assertEqual(last_log['logdata']['PASSWORD'], "test_pass")
def tearDown(self):
self.connection.close()
class TestNTPModule(unittest.TestCase):
"""
    Tests the NTP server. The server doesn't respond, but it will log attempts
    to trigger the MON_GETLIST_1 NTP command, which is used in DDoS attacks.
"""
def setUp(self):
packet = (
b'\x17' + # response more version mode
b'\x00' + # sequence number
b'\x03' + # implementation (NTPv3)
b'\x2a' + # request (MON_GETLIST_1)
b'\x00' + # error number / number of data items
b'\x00' + # item_size
b'\x00' # data
)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.sendto(packet, ('localhost', 123))
def test_ntp_server_monlist(self):
"""
Check that the MON_GETLIST_1 NTP command was logged correctly
"""
# The logs take about a second to show up, in other tests this is not
# an issue, because there are checks that run before looking at the log
# (e.g. request.status_code == 200 for HTTP) but for NTP we just check
        # the log. A hard-coded timeout is a horrible solution, but it works.
time.sleep(1)
last_log = get_last_log()
self.assertEqual(last_log['logdata']['NTP CMD'], "monlist")
self.assertEqual(last_log['dst_port'], 123)
def tearDown(self):
self.sock.close()
class TestMySQLModule(unittest.TestCase):
"""
    Tests the MySQL server. Attempting to log in should fail, and the
    attempt should be logged.
"""
def test_mysql_server_login(self):
"""
Login to the mysql server
"""
self.assertRaises(pymysql.err.OperationalError,
pymysql.connect,
host="localhost",
user="test_user",
password="test_pass",
db='db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
last_log = get_last_log()
self.assertEqual(last_log['logdata']['USERNAME'], "test_user")
self.assertEqual(last_log['logdata']['PASSWORD'], "b2e5ed6a0e59f99327399ced2009338d5c0fe237")
self.assertEqual(last_log['dst_port'], 3306)
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import
import os
import sys
import argparse
import subprocess
import shutil
import time
from cass_functions import (cassandra_query, get_data_dir, get_keyspaces,
get_table_directories, get_dir_structure)
from cleaner import data_cleaner
def parse_cmd():
parser = argparse.ArgumentParser(description='Snapshot Restoration')
parser.add_argument('-d', '--path',
type=check_dir,
required=True,
help="Specify path to load snapshots"
)
parser.add_argument('-n', '--node', '--host',
required=True,
nargs='+',
help="Specify host address(es)"
) # TODO still not sure why sstableloader would need more than 1 host
parser.add_argument('-k', '-ks', '--keyspace',
required=False,
nargs='+',
help="Specify keyspace(s)"
)
parser.add_argument('-tb', '-t', '--table', '-cf', '--column_family',
required=False,
nargs='+',
help="Specify table(s)"
)
parser.add_argument('-y',
required=False,
action='store_true',
help="Destroy existing database without prompt"
)
return parser.parse_args()
def check_cassandra(host):
# TODO better way to check if host option is valid?
# If there are no system keyspaces that can be retrieved, there is a problem
# with the host input, or there is a problem with Cassandra
ks = get_keyspaces(host, system=True)
if len(ks) == 0:
raise Exception('Cannot find system keyspaces, invalid host')
return True
def check_dir(folder):
if not os.path.isdir(folder):
raise argparse.ArgumentTypeError('Directory does not exist')
if os.access(folder, os.R_OK):
return folder
else:
raise argparse.ArgumentTypeError('Directory is not readable')
def restore_schema(host, load_path, keyspace):
    # This function opens the schema file for the given keyspace and replays
    # its contents through a CQL query
schema_location = load_path + '/' + keyspace + '/' + keyspace + '_schema.cql'
if not os.path.exists(schema_location):
raise Exception('Schema not found: %s' % schema_location)
with open(schema_location, 'r') as f:
cassandra_query(host, f.read())
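# Illustrative layout of the snapshot directory expected by this script, as
# implied by restore_schema() above and restore() below (the keyspace and
# table names are made up):
#
#     <load_path>/
#         my_keyspace/
#             my_keyspace_schema.cql
#             my_table/        <- SSTable files fed to sstableloader
#             other_table/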
def destroy_schema(host, flag=None):
success = False
destroy = False
keyspaces = get_keyspaces(host)
if len(keyspaces) > 0:
print('Removing keyspaces:')
for k in keyspaces:
print('\t' + k)
        if not flag:  # check if user wants to destroy listed keyspaces
option = raw_input('Destroy keyspaces? [y/n]')
if option == 'y' or option == 'Y':
destroy = True
elif flag == '-y':
destroy = True
else: # should never happen
raise Exception('Invalid flag parameter')
if destroy:
for k in keyspaces: # drop old keyspaces
print('Dropping keyspace: %s' % k)
cassandra_query(host, 'DROP KEYSPACE %s;' % k)
data_dir = get_data_dir()
active_dirs = os.listdir(data_dir)
print('Removing old keyspace directories')
for d in active_dirs:
if d in keyspaces:
print('Removing keyspace directory: %s/%s' % (data_dir, d))
shutil.rmtree(data_dir + '/' + d)
success = True
else:
success = True
return success
def restore(hosts, load_path, keyspace_arg = None, table_arg = None,
y_flag=None):
print('Checking Cassandra status . . .')
try:
subprocess.check_output(['nodetool', 'status'])
except:
raise Exception('Cassandra has not yet started')
# keyspaces inside snapshot directory
    available_keyspaces = filter(lambda x: os.path.isdir(load_path + '/' + x), \
os.listdir(load_path))
print('Checking keyspace arguments')
if keyspace_arg:
for keyspace in keyspace_arg:
            if keyspace not in available_keyspaces:
raise Exception('Keyspace "%s" not in snapshot folder' % keyspace)
load_keyspaces = keyspace_arg
else:
        load_keyspaces = available_keyspaces
print('Checking table arguments . . .')
if table_arg:
if not keyspace_arg or len(keyspace_arg) != 1:
raise Exception('Only one keyspace can be specified with table arg')
        for tb in table_arg:
            if tb not in os.listdir(load_path + '/' + load_keyspaces[0]):
                raise Exception('Table "%s" not found in keyspace "%s"'
                                % (tb, load_keyspaces[0]))
        load_tables = set(table_arg)
else:
print('No table arguments.')
print('Valid arguments.\n')
print('Destroying existing database')
if not destroy_schema(hosts[0], y_flag):
print('Unable to destroy previous data, exiting script')
sys.exit(0)
# delete old keyspace directories
data_cleaner(hosts[0])
for keyspace in load_keyspaces:
print('Creating schema for %s' % keyspace)
restore_schema(hosts[0], load_path, keyspace)
# keyspaces just created by schema
existing_keyspaces = get_keyspaces(hosts[0])
# basic schema in a json format
structure = get_dir_structure(hosts[0], existing_keyspaces)
for keyspace in load_keyspaces:
print('Loading keyspace "%s"' % keyspace)
if not table_arg:
load_tables = filter(
lambda x: os.path.isdir(load_path + '/' + keyspace + '/' + x),
os.listdir(load_path + '/' + keyspace)
)
existing_tables = structure[keyspace].keys()
for table in load_tables:
if table not in existing_tables:
raise Exception('Table not in schema, error with snapshot')
load_table_dir = load_path + '/' + keyspace + '/' + table
print('\n\nLoading table: %s' % table)
# sstableloader has been more stable than nodetool refresh
            subprocess.call(['/bin/sstableloader',
                             '-d', ','.join(hosts),
                             load_table_dir])
print('Restoration complete')
if __name__ == '__main__':
cmds = parse_cmd()
if cmds.path.endswith('\\') or cmds.path.endswith('/'):
load_path = cmds.path[:-1]
else:
load_path = cmds.path
if len(cmds.node) == 0:
raise Exception('Node/host ip required. See restore.py -h for details.')
start = time.time()
check_cassandra(cmds.node[0])
restore(cmds.node, load_path, cmds.keyspace, cmds.table, cmds.y)
end = time.time()
print('Elapsed time: %s' % (end - start))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova.network import manager as network_manager
HOST = "testhost"
FLAGS = flags.FLAGS
class FakeIptablesFirewallDriver(object):
def __init__(self, **kwargs):
pass
def setattr(self, key, val):
self.__setattr__(key, val)
def apply_instance_filter(self, instance, network_info):
pass
class FakeVIFDriver(object):
def __init__(self, **kwargs):
pass
def setattr(self, key, val):
self.__setattr__(key, val)
def plug(self, instance, network, mapping):
return {
'id': 'fake',
'bridge_name': 'fake',
'mac_address': 'fake',
'ip_address': 'fake',
'dhcp_server': 'fake',
'extra_params': 'fake',
}
class FakeModel(dict):
"""Represent a model from the db"""
def __init__(self, *args, **kwargs):
self.update(kwargs)
def __getattr__(self, name):
return self[name]
class FakeNetworkManager(network_manager.NetworkManager):
"""This NetworkManager doesn't call the base class so we can bypass all
inherited service cruft and just perform unit tests.
"""
class FakeDB:
def fixed_ip_get_by_instance(self, context, instance_id):
return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
dict(address='10.0.0.2')]
def network_get_by_cidr(self, context, cidr):
raise exception.NetworkNotFoundForCidr()
def network_create_safe(self, context, net):
fakenet = dict(net)
fakenet['id'] = 999
return fakenet
def network_get_all(self, context):
raise exception.NoNetworksFound()
def virtual_interface_get_all(self, context):
floats = [{'address': '172.16.1.1'},
{'address': '172.16.1.2'},
{'address': '173.16.1.2'}]
vifs = [{'instance_id': 0,
'fixed_ipv6': '2001:db8::dcad:beff:feef:1',
'fixed_ips': [{'address': '172.16.0.1',
'floating_ips': [floats[0]]}]},
{'instance_id': 20,
'fixed_ipv6': '2001:db8::dcad:beff:feef:2',
'fixed_ips': [{'address': '172.16.0.2',
'floating_ips': [floats[1]]}]},
{'instance_id': 30,
'fixed_ipv6': '2002:db8::dcad:beff:feef:2',
'fixed_ips': [{'address': '173.16.0.2',
'floating_ips': [floats[2]]}]}]
return vifs
def instance_get_id_to_uuid_mapping(self, context, ids):
# NOTE(jkoelker): This is just here until we can rely on UUIDs
mapping = {}
for id in ids:
mapping[id] = str(utils.gen_uuid())
return mapping
def __init__(self):
self.db = self.FakeDB()
self.deallocate_called = None
def deallocate_fixed_ip(self, context, address):
self.deallocate_called = address
def _create_fixed_ips(self, context, network_id):
pass
flavor = {'id': 0,
'name': 'fake_flavor',
'memory_mb': 2048,
'vcpus': 2,
'local_gb': 10,
'flavor_id': 0,
'swap': 0,
'rxtx_quota': 0,
'rxtx_cap': 3}
def fake_network(network_id, ipv6=None):
if ipv6 is None:
ipv6 = FLAGS.use_ipv6
fake_network = {'id': network_id,
'label': 'test%d' % network_id,
'injected': False,
'multi_host': False,
'cidr': '192.168.%d.0/24' % network_id,
'cidr_v6': None,
'netmask': '255.255.255.0',
'netmask_v6': None,
'bridge': 'fake_br%d' % network_id,
'bridge_interface': 'fake_eth%d' % network_id,
'gateway': '192.168.%d.1' % network_id,
'gateway_v6': None,
'broadcast': '192.168.%d.255' % network_id,
'dns1': '192.168.%d.3' % network_id,
'dns2': '192.168.%d.4' % network_id,
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.%d.2' % network_id}
if ipv6:
fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
fake_network['netmask_v6'] = '64'
return fake_network
def vifs(n):
for x in xrange(n):
yield {'id': x,
'address': 'DE:AD:BE:EF:00:%02x' % x,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
'network_id': x,
'network': FakeModel(**fake_network(x)),
'instance_id': 0}
def floating_ip_ids():
for i in xrange(99):
yield i
def fixed_ip_ids():
for i in xrange(99):
yield i
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
def next_fixed_ip(network_id, num_floating_ips=0):
next_id = fixed_ip_id.next()
f_ips = [FakeModel(**next_floating_ip(next_id))
for i in xrange(num_floating_ips)]
return {'id': next_id,
'network_id': network_id,
'address': '192.168.%d.1%02d' % (network_id, next_id),
'instance_id': 0,
'allocated': False,
# and since network_id and vif_id happen to be equivalent
'virtual_interface_id': network_id,
'floating_ips': f_ips}
def next_floating_ip(fixed_ip_id):
next_id = floating_ip_id.next()
return {'id': next_id,
'address': '10.10.10.1%02d' % next_id,
'fixed_ip_id': fixed_ip_id,
'project_id': None,
'auto_assigned': False}
def ipv4_like(ip, match_string):
ip = ip.split('.')
match_octets = match_string.split('.')
for i, octet in enumerate(match_octets):
if octet == '*':
continue
if octet != ip[i]:
return False
return True
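# Illustrative examples (not part of the original module):
#   ipv4_like('192.168.1.5', '192.168.*.*')  # -> True
#   ipv4_like('192.168.1.5', '10.*.*.*')     # -> False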
def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
floating_ips_per_fixed_ip=0):
    # stubs is the self.stubs object from the calling test case
    # ips_per_vif is the number of fixed ips each vif will have
    # floating_ips_per_fixed_ip is the number of floating ips for each fixed ip
network = network_manager.FlatManager(host=HOST)
network.db = db
# reset the fixed and floating ip generators
global floating_ip_id, fixed_ip_id
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
def fixed_ips_fake(*args, **kwargs):
return [next_fixed_ip(i, floating_ips_per_fixed_ip)
for i in xrange(num_networks) for j in xrange(ips_per_vif)]
def virtual_interfaces_fake(*args, **kwargs):
return [vif for vif in vifs(num_networks)]
def instance_type_fake(*args, **kwargs):
return flavor
stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake)
stubs.Set(db, 'instance_type_get', instance_type_fake)
return network.get_instance_nw_info(None, 0, 0, None)
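# Illustrative use from a nova unit test (a sketch, assuming a stubout-based
# test case that provides self.stubs, as elsewhere in the historical nova
# test suite):
#
#     nw_info = fake_get_instance_nw_info(self.stubs, num_networks=2,
#                                         ips_per_vif=1)
#     # nw_info is whatever FlatManager.get_instance_nw_info() returns for
#     # the stubbed fixed ips, vifs and flavor defined above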
|
|
import copy
import datetime
import logging
import pandas as pd
from qstrader import settings
from qstrader.broker.portfolio.portfolio_event import PortfolioEvent
from qstrader.broker.portfolio.position_handler import PositionHandler
class Portfolio(object):
"""
Represents a portfolio of assets. It contains a cash
account with the ability to subscribe and withdraw funds.
It also contains a list of positions in assets, encapsulated
by a PositionHandler instance.
Parameters
----------
start_dt : datetime
Portfolio creation datetime.
starting_cash : float, optional
        Starting cash of the portfolio. Defaults to 0.0.
currency: str, optional
The portfolio denomination currency.
portfolio_id: str, optional
An identifier for the portfolio.
name: str, optional
The human-readable name of the portfolio.
"""
def __init__(
self,
start_dt,
starting_cash=0.0,
currency="USD",
portfolio_id=None,
name=None
):
"""
        Initialise the Portfolio object with a PositionHandler,
        an event history, and a cash balance. Ensure that the
        portfolio denomination currency is also set.
"""
self.start_dt = start_dt
self.current_dt = start_dt
self.starting_cash = starting_cash
self.currency = currency
self.portfolio_id = portfolio_id
self.name = name
self.pos_handler = PositionHandler()
self.history = []
self.logger = logging.getLogger('Portfolio')
self.logger.setLevel(logging.DEBUG)
self.logger.info(
'(%s) Portfolio "%s" instance initialised' % (
self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
self.portfolio_id
)
)
self._initialise_portfolio_with_cash()
def _initialise_portfolio_with_cash(self):
"""
Initialise the portfolio with a (default) currency Cash Asset
with quantity equal to 'starting_cash'.
"""
self.cash = copy.copy(self.starting_cash)
if self.starting_cash > 0.0:
self.history.append(
PortfolioEvent.create_subscription(
self.current_dt, self.starting_cash, self.starting_cash
)
)
self.logger.info(
'(%s) Funds subscribed to portfolio "%s" '
'- Credit: %0.2f, Balance: %0.2f' % (
self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
self.portfolio_id,
round(self.starting_cash, 2),
round(self.starting_cash, 2)
)
)
@property
def total_market_value(self):
"""
Obtain the total market value of the portfolio excluding cash.
"""
return self.pos_handler.total_market_value()
@property
def total_equity(self):
"""
Obtain the total market value of the portfolio including cash.
"""
return self.total_market_value + self.cash
@property
def total_unrealised_pnl(self):
"""
Calculate the sum of all the positions' unrealised P&Ls.
"""
return self.pos_handler.total_unrealised_pnl()
@property
def total_realised_pnl(self):
"""
Calculate the sum of all the positions' realised P&Ls.
"""
return self.pos_handler.total_realised_pnl()
@property
def total_pnl(self):
"""
Calculate the sum of all the positions' total P&Ls.
"""
return self.pos_handler.total_pnl()
def subscribe_funds(self, dt, amount):
"""
Credit funds to the portfolio.
"""
if dt < self.current_dt:
raise ValueError(
'Subscription datetime (%s) is earlier than '
'current portfolio datetime (%s). Cannot '
'subscribe funds.' % (dt, self.current_dt)
)
self.current_dt = dt
if amount < 0.0:
raise ValueError(
'Cannot credit negative amount: '
'%s to the portfolio.' % amount
)
self.cash += amount
self.history.append(
PortfolioEvent.create_subscription(self.current_dt, amount, self.cash)
)
self.logger.info(
'(%s) Funds subscribed to portfolio "%s" '
'- Credit: %0.2f, Balance: %0.2f' % (
self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
self.portfolio_id, round(amount, 2),
round(self.cash, 2)
)
)
def withdraw_funds(self, dt, amount):
"""
Withdraw funds from the portfolio if there is enough
cash to allow it.
"""
# Check that amount is positive and that there is
# enough in the portfolio to withdraw the funds
if dt < self.current_dt:
raise ValueError(
'Withdrawal datetime (%s) is earlier than '
'current portfolio datetime (%s). Cannot '
'withdraw funds.' % (dt, self.current_dt)
)
self.current_dt = dt
if amount < 0:
raise ValueError(
'Cannot debit negative amount: '
'%0.2f from the portfolio.' % amount
)
if amount > self.cash:
raise ValueError(
'Not enough cash in the portfolio to '
'withdraw. %s withdrawal request exceeds '
'current portfolio cash balance of %s.' % (
amount, self.cash
)
)
self.cash -= amount
self.history.append(
PortfolioEvent.create_withdrawal(self.current_dt, amount, self.cash)
)
self.logger.info(
'(%s) Funds withdrawn from portfolio "%s" '
'- Debit: %0.2f, Balance: %0.2f' % (
self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
self.portfolio_id, round(amount, 2),
round(self.cash, 2)
)
)
def transact_asset(self, txn):
"""
Adjusts positions to account for a transaction.
"""
if txn.dt < self.current_dt:
raise ValueError(
'Transaction datetime (%s) is earlier than '
'current portfolio datetime (%s). Cannot '
'transact assets.' % (txn.dt, self.current_dt)
)
self.current_dt = txn.dt
txn_share_cost = txn.price * txn.quantity
txn_total_cost = txn_share_cost + txn.commission
if txn_total_cost > self.cash:
if settings.PRINT_EVENTS:
print(
'WARNING: Not enough cash in the portfolio to '
'carry out transaction. Transaction cost of %s '
'exceeds remaining cash of %s. Transaction '
'will proceed with a negative cash balance.' % (
txn_total_cost, self.cash
)
)
self.pos_handler.transact_position(txn)
self.cash -= txn_total_cost
# Form Portfolio history details
direction = "LONG" if txn.direction > 0 else "SHORT"
description = "%s %s %s %0.2f %s" % (
direction, txn.quantity, txn.asset.upper(),
txn.price, datetime.datetime.strftime(txn.dt, "%d/%m/%Y")
)
if direction == "LONG":
pe = PortfolioEvent(
dt=txn.dt, type='asset_transaction',
description=description,
debit=round(txn_total_cost, 2), credit=0.0,
balance=round(self.cash, 2)
)
self.logger.info(
'(%s) Asset "%s" transacted LONG in portfolio "%s" '
'- Debit: %0.2f, Balance: %0.2f' % (
txn.dt.strftime(settings.LOGGING["DATE_FORMAT"]),
txn.asset, self.portfolio_id,
round(txn_total_cost, 2), round(self.cash, 2)
)
)
else:
pe = PortfolioEvent(
dt=txn.dt, type='asset_transaction',
description=description,
debit=0.0, credit=-1.0 * round(txn_total_cost, 2),
balance=round(self.cash, 2)
)
self.logger.info(
'(%s) Asset "%s" transacted SHORT in portfolio "%s" '
'- Credit: %0.2f, Balance: %0.2f' % (
txn.dt.strftime(settings.LOGGING["DATE_FORMAT"]),
txn.asset, self.portfolio_id,
-1.0 * round(txn_total_cost, 2), round(self.cash, 2)
)
)
self.history.append(pe)
def portfolio_to_dict(self):
"""
Output the portfolio holdings information as a dictionary
with Assets as keys and sub-dictionaries as values.
This excludes cash.
Returns
-------
`dict`
The portfolio holdings.
"""
holdings = {}
for asset, pos in self.pos_handler.positions.items():
holdings[asset] = {
"quantity": pos.net_quantity,
"market_value": pos.market_value,
"unrealised_pnl": pos.unrealised_pnl,
"realised_pnl": pos.realised_pnl,
"total_pnl": pos.total_pnl
}
return holdings
def update_market_value_of_asset(
self, asset, current_price, current_dt
):
"""
Update the market value of the asset to the current
trade price and date.
"""
if asset not in self.pos_handler.positions:
return
else:
if current_price < 0.0:
raise ValueError(
'Current trade price of %s is negative for '
'asset %s. Cannot update position.' % (
current_price, asset
)
)
if current_dt < self.current_dt:
raise ValueError(
'Current trade date of %s is earlier than '
'current date %s of asset %s. Cannot update '
'position.' % (
current_dt, self.current_dt, asset
)
)
self.pos_handler.positions[asset].update_current_price(
current_price, current_dt
)
def history_to_df(self):
"""
Creates a Pandas DataFrame of the Portfolio history.
"""
records = [pe.to_dict() for pe in self.history]
return pd.DataFrame.from_records(
records, columns=[
"date", "type", "description", "debit", "credit", "balance"
]
).set_index(keys=["date"])
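# Minimal usage sketch (illustrative only; assumes the qstrader package and
# its settings/logging configuration are importable):
#
#     import datetime
#
#     pf = Portfolio(
#         start_dt=datetime.datetime(2020, 1, 1),
#         starting_cash=100000.0,
#         portfolio_id="pf001",
#         name="Example Portfolio"
#     )
#     pf.subscribe_funds(datetime.datetime(2020, 1, 2), 50000.0)
#     pf.withdraw_funds(datetime.datetime(2020, 1, 3), 25000.0)
#     print(pf.total_equity)      # cash only until assets are transacted
#     print(pf.history_to_df())   # subscription/withdrawal event history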
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
import datetime
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
class BaseUsage(object):
show_terminated = False
def __init__(self, request, project_id=None):
self.project_id = project_id or request.user.tenant_id
self.request = request
self.summary = {}
self.usage_list = []
self.limits = {}
self.quotas = {}
@property
def today(self):
return timezone.now()
@staticmethod
def get_start(year, month, day):
start = datetime.datetime(year, month, day, 0, 0, 0)
return timezone.make_aware(start, timezone.utc)
@staticmethod
def get_end(year, month, day):
end = datetime.datetime(year, month, day, 23, 59, 59)
return timezone.make_aware(end, timezone.utc)
def get_instances(self):
instance_list = []
        for u in self.usage_list:
            instance_list.extend(u.server_usages)
return instance_list
def get_date_range(self):
if not hasattr(self, "start") or not hasattr(self, "end"):
args_start = (self.today.year, self.today.month, 1)
args_end = (self.today.year, self.today.month, self.today.day)
form = self.get_form()
if form.is_valid():
start = form.cleaned_data['start']
end = form.cleaned_data['end']
args_start = (start.year,
start.month,
start.day)
args_end = (end.year,
end.month,
end.day)
elif form.is_bound:
messages.error(self.request,
_("Invalid date format: "
"Using today as default."))
self.start = self.get_start(*args_start)
self.end = self.get_end(*args_end)
return self.start, self.end
def init_form(self):
today = datetime.date.today()
self.start = datetime.date(day=1, month=today.month, year=today.year)
self.end = today
return self.start, self.end
def get_form(self):
if not hasattr(self, 'form'):
req = self.request
start = req.GET.get('start', req.session.get('usage_start'))
end = req.GET.get('end', req.session.get('usage_end'))
if start and end:
# bound form
self.form = forms.DateForm({'start': start, 'end': end})
else:
# non-bound form
init = self.init_form()
start = init[0].isoformat()
end = init[1].isoformat()
self.form = forms.DateForm(initial={'start': start,
'end': end})
req.session['usage_start'] = start
req.session['usage_end'] = end
return self.form
def _get_neutron_usage(self, limits, resource_name):
resource_map = {
'floatingip': {
'api': api.network.tenant_floating_ip_list,
'limit_name': 'totalFloatingIpsUsed',
'message': _('Unable to retrieve floating IP addresses.')
},
'security_group': {
'api': api.network.security_group_list,
'limit_name': 'totalSecurityGroupsUsed',
'message': _('Unable to retrieve security groups.')
}
}
resource = resource_map[resource_name]
try:
method = resource['api']
current_used = len(method(self.request))
except Exception:
current_used = 0
msg = resource['message']
exceptions.handle(self.request, msg)
limits[resource['limit_name']] = current_used
def _set_neutron_limit(self, limits, neutron_quotas, resource_name):
limit_name_map = {
'floatingip': 'maxTotalFloatingIps',
'security_group': 'maxSecurityGroups',
}
if neutron_quotas is None:
resource_max = float("inf")
else:
resource_max = getattr(neutron_quotas.get(resource_name),
'limit', float("inf"))
if resource_max == -1:
resource_max = float("inf")
limits[limit_name_map[resource_name]] = resource_max
def get_neutron_limits(self):
if not api.base.is_service_enabled(self.request, 'network'):
return
try:
neutron_quotas_supported = (
api.neutron.is_quotas_extension_supported(self.request))
neutron_sg_used = (
api.neutron.is_extension_supported(self.request,
'security-group'))
if api.network.floating_ip_supported(self.request):
self._get_neutron_usage(self.limits, 'floatingip')
if neutron_sg_used:
self._get_neutron_usage(self.limits, 'security_group')
# Quotas are an optional extension in Neutron. If it isn't
# enabled, assume the floating IP limit is infinite.
if neutron_quotas_supported:
neutron_quotas = api.neutron.tenant_quota_get(self.request,
self.project_id)
else:
neutron_quotas = None
except Exception:
# Assume neutron security group and quotas are enabled
# because they are enabled in most Neutron plugins.
neutron_sg_used = True
neutron_quotas = None
msg = _('Unable to retrieve network quota information.')
exceptions.handle(self.request, msg)
self._set_neutron_limit(self.limits, neutron_quotas, 'floatingip')
if neutron_sg_used:
self._set_neutron_limit(self.limits, neutron_quotas,
'security_group')
def get_cinder_limits(self):
"""Get volume limits if cinder is enabled."""
if not api.base.is_service_enabled(self.request, 'volume'):
return
try:
self.limits.update(api.cinder.tenant_absolute_limits(self.request))
except Exception:
msg = _("Unable to retrieve volume limit information.")
exceptions.handle(self.request, msg)
return
def get_limits(self):
try:
self.limits = api.nova.tenant_absolute_limits(self.request)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve limit information."))
self.get_neutron_limits()
self.get_cinder_limits()
def get_usage_list(self, start, end):
return []
def summarize(self, start, end):
if not api.nova.extension_supported('SimpleTenantUsage', self.request):
return
if start <= end and start <= self.today:
# The API can't handle timezone aware datetime, so convert back
# to naive UTC just for this last step.
start = timezone.make_naive(start, timezone.utc)
end = timezone.make_naive(end, timezone.utc)
try:
self.usage_list = self.get_usage_list(start, end)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve usage information.'))
elif end < start:
messages.error(self.request,
_("Invalid time period. The end date should be "
"more recent than the start date."))
elif start > self.today:
messages.error(self.request,
_("Invalid time period. You are requesting "
"data from the future which may not exist."))
for project_usage in self.usage_list:
project_summary = project_usage.get_summary()
for key, value in project_summary.items():
self.summary.setdefault(key, 0)
self.summary[key] += value
def get_quotas(self):
try:
self.quotas = quotas.tenant_quota_usages(self.request)
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
def csv_link(self):
form = self.get_form()
data = {}
if hasattr(form, "cleaned_data"):
data = form.cleaned_data
if not ('start' in data and 'end' in data):
data = {"start": self.today.date(), "end": self.today.date()}
return "?start=%s&end=%s&format=csv" % (data['start'],
data['end'])
class GlobalUsage(BaseUsage):
show_terminated = True
def get_usage_list(self, start, end):
return api.nova.usage_list(self.request, start, end)
class ProjectUsage(BaseUsage):
attrs = ('memory_mb', 'vcpus', 'uptime',
'hours', 'local_gb')
def get_usage_list(self, start, end):
show_terminated = self.request.GET.get('show_terminated',
self.show_terminated)
instances = []
terminated_instances = []
usage = api.nova.usage_get(self.request, self.project_id, start, end)
# Attribute may not exist if there are no instances
if hasattr(usage, 'server_usages'):
now = self.today
for server_usage in usage.server_usages:
# This is a way to phrase uptime in a way that is compatible
# with the 'timesince' filter. (Use of local time intentional.)
server_uptime = server_usage['uptime']
total_uptime = now - datetime.timedelta(seconds=server_uptime)
server_usage['uptime_at'] = total_uptime
if server_usage['ended_at'] and not show_terminated:
terminated_instances.append(server_usage)
else:
instances.append(server_usage)
usage.server_usages = instances
return (usage,)
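# Illustrative usage sketch (assumes a Django request object, as supplied by a
# Horizon usage view; only methods defined on the classes above are used):
#
#     usage = GlobalUsage(request)
#     usage.summarize(*usage.get_date_range())
#     usage.get_limits()
#     csv_url = usage.csv_link()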
|
|
from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
import re
def replace(text):
root = text._root()
engine = SearchEngine.get(root)
if not hasattr(engine, "_replacedialog"):
engine._replacedialog = ReplaceDialog(root, engine)
dialog = engine._replacedialog
dialog.open(text)
class ReplaceDialog(SearchDialogBase):
title = "Replace Dialog"
icon = "Replace"
def __init__(self, root, engine):
SearchDialogBase.__init__(self, root, engine)
self.replvar = StringVar(root)
def open(self, text):
SearchDialogBase.open(self, text)
try:
first = text.index("sel.first")
except TclError:
first = None
try:
last = text.index("sel.last")
except TclError:
last = None
first = first or text.index("insert")
last = last or first
self.show_hit(first, last)
self.ok = 1
def create_entries(self):
SearchDialogBase.create_entries(self)
self.replent = self.make_entry("Replace with:", self.replvar)[0]
def create_command_buttons(self):
SearchDialogBase.create_command_buttons(self)
self.make_button("Find", self.find_it)
self.make_button("Replace", self.replace_it)
self.make_button("Replace+Find", self.default_command, 1)
self.make_button("Replace All", self.replace_all)
def find_it(self, event=None):
self.do_find(0)
def replace_it(self, event=None):
if self.do_find(self.ok):
self.do_replace()
def default_command(self, event=None):
if self.do_find(self.ok):
if self.do_replace(): # Only find next match if replace succeeded.
                # A bad re can cause it to fail.
self.do_find(0)
def _replace_expand(self, m, repl):
""" Helper function for expanding a regular expression
in the replace field, if needed. """
if self.engine.isre():
try:
new = m.expand(repl)
except re.error:
self.engine.report_error(repl, 'Invalid Replace Expression')
new = None
else:
new = repl
return new
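    # Illustrative note on _replace_expand (not in the original): in regex
    # mode, a search for r'(\d+)' with a replace field of r'<\1>' expands a
    # match on "abc 42" to "<42>" via m.expand(); in non-regex mode the
    # replace field is used verbatim.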
def replace_all(self, event=None):
prog = self.engine.getprog()
if not prog:
return
repl = self.replvar.get()
text = self.text
res = self.engine.search_text(text, prog)
if not res:
text.bell()
return
text.tag_remove("sel", "1.0", "end")
text.tag_remove("hit", "1.0", "end")
line = res[0]
col = res[1].start()
if self.engine.iswrap():
line = 1
col = 0
ok = 1
first = last = None
# XXX ought to replace circular instead of top-to-bottom when wrapping
text.undo_block_start()
while 1:
res = self.engine.search_forward(text, prog, line, col, 0, ok)
if not res:
break
line, m = res
chars = text.get("%d.0" % line, "%d.0" % (line+1))
orig = m.group()
new = self._replace_expand(m, repl)
if new is None:
break
i, j = m.span()
first = "%d.%d" % (line, i)
last = "%d.%d" % (line, j)
if new == orig:
text.mark_set("insert", last)
else:
text.mark_set("insert", first)
if first != last:
text.delete(first, last)
if new:
text.insert(first, new)
col = i + len(new)
ok = 0
text.undo_block_stop()
if first and last:
self.show_hit(first, last)
self.close()
def do_find(self, ok=0):
if not self.engine.getprog():
return False
text = self.text
res = self.engine.search_text(text, None, ok)
if not res:
text.bell()
return False
line, m = res
i, j = m.span()
first = "%d.%d" % (line, i)
last = "%d.%d" % (line, j)
self.show_hit(first, last)
self.ok = 1
return True
def do_replace(self):
prog = self.engine.getprog()
if not prog:
return False
text = self.text
try:
first = pos = text.index("sel.first")
last = text.index("sel.last")
except TclError:
pos = None
if not pos:
first = last = pos = text.index("insert")
line, col = SearchEngine.get_line_col(pos)
chars = text.get("%d.0" % line, "%d.0" % (line+1))
m = prog.match(chars, col)
        if not m:
return False
new = self._replace_expand(m, self.replvar.get())
if new is None:
return False
text.mark_set("insert", first)
text.undo_block_start()
if m.group():
text.delete(first, last)
if new:
text.insert(first, new)
text.undo_block_stop()
self.show_hit(first, text.index("insert"))
self.ok = 0
return True
def show_hit(self, first, last):
text = self.text
text.mark_set("insert", first)
text.tag_remove("sel", "1.0", "end")
text.tag_add("sel", first, last)
text.tag_remove("hit", "1.0", "end")
if first == last:
text.tag_add("hit", first)
else:
text.tag_add("hit", first, last)
text.see("insert")
text.update_idletasks()
def close(self, event=None):
SearchDialogBase.close(self, event)
self.text.tag_remove("hit", "1.0", "end")
def _replace_dialog(parent):
root = Tk()
root.title("Test ReplaceDialog")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
# mock undo delegator methods
def undo_block_start():
pass
def undo_block_stop():
pass
text = Text(root)
text.undo_block_start = undo_block_start
text.undo_block_stop = undo_block_stop
text.pack()
text.insert("insert","This is a sample string.\n"*10)
def show_replace():
text.tag_add(SEL, "1.0", END)
replace(text)
text.tag_remove(SEL, "1.0", END)
button = Button(root, text="Replace", command=show_replace)
button.pack()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_replace_dialog)
|
|
import unittest
from mock import MagicMock
from mock import patch
from mock import call
import jbosscli
from jbosscli import Jbosscli
from jbosscli import ServerGroup
from jbosscli import Deployment
from jbosscli import ServerInstance
from jbosscli import CliError
class TestJbosscli(unittest.TestCase):
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_get_assigned_deployments_standalone_should_not_include_path_in_command(self):
controller = Jbosscli("", "a:b")
controller.domain = False
controller.get_assigned_deployments()
jbosscli.Jbosscli._invoke_cli.assert_called_with({"operation":"read-children-resources", "child-type":"deployment"})
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_get_assigned_deployments_domain_should_include_path_in_command(self):
controller = Jbosscli("", "a:b")
controller.domain = True
group = jbosscli.ServerGroup("test-server-group", [])
controller.get_assigned_deployments(group)
jbosscli.Jbosscli._invoke_cli.assert_called_with({
"operation": "read-children-resources",
"child-type": "deployment",
"address": [
"server-group",
"test-server-group"
]
})
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_get_assigned_deployments_domain_should_return_deployments(self):
controller = Jbosscli("", "a:b")
controller.domain = True
controller._invoke_cli = MagicMock(
return_value={
"abce-version": {
"enabled": True,
"name": "abce-version",
"runtime-name": "abce.war"
},
"ecba-version": {
"enabled": False,
"name": "ecba-version",
"runtime-name": "ecba.war"
}
}
)
group = jbosscli.ServerGroup("test-server-group", [])
expected_deployments = [
Deployment("abce-version", "abce.war", enabled=True, server_group=group),
Deployment("ecba-version", "ecba.war", enabled=False, server_group=group)
]
actual_deployments = controller.get_assigned_deployments(group)
self.assertEqual(actual_deployments, expected_deployments)
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_get_assigned_deployments_domain_no_server_group_should_return_all_deployments(self):
controller = Jbosscli("", "a:b")
controller.domain = True
controller._get_all_assigned_deployments = MagicMock()
controller.get_assigned_deployments()
controller._get_all_assigned_deployments.assert_called_once_with()
@patch("jbosscli.requests.post", MagicMock())
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
def test_get_all_assigned_deployments(self):
cli = Jbosscli("host:port", "a:b")
cli.domain = True
cli._invoke_cli = MagicMock(
return_value={
"outcome": "success",
"result": [
"server-group1",
"other-server-group"
]
}
)
cli.get_server_groups = MagicMock(
return_value=[
ServerGroup("server-group1", [
Deployment("abce-version", "abce.war", enabled=True),
Deployment("ecba-version", "ecba.war", enabled=False)
]),
ServerGroup("server-group2", [
Deployment("abce-version2", "abce.war", enabled=True),
Deployment("ecba-version2", "ecba.war", enabled=False)
])
]
)
deployments = cli._get_all_assigned_deployments()
expected_deployments = [
Deployment("abce-version", "abce.war", enabled=True),
Deployment("ecba-version", "ecba.war", enabled=False),
Deployment("abce-version2", "abce.war", enabled=True),
Deployment("ecba-version2", "ecba.war", enabled=False)
]
self.assertEqual(deployments, expected_deployments)
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock(return_value={
"name-version": {
"content": {},
"name": "name-version",
"runtime-name": "name.war"
},
"othername-version": {
"content": {},
"name": "othername-version",
"runtime-name": "othername.war"
}
}))
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
def test_get_all_deployments(self):
cli = Jbosscli("a:b", "pass")
deployments = cli._get_all_deployments()
cli._invoke_cli.assert_called_with({
"operation": "read-children-resources",
"child-type": "deployment"
})
expected_deployments = [
Deployment("name-version", "name.war"),
Deployment("othername-version", "othername.war")
]
deployments.sort(key=lambda d: d.name)
self.assertEqual(deployments, expected_deployments)
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
def test_fetch_context_root_domain_single_instance(self):
cli = Jbosscli("a:b", "pass")
cli.domain = True
cli.instances = [ServerInstance("someinstance", "somehost")]
cli._invoke_cli = MagicMock(
return_value="/abcd")
deployment = Deployment("abcd-version", "abcd.war")
context_root = cli.fecth_context_root(deployment)
cli._invoke_cli.assert_called_once_with({
"operation": "read-attribute",
"name": "context-root",
"address": [
"host", "somehost",
"server", "someinstance",
"deployment", "abcd-version",
"subsystem", "web"
]
})
self.assertEqual(context_root, "/abcd")
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
def test_fetch_context_root_domain_two_instances_should_search_both(self):
cli = Jbosscli("a:b", "pass")
cli.domain = True
cli.instances = [
ServerInstance("someinstance", "somehost"),
ServerInstance("otherinstance", "somehost")
]
cli._invoke_cli = MagicMock(
side_effect=[
CliError("Boom!"),
"/abcd"
])
deployment = Deployment("abcd-version", "abcd.war")
context_root = cli.fecth_context_root(deployment)
calls = [
call({
"operation": "read-attribute",
"name": "context-root",
"address": [
"host", "somehost",
"server", "someinstance",
"deployment", "abcd-version",
"subsystem", "web"
]
}),
call({
"operation": "read-attribute",
"name": "context-root",
"address": [
"host", "somehost",
"server", "otherinstance",
"deployment", "abcd-version",
"subsystem", "web"
]
})
]
cli._invoke_cli.assert_has_calls(calls)
self.assertEqual(context_root, "/abcd")
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
def test_fetch_context_root_domain_two_empty_instances_should_search_both(self):
cli = Jbosscli("a:b", "pass")
cli.domain = True
cli.instances = [
ServerInstance("someinstance", "somehost"),
ServerInstance("otherinstance", "somehost")
]
cli._invoke_cli = MagicMock(
side_effect=[
CliError("Boom!"),
CliError("Boom!")
])
deployment = Deployment("abcd-version", "abcd.war")
context_root = cli.fecth_context_root(deployment)
calls = [
call({
"operation": "read-attribute",
"name": "context-root",
"address": [
"host", "somehost",
"server", "someinstance",
"deployment", "abcd-version",
"subsystem", "web"
]
}),
call({
"operation": "read-attribute",
"name": "context-root",
"address": [
"host", "somehost",
"server", "otherinstance",
"deployment", "abcd-version",
"subsystem", "web"
]
})
]
cli._invoke_cli.assert_has_calls(calls)
self.assertIsNone(context_root)
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
def test_fetch_context_root_standalone(self):
cli = Jbosscli("a:b", "pass")
cli.domain = False
cli._invoke_cli = MagicMock(
return_value="/abcd"
)
deployment = Deployment("abcd-version", "abcd.war")
context_root = cli.fecth_context_root(deployment)
cli._invoke_cli.assert_called_once_with({
"operation": "read-attribute",
"name": "context-root",
"address": [
"deployment", "abcd-version",
"subsystem", "web"
]
})
self.assertEqual(context_root, "/abcd")
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
def test_fetch_context_root_standalone_inexisting_deployment_should_return_None(self):
cli = Jbosscli("a:b", "pass")
cli.domain = False
cli._invoke_cli = MagicMock(
side_effect=CliError('Boom!')
)
deployment = Deployment("abcd-version", "abcd.war")
context_root = cli.fecth_context_root(deployment)
cli._invoke_cli.assert_called_once_with({
"operation": "read-attribute",
"name": "context-root",
"address": [
"deployment", "abcd-version",
"subsystem", "web"
]
})
self.assertIsNone(context_root)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
import ConfigParser
import sys
from collections import namedtuple
from optparse import OptionParser
from funcs import *
from Config import *
from FsUtil import *
from HawqUtil import *
from HiveUtil import *
from HdfsUtil import *
from PostgresUtil import *
from UserUtil import *
if len(sys.argv) == 1:
print "Available tools:"
print "\tphd-metrics.py report"
print "\tphd-metrics.py fs-util"
print "\tphd-metrics.py hawq-util"
print "\tphd-metrics.py hdfs-util"
print "\tphd-metrics.py hive-util"
print "\tphd-metrics.py user-util"
print "\tphd-metrics.py pg-util"
sys.exit(0)
if __name__ == "__main__":
# Validate tools and user
try:
out = getCommandOutput("which hdfs")
except:
printError("`which hdfs` returned a non-zero exit code. Make sur eyou are using this utility from an HDFS node")
sys.exit(1)
if getCommandOutput("whoami") != "gpadmin":
printError("Please execute this utility as gpadmin")
sys.exit(2)
## Report option
if sys.argv[1] == "report":
parser = OptionParser()
parser.add_option("-c", "--config", dest="configFile", help="Configuration file (default phd-metrics.ini)", default="phd-metrics.ini")
parser.add_option("-s", "--sqlfile", dest="sqlFile", help="Filename to write SQL statements to (default none)", default=None)
conf = Config(parser, sys.argv[2:])
pgutil = PostgresUtil(conf)
pgutil.open()
HdfsUtil(conf).printReport()
HawqUtil(conf).printReport()
HiveUtil(conf).printReport()
UserUtil(conf).printReport()
FsUtil(conf).printReport()
pgutil.close()
# Local filesystem option
elif sys.argv[1] == "fs-util":
parser = OptionParser()
parser.add_option("-c", "--config", dest="configFile", help="Configuration file (default phd-metrics.ini)", default="phd-metrics.ini")
parser.add_option("-a", "--action", dest="action", help="Choose an action: report", default=None)
conf = Config(parser, sys.argv[2:])
fsUtil = FsUtil(conf)
if conf.get(Config.ACTION) == 'report':
            fsUtil.printReport()
else:
printError("Unknown action %s" % (conf.get(Config.ACTION)))
# HAWQ option
elif sys.argv[1] == "hawq-util":
parser = OptionParser()
parser.add_option("-c", "--config", dest="configFile", help="Configuration file (default phd-metrics.ini)", default="phd-metrics.ini")
parser.add_option("-a", "--action", dest="action", help="Choose an action: report, get, set, clear", default=None)
parser.add_option("-d", "--database", dest="database", help="Database to get or set (Only for get/set/clear actions)", default=None)
parser.add_option("-q", "--quota", dest="quota", help="Database quota, in bytes. Keep in mind the 3x replication. (Only for set action)", default=None)
(options, args) = parser.parse_args(sys.argv[2:])
conf = Config(parser, sys.argv[2:])
hawqUtil = HawqUtil(conf)
### Main program
segDirs = hawqUtil.getSegmentDirs()
if conf.get(Config.ACTION) == 'report':
            hawqUtil.printReport()
        elif conf.get(Config.ACTION) == 'get':
hawqUtil.printDatabaseQuota(conf.get(Config.DATABASE))
elif conf.get(Config.ACTION) == 'set':
try:
quota = int(conf.get(Config.QUOTA_VALUE))
except:
quota = human2bytes(conf.get(Config.QUOTA_VALUE).upper())
if query_yes_no("Are you sure you want to set the %s database's quota to %s bytes? This could have a negative effect on this HAWQ database." % (conf.get(Config.DATABASE), quota), default="no"):
hawqUtil.setDatabaseQuota(conf.get(Config.DATABASE), quota)
hawqUtil.printDatabaseQuota(conf.get(Config.DATABASE))
elif conf.get(Config.ACTION) == 'clear' and query_yes_no("Are you sure you want to clear the %s database's quota?" % (conf.get(Config.DATABASE)), default="no"):
hawqUtil.clearDatabaseQuota(conf.get(Config.DATABASE))
hawqUtil.printDatabaseQuota(conf.get(Config.DATABASE))
else:
printError("Unknown action %s" % (conf.get(Config.ACTION)))
# HDFS option
elif sys.argv[1] == "hdfs-util":
parser = OptionParser()
parser.add_option("-c", "--config", dest="configFile", help="Configuration file (default phd-metrics.ini)", default="phd-metrics.ini")
parser.add_option("-a", "--action", dest="action", help="Choose an action: report", default=None)
(options, args) = parser.parse_args(sys.argv[2:])
conf = Config(parser, sys.argv[2:])
hdfsUtil = HdfsUtil(conf)
### Main program
if conf.get(Config.ACTION) == 'report':
hdfsUtil.printReport()
else:
printError("Unknown action %s" % (conf.get(Config.ACTION)))
# Hive option
elif sys.argv[1] == "hive-util":
parser = OptionParser()
parser.add_option("-c", "--config", dest="configFile", help="Configuration file (default phd-metrics.ini)", default="phd-metrics.ini")
parser.add_option("-a", "--action", dest="action", help="Choose an action: report, get, set, clear", default=None)
parser.add_option("-d", "--database", dest="database", help="Database to get or set (Only for get/set actions)", default=None)
parser.add_option("-q", "--quota", dest="quota", help="Database quota, in bytes. Keep in mind the 3x replication. (Only for set action)", default=None)
(options, args) = parser.parse_args(sys.argv[2:])
conf = Config(parser, sys.argv[2:])
hiveUtil = HiveUtil(conf)
### Main program
if conf.get(Config.ACTION) == 'report':
hiveUtil.printReport()
        elif conf.get(Config.ACTION) == 'get':
hiveUtil.printDatabaseQuota(conf.get(Config.DATABASE))
elif conf.get(Config.ACTION) == 'set':
try:
quota = int(conf.get(Config.QUOTA_VALUE))
except:
quota = human2bytes(conf.get(Config.QUOTA_VALUE).upper())
if query_yes_no("Are you sure you want to set the %s database's quota to %s bytes? This could have a negative effect on this Hive database." % (conf.get(Config.DATABASE), quota), default="no"):
hiveUtil.setDatabaseQuota(conf.get(Config.DATABASE), quota)
hiveUtil.printDatabaseQuota(conf.get(Config.DATABASE))
elif conf.get(Config.ACTION) == 'clear' and query_yes_no("Are you sure you want to clear the %s database's quota?" % (conf.get(Config.DATABASE)), default="no"):
hiveUtil.clearDatabaseQuota(conf.get(Config.DATABASE))
hiveUtil.printDatabaseQuota(conf.get(Config.DATABASE))
else:
printError("Unknown action %s" % (conf.get(Config.ACTION)))
# User option
elif sys.argv[1] == "user-util":
parser = OptionParser()
parser.add_option("-c", "--config", dest="configFile", help="Configuration file (default phd-metrics.ini)", default="phd-metrics.ini")
parser.add_option("-a", "--action", dest="action", help="Choose an action: report, get, set, clear", default=None)
parser.add_option("-u", "--user", dest="user", help="User name (Only for get/set /clear actions)", default=None)
parser.add_option("-q", "--quota", dest="quota", help="User quota, in bytes. Keep in mind the 3x replication. (Only for set action)", default=None)
parser.add_option("-t", "--type", dest="quotaType", help="The type of quota to get, set, or clear: 'inode' or 'space'", default=None)
conf = Config(parser, sys.argv[2:])
userUtil = UserUtil(conf)
### Main program
if conf.get(Config.ACTION) == 'report':
userUtil.printReport()
        elif conf.get(Config.ACTION) == 'get':
if conf.get(Config.QUOTA_TYPE) == 'space':
userUtil.printUserSpaceQuota(conf.get(Config.USER))
elif conf.get(Config.QUOTA_TYPE) == 'inode':
userUtil.printUserINodeQuota(conf.get(Config.USER))
elif conf.get(Config.ACTION) == 'set':
try:
quota = int(conf.get(Config.QUOTA_VALUE))
except:
# assume this is in a human readable form if initial conversion failed
quota = human2bytes(conf.get(Config.QUOTA_VALUE).upper())
if conf.get(Config.QUOTA_TYPE) == 'space':
if query_yes_no("Are you sure you want to set %s's quota to %s bytes?" % (conf.get(Config.USER), quota), default="no"):
userUtil.setUserSpaceQuota(conf.get(Config.USER), quota)
userUtil.printUserSpaceQuota(conf.get(Config.USER))
elif conf.get(Config.QUOTA_TYPE) == 'inode':
if query_yes_no("Are you sure you want to set %s's quota to %s inodes?" % (conf.get(Config.USER), quota), default="no"):
userUtil.setUserINodeQuota(conf.get(Config.USER), quota)
userUtil.printUserINodeQuota(conf.get(Config.USER))
elif conf.get(Config.ACTION) == 'clear':
if conf.get(Config.QUOTA_TYPE) == 'space' and query_yes_no("Are you sure you want to clear %s's space quota?" % (conf.get(Config.USER)), default="no"):
userUtil.clearUserSpaceQuota(conf.get(Config.USER))
userUtil.printUserSpaceQuota(conf.get(Config.USER))
elif conf.get(Config.QUOTA_TYPE) == 'inode' and query_yes_no("Are you sure you want to clear %s's inode quota?" % (conf.get(Config.USER)), default="no"):
userUtil.clearUserINodeQuota(conf.get(Config.USER))
userUtil.printUserINodeQuota(conf.get(Config.USER))
else:
printError("Unknown action %s" % (conf.get(Config.ACTION)))
# postgres option
elif sys.argv[1] == "pg-util":
parser = OptionParser()
parser.add_option("-c", "--config", dest="configFile", help="Configuration file (default phd-metrics.ini)", default="phd-metrics.ini")
parser.add_option("-s", "--sqlfile", dest="sqlFile", help="Filename to write SQL statements to (default none)", default=None)
conf = Config(parser, sys.argv[2:])
try:
conf.get(Config.SQL_FILE)
except KeyError:
printError("Must specify --sqlfile option for pg-util tool")
pgutil = PostgresUtil(conf)
pgutil.open()
pgutil.writeCreates()
pgutil.close()
else:
printError("Unknown tool")
sys.exit(0)
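# Example invocations (illustrative; flag names are taken from the option
# definitions above, and the quota value is assumed to be accepted by
# human2bytes when not a plain integer):
#   ./phd-metrics.py report -c phd-metrics.ini
#   ./phd-metrics.py hdfs-util -a report
#   ./phd-metrics.py user-util -a set -u gpadmin -q 10G -t space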
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for type-hint objects and decorators - Python 3 syntax specific.
"""
# pytype: skip-file
from __future__ import absolute_import
import typing
import unittest
import apache_beam as beam
from apache_beam import typehints
class MainInputTest(unittest.TestCase):
def assertStartswith(self, msg, prefix):
self.assertTrue(
msg.startswith(prefix), '"%s" does not start with "%s"' % (msg, prefix))
def test_typed_dofn_method(self):
class MyDoFn(beam.DoFn):
def process(self, element: int) -> typehints.Tuple[str]:
return tuple(str(element))
result = [1, 2, 3] | beam.ParDo(MyDoFn())
self.assertEqual(['1', '2', '3'], sorted(result))
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
_ = ['a', 'b', 'c'] | beam.ParDo(MyDoFn())
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
_ = [1, 2, 3] | (beam.ParDo(MyDoFn()) | 'again' >> beam.ParDo(MyDoFn()))
def test_typed_dofn_method_with_class_decorators(self):
# Class decorators take precedence over PEP 484 hints.
@typehints.with_input_types(typehints.Tuple[int, int])
@typehints.with_output_types(int)
class MyDoFn(beam.DoFn):
def process(self, element: int) -> typehints.Tuple[str]:
yield element[0]
result = [(1, 2)] | beam.ParDo(MyDoFn())
self.assertEqual([1], sorted(result))
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*Tuple\[int, int\].*got.*str'):
_ = ['a', 'b', 'c'] | beam.ParDo(MyDoFn())
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*Tuple\[int, int\].*got.*int'):
_ = [1, 2, 3] | (beam.ParDo(MyDoFn()) | 'again' >> beam.ParDo(MyDoFn()))
def test_typed_dofn_instance(self):
# Type hints applied to DoFn instance take precedence over decorators and
# process annotations.
@typehints.with_input_types(typehints.Tuple[int, int])
@typehints.with_output_types(int)
class MyDoFn(beam.DoFn):
def process(self, element: typehints.Tuple[int, int]) -> \
typehints.List[int]:
return [str(element)]
my_do_fn = MyDoFn().with_input_types(int).with_output_types(str)
result = [1, 2, 3] | beam.ParDo(my_do_fn)
self.assertEqual(['1', '2', '3'], sorted(result))
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
_ = ['a', 'b', 'c'] | beam.ParDo(my_do_fn)
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
_ = [1, 2, 3] | (beam.ParDo(my_do_fn) | 'again' >> beam.ParDo(my_do_fn))
def test_typed_callable_instance(self):
# Type hints applied to ParDo instance take precedence over callable
# decorators and annotations.
@typehints.with_input_types(typehints.Tuple[int, int])
@typehints.with_output_types(typehints.Generator[int])
def do_fn(element: typehints.Tuple[int, int]) -> typehints.Generator[str]:
yield str(element)
pardo = beam.ParDo(do_fn).with_input_types(int).with_output_types(str)
result = [1, 2, 3] | pardo
self.assertEqual(['1', '2', '3'], sorted(result))
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
_ = ['a', 'b', 'c'] | pardo
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*int.*got.*str'):
_ = [1, 2, 3] | (pardo | 'again' >> pardo)
def test_typed_callable_iterable_output(self):
# Only the outer Iterable should be stripped.
def do_fn(element: int) -> typehints.Iterable[typehints.Iterable[str]]:
return [[str(element)] * 2]
result = [1, 2] | beam.ParDo(do_fn)
self.assertEqual([['1', '1'], ['2', '2']], sorted(result))
def test_typed_dofn_method_not_iterable(self):
class MyDoFn(beam.DoFn):
def process(self, element: int) -> str:
return str(element)
with self.assertRaisesRegex(ValueError, r'str.*is not iterable'):
_ = [1, 2, 3] | beam.ParDo(MyDoFn())
def test_typed_dofn_method_return_none(self):
class MyDoFn(beam.DoFn):
def process(self, unused_element: int) -> None:
pass
result = [1, 2, 3] | beam.ParDo(MyDoFn())
self.assertListEqual([], result)
def test_typed_dofn_method_return_optional(self):
class MyDoFn(beam.DoFn):
def process(
self,
unused_element: int) -> typehints.Optional[typehints.Iterable[int]]:
pass
result = [1, 2, 3] | beam.ParDo(MyDoFn())
self.assertListEqual([], result)
def test_typed_dofn_method_return_optional_not_iterable(self):
class MyDoFn(beam.DoFn):
def process(self, unused_element: int) -> typehints.Optional[int]:
pass
with self.assertRaisesRegex(ValueError, r'int.*is not iterable'):
_ = [1, 2, 3] | beam.ParDo(MyDoFn())
def test_typed_callable_not_iterable(self):
def do_fn(element: int) -> int:
return element
with self.assertRaisesRegex(typehints.TypeCheckError,
r'int.*is not iterable'):
_ = [1, 2, 3] | beam.ParDo(do_fn)
def test_typed_dofn_kwonly(self):
class MyDoFn(beam.DoFn):
# TODO(BEAM-5878): A kwonly argument like
# timestamp=beam.DoFn.TimestampParam would not work here.
def process(self, element: int, *, side_input: str) -> \
typehints.Generator[typehints.Optional[str]]:
yield str(element) if side_input else None
my_do_fn = MyDoFn()
result = [1, 2, 3] | beam.ParDo(my_do_fn, side_input='abc')
self.assertEqual(['1', '2', '3'], sorted(result))
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*str.*got.*int.*side_input'):
_ = [1, 2, 3] | beam.ParDo(my_do_fn, side_input=1)
def test_typed_dofn_var_kwargs(self):
class MyDoFn(beam.DoFn):
def process(self, element: int, **side_inputs: typehints.Dict[str, str]) \
-> typehints.Generator[typehints.Optional[str]]:
yield str(element) if side_inputs else None
my_do_fn = MyDoFn()
result = [1, 2, 3] | beam.ParDo(my_do_fn, foo='abc', bar='def')
self.assertEqual(['1', '2', '3'], sorted(result))
with self.assertRaisesRegex(typehints.TypeCheckError,
r'requires.*str.*got.*int.*side_inputs'):
_ = [1, 2, 3] | beam.ParDo(my_do_fn, a=1)
def test_typed_callable_string_literals(self):
def do_fn(element: 'int') -> 'typehints.List[str]':
return [[str(element)] * 2]
result = [1, 2] | beam.ParDo(do_fn)
self.assertEqual([['1', '1'], ['2', '2']], sorted(result))
def test_typed_ptransform_fn(self):
# Test that type hints are propagated to the created PTransform.
@beam.ptransform_fn
@typehints.with_input_types(int)
def MyMap(pcoll):
def fn(element: int):
yield element
return pcoll | beam.ParDo(fn)
self.assertListEqual([1, 2, 3], [1, 2, 3] | MyMap())
with self.assertRaisesRegex(typehints.TypeCheckError, r'int.*got.*str'):
_ = ['a'] | MyMap()
def test_typed_ptransform_fn_conflicting_hints(self):
# In this case, both MyMap and its contained ParDo have separate type
# checks (that disagree with each other).
@beam.ptransform_fn
@typehints.with_input_types(int)
def MyMap(pcoll):
def fn(element: float):
yield element
return pcoll | beam.ParDo(fn)
with self.assertRaisesRegex(typehints.TypeCheckError,
r'ParDo.*requires.*float.*got.*int'):
_ = [1, 2, 3] | MyMap()
with self.assertRaisesRegex(typehints.TypeCheckError,
r'MyMap.*expected.*int.*got.*str'):
_ = ['a'] | MyMap()
def test_typed_dofn_string_literals(self):
class MyDoFn(beam.DoFn):
def process(self, element: 'int') -> 'typehints.List[str]':
return [[str(element)] * 2]
result = [1, 2] | beam.ParDo(MyDoFn())
self.assertEqual([['1', '1'], ['2', '2']], sorted(result))
def test_typed_map(self):
def fn(element: int) -> int:
return element * 2
result = [1, 2, 3] | beam.Map(fn)
self.assertEqual([2, 4, 6], sorted(result))
def test_typed_map_return_optional(self):
# None is a valid element value for Map.
def fn(element: int) -> typehints.Optional[int]:
if element > 1:
return element
result = [1, 2, 3] | beam.Map(fn)
self.assertCountEqual([None, 2, 3], result)
def test_typed_flatmap(self):
def fn(element: int) -> typehints.Iterable[int]:
yield element * 2
result = [1, 2, 3] | beam.FlatMap(fn)
self.assertCountEqual([2, 4, 6], result)
def test_typed_flatmap_output_hint_not_iterable(self):
def fn(element: int) -> int:
return element * 2
# This is raised (originally) in strip_iterable.
with self.assertRaisesRegex(typehints.TypeCheckError,
r'int.*is not iterable'):
_ = [1, 2, 3] | beam.FlatMap(fn)
def test_typed_flatmap_output_value_not_iterable(self):
def fn(element: int) -> typehints.Iterable[int]:
return element * 2
# This is raised in runners/common.py (process_outputs).
with self.assertRaisesRegex(TypeError, r'int.*is not iterable'):
_ = [1, 2, 3] | beam.FlatMap(fn)
def test_typed_flatmap_optional(self):
def fn(element: int) -> typehints.Optional[typehints.Iterable[int]]:
if element > 1:
yield element * 2
# Verify that the output type of fn is int and not Optional[int].
def fn2(element: int) -> int:
return element
result = [1, 2, 3] | beam.FlatMap(fn) | beam.Map(fn2)
self.assertCountEqual([4, 6], result)
def test_typed_ptransform_with_no_error(self):
class StrToInt(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[str]) -> beam.pvalue.PCollection[int]:
return pcoll | beam.Map(lambda x: int(x))
class IntToStr(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[int]) -> beam.pvalue.PCollection[str]:
return pcoll | beam.Map(lambda x: str(x))
_ = ['1', '2', '3'] | StrToInt() | IntToStr()
def test_typed_ptransform_with_bad_typehints(self):
class StrToInt(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[str]) -> beam.pvalue.PCollection[int]:
return pcoll | beam.Map(lambda x: int(x))
class IntToStr(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[str]) -> beam.pvalue.PCollection[str]:
return pcoll | beam.Map(lambda x: str(x))
with self.assertRaisesRegex(typehints.TypeCheckError,
"Input type hint violation at IntToStr: "
"expected <class 'str'>, got <class 'int'>"):
_ = ['1', '2', '3'] | StrToInt() | IntToStr()
def test_typed_ptransform_with_bad_input(self):
class StrToInt(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[str]) -> beam.pvalue.PCollection[int]:
return pcoll | beam.Map(lambda x: int(x))
class IntToStr(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[int]) -> beam.pvalue.PCollection[str]:
return pcoll | beam.Map(lambda x: str(x))
with self.assertRaisesRegex(typehints.TypeCheckError,
"Input type hint violation at StrToInt: "
"expected <class 'str'>, got <class 'int'>"):
# Feed integers to a PTransform that expects strings
_ = [1, 2, 3] | StrToInt() | IntToStr()
def test_typed_ptransform_with_partial_typehints(self):
class StrToInt(beam.PTransform):
def expand(self, pcoll) -> beam.pvalue.PCollection[int]:
return pcoll | beam.Map(lambda x: int(x))
class IntToStr(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[int]) -> beam.pvalue.PCollection[str]:
return pcoll | beam.Map(lambda x: str(x))
# Feed integers to a PTransform that should expect strings
# but has no typehints so it expects any
_ = [1, 2, 3] | StrToInt() | IntToStr()
def test_typed_ptransform_with_bare_wrappers(self):
class StrToInt(beam.PTransform):
def expand(
self, pcoll: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
return pcoll | beam.Map(lambda x: int(x))
class IntToStr(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[int]) -> beam.pvalue.PCollection[str]:
return pcoll | beam.Map(lambda x: str(x))
_ = [1, 2, 3] | StrToInt() | IntToStr()
def test_typed_ptransform_with_no_typehints(self):
class StrToInt(beam.PTransform):
def expand(self, pcoll):
return pcoll | beam.Map(lambda x: int(x))
class IntToStr(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[int]) -> beam.pvalue.PCollection[str]:
return pcoll | beam.Map(lambda x: str(x))
# Feed integers to a PTransform that should expect strings
# but has no typehints so it expects any
_ = [1, 2, 3] | StrToInt() | IntToStr()
def test_typed_ptransform_with_generic_annotations(self):
T = typing.TypeVar('T')
class IntToInt(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[T]) -> beam.pvalue.PCollection[T]:
return pcoll | beam.Map(lambda x: x)
class IntToStr(beam.PTransform):
def expand(
self,
pcoll: beam.pvalue.PCollection[T]) -> beam.pvalue.PCollection[str]:
return pcoll | beam.Map(lambda x: str(x))
_ = [1, 2, 3] | IntToInt() | IntToStr()
def test_typed_ptransform_with_do_outputs_tuple_compiles(self):
class MyDoFn(beam.DoFn):
def process(self, element: int, *args, **kwargs):
if element % 2:
yield beam.pvalue.TaggedOutput('odd', 1)
else:
yield beam.pvalue.TaggedOutput('even', 1)
class MyPTransform(beam.PTransform):
def expand(self, pcoll: beam.pvalue.PCollection[int]):
return pcoll | beam.ParDo(MyDoFn()).with_outputs('odd', 'even')
# This test fails if you remove the following line from ptransform.py
# if isinstance(pvalue_, DoOutputsTuple): continue
_ = [1, 2, 3] | MyPTransform()
class AnnotationsTest(unittest.TestCase):
def test_pardo_dofn(self):
class MyDoFn(beam.DoFn):
def process(self, element: int) -> typehints.Generator[str]:
yield str(element)
th = beam.ParDo(MyDoFn()).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((str, ), {}))
def test_pardo_dofn_not_iterable(self):
class MyDoFn(beam.DoFn):
def process(self, element: int) -> str:
return str(element)
with self.assertRaisesRegex(ValueError, r'str.*is not iterable'):
_ = beam.ParDo(MyDoFn()).get_type_hints()
def test_pardo_wrapper(self):
def do_fn(element: int) -> typehints.Iterable[str]:
return [str(element)]
th = beam.ParDo(do_fn).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((str, ), {}))
def test_pardo_wrapper_tuple(self):
# Test case for callables that return key-value pairs for GBK. The outer
# Iterable should be stripped but the inner Tuple left intact.
def do_fn(element: int) -> typehints.Iterable[typehints.Tuple[str, int]]:
return [(str(element), element)]
th = beam.ParDo(do_fn).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((typehints.Tuple[str, int], ), {}))
def test_pardo_wrapper_not_iterable(self):
def do_fn(element: int) -> str:
return str(element)
with self.assertRaisesRegex(typehints.TypeCheckError,
r'str.*is not iterable'):
_ = beam.ParDo(do_fn).get_type_hints()
def test_flat_map_wrapper(self):
def map_fn(element: int) -> typehints.Iterable[int]:
return [element, element + 1]
th = beam.FlatMap(map_fn).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((int, ), {}))
def test_flat_map_wrapper_optional_output(self):
# Optional should not affect output type (Nones are ignored).
def map_fn(element: int) -> typehints.Optional[typehints.Iterable[int]]:
return [element, element + 1]
th = beam.FlatMap(map_fn).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((int, ), {}))
@unittest.skip('BEAM-8662: Py3 annotations not yet supported for MapTuple')
def test_flat_map_tuple_wrapper(self):
# TODO(BEAM-8662): Also test with a fn that accepts default arguments.
def tuple_map_fn(a: str, b: str, c: str) -> typehints.Iterable[str]:
return [a, b, c]
th = beam.FlatMapTuple(tuple_map_fn).get_type_hints()
self.assertEqual(th.input_types, ((str, str, str), {}))
self.assertEqual(th.output_types, ((str, ), {}))
def test_map_wrapper(self):
def map_fn(unused_element: int) -> int:
return 1
th = beam.Map(map_fn).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((int, ), {}))
def test_map_wrapper_optional_output(self):
# Optional does affect output type (Nones are NOT ignored).
def map_fn(unused_element: int) -> typehints.Optional[int]:
return 1
th = beam.Map(map_fn).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((typehints.Optional[int], ), {}))
@unittest.skip('BEAM-8662: Py3 annotations not yet supported for MapTuple')
def test_map_tuple(self):
# TODO(BEAM-8662): Also test with a fn that accepts default arguments.
def tuple_map_fn(a: str, b: str, c: str) -> str:
return a + b + c
th = beam.MapTuple(tuple_map_fn).get_type_hints()
self.assertEqual(th.input_types, ((str, str, str), {}))
self.assertEqual(th.output_types, ((str, ), {}))
def test_filter_wrapper(self):
def filter_fn(element: int) -> bool:
return bool(element % 2)
th = beam.Filter(filter_fn).get_type_hints()
self.assertEqual(th.input_types, ((int, ), {}))
self.assertEqual(th.output_types, ((int, ), {}))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from generator import generator, generate
from nose.plugins.attrib import attr
import cloudferry_devlab.tests.config as config
from cloudferry_devlab.tests import functional_test
from cloudferry_devlab.tests import test_exceptions
@generator
class GlanceMigrationTests(functional_test.FunctionalTest):
"""Test Case class which includes glance images migration cases."""
def setUp(self):
super(GlanceMigrationTests, self).setUp()
        self.dst_images = [
            x for x in self.dst_cloud.glanceclient.images.list()]
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
def test_image_members(self):
"""Validate image members were migrated with correct names.
"""
def member_list_collector(_images, client, auth_client):
_members = []
for img in _images:
members = client.image_members.list(img.id)
if not members:
continue
mbr_list = []
for mem in members:
mem_name = auth_client.tenants.find(id=mem.member_id).name
mem_name = self.migration_utils.check_mapped_tenant(
tenant_name=mem_name)
mbr_list.append(mem_name)
_members.append({img.name: sorted(mbr_list)})
return sorted(_members)
src_images = [img for img in self.src_cloud.glanceclient.images.list()
if img.name not in
config.images_not_included_in_filter and
img.name not in config.images_blacklisted]
dst_images = [img for img in self.dst_cloud.glanceclient.images.list(
is_public=None)]
src_members = member_list_collector(src_images,
self.src_cloud.glanceclient,
self.src_cloud.keystoneclient)
dst_members = member_list_collector(dst_images,
self.dst_cloud.glanceclient,
self.dst_cloud.keystoneclient)
missed_members = [member for member in src_members if member
not in dst_members]
if missed_members:
self.fail("Members: %s not in the DST list of image members."
% missed_members)
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
@attr(migration_engine=['migrate2'])
@generate('name', 'disk_format', 'container_format', 'size', 'checksum',
'status', 'deleted', 'min_disk', 'protected', 'min_ram',
'is_public', 'virtual_size', 'id')
def test_migrate_glance_images(self, param):
"""Validate images were migrated with correct parameters.
:param name: image name
:param disk_format: raw, vhd, vmdk, vdi, iso, qcow2, etc
:param container_format: bare, ovf, ova, etc
:param size: image size
:param checksum: MD5 checksum of the image file data
:param status: image status
:param deleted: is image deleted
:param min_disk: minimum disk size
:param protected: is image protected
:param min_ram: ram required for image
:param is_public: is image public
:param virtual_size: size of the virtual disk
:param id: image id"""
src_images = self.filter_images({'delete_on_dst': True})
src_images = self.filtering_utils.filter_images(src_images)[0]
dst_images = [img for img in self.dst_cloud.glanceclient.images.list(
is_public=None)]
self.validate_resource_parameter_in_dst(src_images, dst_images,
resource_name='image',
parameter=param)
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
@attr(migration_engine=['migrate2'])
def test_migrate_deleted_glance_images_only_once(self):
"""Validate deleted and broken images were migrated to dst only once.
Scenario:
1. Get deleted and broken image's ids from src
2. Get all images from dst
3. Verify each deleted and broken image has been restored once
"""
src_vms = self.src_cloud.novaclient.servers.list(
search_opts={'all_tenants': True})
src_img_ids = [i.id for i in self.src_cloud.glanceclient.images.list()]
# getting images, from which vms were spawned, but which do not exist
# in the glance
to_restore_img_ids = []
for vm in src_vms:
if vm.image and vm.image['id'] not in src_img_ids:
to_restore_img_ids.append(vm.image['id'])
# getting 'broken' images (which exist in the glance, but deleted in
# storage)
all_images = self.migration_utils.get_all_images_from_config()
broken_images = [i['name'] for i in all_images if i.get('broken')]
src_images = self.src_cloud.glanceclient.images.list()
to_restore_img_ids.extend([image.id for image in src_images
if image.name in broken_images])
restored_dst_images = collections.defaultdict(int)
for deleted_img_id in set(to_restore_img_ids):
for dst_image in self.dst_images:
if dst_image.name and deleted_img_id in dst_image.name:
restored_dst_images[deleted_img_id] += 1
msg = 'Image "%s" was re-created %s times. '
error_msg = ''
for image in restored_dst_images:
if restored_dst_images[image] > 1:
error_msg += msg % (image, restored_dst_images[image])
if error_msg:
self.fail(error_msg)
@attr(migrated_tenant=['tenant1', 'tenant2'])
def test_migrate_glance_image_belongs_to_deleted_tenant(self):
"""Validate images from deleted tenants were migrated to dst admin
tenant."""
src_image_names = []
def get_image_by_name(image_list, img_name):
for image in image_list:
if image.name == img_name:
return image
for tenant in config.tenants:
if tenant.get('deleted') and tenant.get('images'):
src_image_names.extend([image['name'] for image in
tenant['images']])
dst_image_names = [image.name for image in self.dst_images]
dst_tenant_id = self.dst_cloud.get_tenant_id(self.dst_cloud.tenant)
missed_images = []
wrong_image_members = []
for image_name in src_image_names:
if image_name not in dst_image_names:
missed_images.append(image_name)
image = get_image_by_name(self.dst_images, image_name)
if image.owner != dst_tenant_id:
wrong_image_members.append(image.owner)
if missed_images:
            msg = 'Images {0} are not in the DST image list: {1}'.format(
missed_images, dst_image_names)
if wrong_image_members:
                msg += '\nand\nImage owners on dst are {0} instead of {1}'\
.format(wrong_image_members, dst_tenant_id)
self.fail(msg)
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
@attr(migration_engine=['migrate2'])
def test_glance_images_not_in_filter_did_not_migrate(self):
"""Validate images not in filter weren't migrated."""
migrated_images_not_in_filter = [image for image in
config.images_not_included_in_filter
if image in self.dst_images]
if migrated_images_not_in_filter:
            self.fail('Images were migrated despite not being included '
                      'in the filter. Images info: \n{}'.format(
                          migrated_images_not_in_filter))
@attr(migrated_tenant=['tenant7'])
def test_glance_images_blacklisted_did_not_migrate(self):
"""Validate images blacklisted weren't migrated."""
migrated_images_blacklisted = [image for image in
config.images_blacklisted
if image in self.dst_images]
if migrated_images_blacklisted:
            self.fail('Images were migrated despite being blacklisted. '
                      'Images info: \n{}'.format(
                          migrated_images_blacklisted))
@attr(migration_engine=['migrate2'])
def test_glance_image_deleted_and_migrated_second_time_with_new_id(self):
"""Validate deleted images were migrated second time with new id."""
src_images = []
for image in config.images:
if image.get('delete_on_dst'):
src_images.append(image)
images_with_same_id = []
for src_image in src_images:
src_image = self.src_cloud.glanceclient.images.get(
src_image['id'])
images_with_same_id.extend([dst_image.name for dst_image
in self.dst_images
if src_image.name == dst_image.name and
src_image.id == dst_image.id])
if images_with_same_id:
msg = "The images with name {src_image_name} have the "\
"same ID on dst - must be different for this image,"\
"because this image was migrated and deleted on dst. "\
"On the next migration must be generated new ID"
self.fail(msg=msg.format(src_image_name=images_with_same_id))
@attr(migrated_tenant=['admin', 'tenant1', 'tenant2'])
def test_not_valid_images_did_not_migrate(self):
"""Validate images with invalid statuses weren't migrated.
Invalid images have 'broken': True value in :mod:`config.py`
"""
all_images = self.migration_utils.get_all_images_from_config()
images = [image['name'] for image in all_images if image.get('broken')]
migrated_images = []
for image in images:
try:
self.dst_cloud.get_image_id(image)
migrated_images.append(image)
except test_exceptions.NotFound:
pass
if migrated_images:
            self.fail('Invalid images %s were migrated' % migrated_images)
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from collections import defaultdict
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
from pants.contrib.go.subsystems.fetcher_factory import FetcherFactory
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_task import GoTask
class GoFetch(GoTask):
"""Fetches third-party Go libraries."""
@classmethod
def implementation_version(cls):
return super(GoFetch, cls).implementation_version() + [('GoFetch', 2)]
@classmethod
def subsystem_dependencies(cls):
return super(GoFetch, cls).subsystem_dependencies() + (FetcherFactory,)
@classmethod
def product_types(cls):
return ['go_remote_lib_src']
@classmethod
def register_options(cls, register):
pass
@property
def cache_target_dirs(self):
# TODO(John Sirois): See TODO in _fetch_pkg, re-consider how artifact caching works for fetches.
return True
def execute(self):
self.context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
go_remote_libs = self.context.targets(self.is_remote_lib)
if not go_remote_libs:
return
undeclared_deps = self._transitive_download_remote_libs(set(go_remote_libs))
if undeclared_deps:
self._log_undeclared_deps(undeclared_deps)
raise TaskError('Failed to resolve transitive Go remote dependencies.')
def _log_undeclared_deps(self, undeclared_deps):
for dependee, deps in undeclared_deps.items():
self.context.log.error('{address} has remote dependencies which require local declaration:'
.format(address=dependee.address.reference()))
for dep_import_path, address in deps:
self.context.log.error('\t--> {import_path} (expected go_remote_library declaration '
'at {address})'.format(import_path=dep_import_path,
address=address.reference()))
@staticmethod
def _get_fetcher(import_path):
return FetcherFactory.global_instance().get_fetcher(import_path)
def _fetch_pkg(self, gopath, pkg, rev):
"""Fetch the package and setup symlinks."""
fetcher = self._get_fetcher(pkg)
root = fetcher.root()
root_dir = os.path.join(self.workdir, 'fetches', root, rev)
# Only fetch each remote root once.
if not os.path.exists(root_dir):
with temporary_dir() as tmp_fetch_root:
with self.context.new_workunit('fetch {}'.format(pkg)):
fetcher.fetch(dest=tmp_fetch_root, rev=rev)
safe_mkdir(root_dir)
for path in os.listdir(tmp_fetch_root):
shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))
    # TODO(John Sirois): Circle back and get rid of this symlink tree.
# GoWorkspaceTask will further symlink a single package from the tree below into a
# target's workspace when it could just be linking from the fetch_dir. The only thing
# standing in the way is a determination of what we want to artifact cache. If we don't
# want to cache fetched zips, linking straight from the fetch_dir works simply. Otherwise
# thought needs to be applied to using the artifact cache directly or synthesizing a
# canonical owner target for the fetched files that 'child' targets (subpackages) can
# depend on and share the fetch from.
dest_dir = os.path.join(gopath, 'src', root)
# We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
# chroot to avoid collision; thus `clean=True`.
safe_mkdir(dest_dir, clean=True)
for path in os.listdir(root_dir):
os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
# Note: Will update import_root_map.
def _map_fetched_remote_source(self, go_remote_lib, gopath, all_known_remote_libs,
resolved_remote_libs, undeclared_deps, import_root_map):
# See if we've computed the remote import paths for this rev of this lib in a previous run.
remote_import_paths_cache = os.path.join(os.path.dirname(gopath), 'remote_import_paths.txt')
if os.path.exists(remote_import_paths_cache):
with open(remote_import_paths_cache, 'r') as fp:
remote_import_paths = [line.decode('utf8').strip() for line in fp.readlines()]
else:
remote_import_paths = self._get_remote_import_paths(go_remote_lib.import_path,
gopath=gopath)
with safe_concurrent_creation(remote_import_paths_cache) as safe_path:
with open(safe_path, 'w') as fp:
for path in remote_import_paths:
fp.write('{}\n'.format(path).encode('utf8'))
for remote_import_path in remote_import_paths:
remote_root = import_root_map.get(remote_import_path)
if remote_root is None:
fetcher = self._get_fetcher(remote_import_path)
remote_root = fetcher.root()
import_root_map[remote_import_path] = remote_root
spec_path = os.path.join(go_remote_lib.target_base, remote_root)
package_path = GoRemoteLibrary.remote_package_path(remote_root, remote_import_path)
target_name = package_path or os.path.basename(remote_root)
address = Address(spec_path, target_name)
if not any(address == lib.address for lib in all_known_remote_libs):
try:
# If we've already resolved a package from this remote root, its ok to define an
# implicit synthetic remote target for all other packages in the same remote root.
same_remote_libs = [lib for lib in all_known_remote_libs
if spec_path == lib.address.spec_path]
implicit_ok = any(same_remote_libs)
# If we're creating a synthetic remote target, we should pin it to the same
# revision as the rest of the library.
rev = None
if implicit_ok:
rev = same_remote_libs[0].rev
remote_lib = self._resolve(go_remote_lib, address, package_path, rev, implicit_ok)
resolved_remote_libs.add(remote_lib)
all_known_remote_libs.add(remote_lib)
except self.UndeclaredRemoteLibError as e:
undeclared_deps[go_remote_lib].add((remote_import_path, e.address))
self.context.build_graph.inject_dependency(go_remote_lib.address, address)
def _transitive_download_remote_libs(self, go_remote_libs, all_known_remote_libs=None):
"""Recursively attempt to resolve / download all remote transitive deps of go_remote_libs.
Returns a dict<GoRemoteLibrary, set<tuple<str, Address>>>, which maps a go remote library to a
set of unresolved remote dependencies, each dependency expressed as a tuple containing the
    import path of the dependency and the expected target address. If all transitive
dependencies were successfully resolved, returns an empty dict.
Downloads as many invalidated transitive dependencies as possible, and returns as many
undeclared dependencies as possible. However, because the dependencies of a remote library
can only be determined _after_ it has been downloaded, a transitive dependency of an undeclared
remote library will never be detected.
Because go_remote_libraries do not declare dependencies (rather, they are inferred), injects
all successfully resolved transitive dependencies into the build graph.
"""
if not go_remote_libs:
return {}
all_known_remote_libs = all_known_remote_libs or set()
all_known_remote_libs.update(go_remote_libs)
resolved_remote_libs = set()
undeclared_deps = defaultdict(set)
go_remote_lib_src = self.context.products.get_data('go_remote_lib_src')
with self.invalidated(go_remote_libs) as invalidation_check:
# We accumulate mappings from import path to root (e.g., example.org/pkg/foo -> example.org)
# from all targets in this map, so that targets share as much of this information as
# possible during this run.
      # We cache these mappings to avoid repeatedly fetching them over the network via the
# meta tag protocol. Note that this mapping is unversioned: It's defined as "whatever meta
# tag is currently being served at the relevant URL", which is inherently independent of
# the rev of the remote library. We (and the entire Go ecosystem) assume that this mapping
# never changes, in practice.
import_root_map = {}
for vt in invalidation_check.all_vts:
import_root_map_path = os.path.join(vt.results_dir, 'pkg_root_map.txt')
import_root_map.update(self._read_import_root_map_file(import_root_map_path))
go_remote_lib = vt.target
gopath = os.path.join(vt.results_dir, 'gopath')
if not vt.valid:
self._fetch_pkg(gopath, go_remote_lib.import_path, go_remote_lib.rev)
# _map_fetched_remote_source() will modify import_root_map.
self._map_fetched_remote_source(go_remote_lib, gopath, all_known_remote_libs,
resolved_remote_libs, undeclared_deps, import_root_map)
go_remote_lib_src[go_remote_lib] = os.path.join(gopath, 'src', go_remote_lib.import_path)
# Cache the mapping against this target's key. Note that because we accumulate
# mappings across targets, the file may contain mappings that this target doesn't
# need or care about (although it will contain all the mappings this target does need).
# But the file is small, so there's no harm in this redundancy.
self._write_import_root_map_file(import_root_map_path, import_root_map)
# Recurse after the invalidated block, so the libraries we downloaded are now "valid"
# and thus we don't try to download a library twice.
trans_undeclared_deps = self._transitive_download_remote_libs(resolved_remote_libs,
all_known_remote_libs)
undeclared_deps.update(trans_undeclared_deps)
return undeclared_deps
class UndeclaredRemoteLibError(Exception):
def __init__(self, address):
self.address = address
def _resolve(self, dependent_remote_lib, address, pkg, rev, implicit_ok):
"""Resolves the GoRemoteLibrary at `address` defining the given `pkg`.
If `implicit_ok` is True, then a GoRemoteLibrary to own `pkg` is always synthesized if it does
not already exist; otherwise the address must already exist in the build graph (a BUILD file
must exist on disk that owns the given `pkg` and declares a `rev` for it).
:param dependent_remote_lib: The remote library that depends on the remote `pkg`.
:type: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
:param address: The address of the remote library that should own `pkg`.
:type: :class:`pants.base.Address`
:param string pkg: The remote package path whose owning target needs to be resolved.
:param string rev: The revision of the package. None defaults to `master`.
:param bool implicit_ok: `False` if the given `address` must be defined in a BUILD file on disk;
otherwise a remote library to own `pkg` will always be created and
returned.
:returns: The resulting resolved remote library after injecting it in the build graph.
:rtype: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
:raises: :class:`GoFetch.UndeclaredRemoteLibError`: If no BUILD file exists for the remote root
`pkg` lives in.
"""
try:
self.context.build_graph.inject_address_closure(address)
except AddressLookupError:
if implicit_ok:
self.context.add_new_target(address=address,
target_base=dependent_remote_lib.target_base,
target_type=GoRemoteLibrary,
pkg=pkg,
rev=rev)
else:
raise self.UndeclaredRemoteLibError(address)
return self.context.build_graph.get_target(address)
@staticmethod
def _is_relative(import_path):
return import_path.startswith('.')
def _get_remote_import_paths(self, pkg, gopath=None):
"""Returns the remote import paths declared by the given remote Go `pkg`.
NB: This only includes production code imports, no test code imports.
"""
import_listing = self.import_oracle.list_imports(pkg, gopath=gopath)
return [imp for imp in import_listing.imports
if (not self.import_oracle.is_go_internal_import(imp) and
# We assume relative imports are local to the package and skip attempts to
# recursively resolve them.
not self._is_relative(imp))]
@staticmethod
def _read_import_root_map_file(path):
"""Reads a file mapping import paths to roots (e.g., example.org/pkg/foo -> example.org)."""
if os.path.exists(path):
with open(path, 'r') as fp:
return dict({import_path: root for import_path, root in
(x.decode('utf8').strip().split('\t') for x in fp.readlines())})
else:
return {}
@staticmethod
def _write_import_root_map_file(path, import_root_map):
"""Writes a file mapping import paths to roots."""
with safe_concurrent_creation(path) as safe_path:
with open(safe_path, 'w') as fp:
for import_path, root in sorted(import_root_map.items()):
fp.write('{}\t{}\n'.format(import_path, root).encode('utf8'))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import sys
import subprocess
import os
import urllib
from zipfile import ZipFile
from shutil import rmtree
import urlparse
spark_versions = \
{
"2.3.0": {"hadoop_versions": ["2.6", "2.7"]},
"2.2.1": {"hadoop_versions": ["2.6", "2.7"]},
"2.2.0": {"hadoop_versions": ["2.6", "2.7"]},
"2.1.0": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
"2.0.2": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
"2.0.1": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
"2.0.0": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
"1.6.2": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.6.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.6.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.5.2": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.5.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.5.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.4.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.4.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.3.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
"1.3.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
"1.2.2": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
"1.2.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
"1.2.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
"1.1.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
"1.1.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
"1.0.2": {"hadoop_versions": ["1", "cdh4"]},
"1.0.1": {"hadoop_versions": ["1", "cdh4"]},
"1.0.0": {"hadoop_versions": ["1", "cdh4"]},
}
toree_versions = \
{
"1" : "https://www.apache.org/dist/incubator/toree/0.1.0-incubating/toree-pip/apache-toree-0.1.0.tar.gz",
"2" : "https://www.apache.org/dist/incubator/toree/0.2.0-incubating/toree-pip/toree-0.2.0.tar.gz",
"3" : "https://www.apache.org/dist/incubator/toree/0.3.0-incubating/toree-pip/toree-0.3.0.tar.gz"
}
parser = argparse.ArgumentParser(description='Spark cluster deploy tools for Openstack.',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='Usage real-life examples:\t\n'
' ./spark-openstack -k borisenko -i ~/.ssh/id_rsa -s 2 -t spark.large -a 20545e58-59de-4212-a83f-3703b31622cf -n computations-net -f external_network --async launch spark-cluster\n'
' ./spark-openstack --async destroy spark-cluster\n'
'Look through README.md for more advanced usage examples.\n'
'Apache 2.0, ISP RAS 2016 (http://ispras.ru/en).\n')
parser.add_argument('act', type=str,
choices=["launch", "destroy", "get-master", "config", "runner"])
parser.add_argument('cluster_name', help="Name for your cluster")
parser.add_argument('option', nargs='?')
parser.add_argument('-k', '--key-pair')
parser.add_argument("-i", "--identity-file")
parser.add_argument("-s", "--slaves", type=int)
parser.add_argument("-n", "--virtual-network", help="Your virtual Openstack network id for cluster. If have only one network, you may not specify it")
parser.add_argument("-f", "--floating-ip-pool", help="Floating IP pool")
parser.add_argument("-t", "--instance-type")
parser.add_argument("-m", "--master-instance-type", help="master instance type, defaults to same as slave instance type")
parser.add_argument("-a", "--image-id")
parser.add_argument("-w", help="ignored")
parser.add_argument("--create", action="store_true", help="Note that cluster should be created")
parser.add_argument("--deploy-spark", action="store_true", help="Should we deploy Spark (with Hadoop)")
parser.add_argument("--mountnfs", action="store_true", help="Should we run mountnfs")
parser.add_argument("--use-oracle-java", action="store_true", help="Use Oracle Java. If not set, OpenJDK is used")
parser.add_argument("--spark-worker-mem-mb", type=int, help="force worker memory value in megabytes (e.g. 14001)")
parser.add_argument("-j", "--deploy-jupyter", action='store_true', help="Should we deploy jupyter on master node.")
parser.add_argument("-jh", "--deploy-jupyterhub",action='store_true', help="Should we deploy jupyterHub on master node")
parser.add_argument("--spark-version", default="1.6.2", help="Spark version to use")
parser.add_argument("--hadoop-version", help="Hadoop version to use")
parser.add_argument("--boot-from-volume", default=False, help="Should the cluster be based on Cinder volumes. "
"Use it wisely")
parser.add_argument("--hadoop-user", default="ubuntu", help="User to use/create for cluster members")
parser.add_argument("--ansible-bin", help="path to ansible (and ansible-playbook, default='')")
parser.add_argument("--swift-username", help="Username for Swift object storage. If not specified, swift integration "
"is commented out in core-site.xml. You can also use OS_SWIFT_USERNAME"
"environment variable")
parser.add_argument("--swift-password", help="Username for Swift object storage. If not specified, swift integration "
"is commented out in core-site.xml. You can also use OS_SWIFT_PASSWORD"
"environment variable")
parser.add_argument("--nfs-share", default=[], nargs=2, metavar=("<nfs-path>", "<mount-path>"),
help="Should we mount some NFS share(s) on instances",
action='append')
parser.add_argument("--extra-jars", action="append", help="Add/replace extra jars to Spark (during launch). Jar file names must be different")
parser.add_argument("--deploy-ignite", action='store_true', help="Should we deploy Apache Ignite.")
parser.add_argument("--ignite-memory", default=50, type=float, help="Percentage of Spark worker memory to be given to Apache Ignite.")
parser.add_argument("--ignite-version", default="2.7.5", help="Apache Ignite version to use.")
parser.add_argument("--yarn", action='store_true', help="Should we deploy using Apache YARN.")
parser.add_argument("--deploy-elastic", action='store_true', help="Should we deploy ElasticSearch")
parser.add_argument("--es-heap-size", default='1g', help="ElasticSearch heap size")
parser.add_argument("--deploy-cassandra", action='store_true', help="Should we deploy Apache Cassandra")
parser.add_argument("--cassandra-version", default="3.11.4", help="Apache Cassandra version to use")
parser.add_argument("--skip-packages", action='store_true',
help="Skip package installation (Java, rsync, etc). Image must contain all required packages.")
parser.add_argument("--async", action="store_true",
help="Async Openstack operations (may not work with some Openstack environments)")
parser.add_argument("--tags", help="Ansible: run specified tags")
parser.add_argument("--skip-tags", help="Ansible: skip specified tags")
#parser.add_argument("--step", action="store_true", help="Execute play step-by-step")
args, unknown = parser.parse_known_args()
if args.tags is not None:
unknown.append("--tags")
unknown.append(args.tags)
if args.skip_tags is not None:
unknown.append("--skip-tags")
unknown.append(args.skip_tags)
if args.master_instance_type is None:
args.master_instance_type = args.instance_type
if "_" in args.cluster_name:
print("WARNING: '_' symbols in cluster name are not supported, replacing with '-'")
args.cluster_name = args.cluster_name.replace('_', '-')
ansible_cmd = "ansible"
ansible_playbook_cmd = "ansible-playbook"
if args.ansible_bin is not None:
ansible_cmd = os.path.join(args.ansible_bin, "ansible")
ansible_playbook_cmd = os.path.join(args.ansible_bin, "ansible-playbook")
def get_cassandra_connector_jar(spark_version):
spark_cassandra_connector_url = "http://dl.bintray.com/spark-packages/maven/datastax/spark-cassandra-connector/1.6.8-s_2.10/spark-cassandra-connector-1.6.8-s_2.10.jar" \
if args.spark_version.startswith("1.6") \
else "http://dl.bintray.com/spark-packages/maven/datastax/spark-cassandra-connector/2.0.3-s_2.11/spark-cassandra-connector-2.0.3-s_2.11.jar"
spark_cassandra_connector_filename = "/tmp/" + os.path.basename(urlparse.urlsplit(spark_cassandra_connector_url).path)
if not os.path.exists(spark_cassandra_connector_filename):
print("Downloading Spark Cassandra Connector for Spark version {0}".format(spark_version))
urllib.urlretrieve(spark_cassandra_connector_url,filename=spark_cassandra_connector_filename)
return spark_cassandra_connector_filename
def get_elastic_jar():
elastic_hadoop_url = "http://download.elastic.co/hadoop/elasticsearch-hadoop-5.5.0.zip"
elastic_hadoop_filename = "/tmp/" + os.path.basename(urlparse.urlsplit(elastic_hadoop_url).path)
elastic_dir = "/tmp/elasticsearch-hadoop/"
archive_path = "elasticsearch-hadoop-5.5.0/dist/elasticsearch-hadoop-5.5.0.jar"
elastic_path = os.path.join(elastic_dir, archive_path)
if not os.path.exists(elastic_path):
print("Downloading ElasticSearch Hadoop integration")
urllib.urlretrieve(elastic_hadoop_url, filename=elastic_hadoop_filename)
with ZipFile(elastic_hadoop_filename) as archive:
archive.extract(archive_path, path=elastic_dir)
return elastic_path
else:
return elastic_path
def make_extra_vars():
extra_vars = dict()
extra_vars["act"] = args.act
extra_vars["n_slaves"] = args.slaves
extra_vars["cluster_name"] = args.cluster_name
extra_vars["os_image"] = args.image_id
extra_vars["os_key_name"] = args.key_pair
extra_vars["flavor"] = args.instance_type
extra_vars["master_flavor"] = args.master_instance_type
extra_vars["floating_ip_pool"] = args.floating_ip_pool
extra_vars["virtual_network"] = args.virtual_network
extra_vars["ansible_user"] = args.hadoop_user
extra_vars["ansible_ssh_private_key_file"] = args.identity_file
extra_vars["os_project_name"] = os.getenv('OS_PROJECT_NAME') or os.getenv('OS_TENANT_NAME')
if not extra_vars["os_project_name"]:
print("It seems that you h aven't sources your Openstack OPENRC file; quiting")
exit(-1)
extra_vars["os_auth_url"] = os.getenv('OS_AUTH_URL')
if not extra_vars["os_auth_url"]:
print("It seems that you haven't sources your Openstack OPENRC file; quiting")
exit(-1)
extra_vars["hadoop_user"] = args.hadoop_user
if args.act == 'launch':
extra_vars["create_cluster"] = args.create
extra_vars["deploy_spark"] = args.deploy_spark
extra_vars["mountnfs"] = args.mountnfs
extra_vars["spark_version"] = args.spark_version
if args.hadoop_version:
if args.hadoop_version not in spark_versions[args.spark_version]["hadoop_versions"]:
print("Chosen Spark version doesn't support selected Hadoop version")
exit(-1)
extra_vars["hadoop_version"] = args.hadoop_version
else:
extra_vars["hadoop_version"] = spark_versions[args.spark_version]["hadoop_versions"][-1]
print("Deploying Apache Spark %s with Apache Hadoop %s"
% (extra_vars["spark_version"], extra_vars["hadoop_version"]))
extra_vars["boot_from_volume"] = args.boot_from_volume
extra_vars["os_swift_username"] = args.swift_username or os.getenv('OS_SWIFT_USERNAME') or None
if not extra_vars["os_swift_username"]:
del extra_vars["os_swift_username"]
extra_vars["os_swift_password"] = args.swift_password or os.getenv('OS_SWIFT_PASSWORD') or None
if not extra_vars["os_swift_password"]:
del extra_vars["os_swift_password"]
extra_vars["use_oracle_java"] = args.use_oracle_java
extra_vars["deploy_jupyter"] = args.deploy_jupyter
if (args.deploy_jupyter):
extra_vars["toree_version"] = toree_versions[extra_vars["spark_version"][0]]
extra_vars["deploy_jupyterhub"] = args.deploy_jupyterhub
extra_vars["nfs_shares"] = [{"nfs_path": l[0], "mount_path": l[1]} for l in args.nfs_share]
extra_vars["use_yarn"] = args.yarn
#ElasticSearch deployment => --extra-args
extra_vars["deploy_elastic"] = args.deploy_elastic
extra_vars["es_heap_size"] = args.es_heap_size
#Cassandra deployment => --extra-args
extra_vars["deploy_cassandra"] = args.deploy_cassandra
extra_vars["cassandra_version"] = args.cassandra_version
extra_vars["skip_packages"] = args.skip_packages
extra_vars["sync"] = "async" if args.async else "sync"
if args.extra_jars is None:
args.extra_jars = []
extra_jars = list()
def add_jar(path):
extra_jars.append({"name": os.path.basename(path), "path": os.path.abspath(path)})
for jar in args.extra_jars:
if os.path.isdir(jar):
for f in os.listdir(jar):
add_jar(os.path.join(jar, f))
else:
add_jar(jar)
# Obtain Cassandra connector jar if cassandra is deployed
if args.deploy_cassandra:
cassandra_jar = get_cassandra_connector_jar(args.spark_version)
add_jar(cassandra_jar)
if args.deploy_elastic:
elastic_jar = get_elastic_jar()
add_jar(elastic_jar)
extra_vars["extra_jars"] = extra_jars
extra_vars["deploy_ignite"] = args.deploy_ignite
extra_vars["ignite_version"] = args.ignite_version
return extra_vars
def err(msg):
print(msg, file=sys.stderr)
sys.exit(1)
# def parse_host_ip(resp):
# """parse ansible debug output with var=hostvars[inventory_hostname].ansible_ssh_host and return host"""
# parts1 = resp.split("=>")
# if len(parts1) != 2: err("unexpected ansible output1")
# parts2 = parts1[1].split(":")
# if len(parts2) != 2: err("unexpected ansible output2")
# parts3 = parts2[1].split('"')
# if len(parts3) != 3: err("unexpected ansible output3")
# return parts3[1]
# def get_master_ip():
# res = subprocess.check_output([ansible_cmd,
# "-i", "openstack_inventory.py",
# "--extra-vars", repr(make_extra_vars()),
# "-m", "debug", "-a", "var=hostvars[inventory_hostname].ansible_ssh_host",
# args.cluster_name + "-master"])
# return parse_host_ip(res)
def parse_host_ip(resp):
"""parse ansible debug output with var=hostvars[inventory_hostname].ansible_ssh_host and return host"""
parts1 = resp.split("=>")
if len(parts1) != 2: err("unexpected ansible output")
parts2 = parts1[1].split(":")
if len(parts2) != 3: err("unexpected ansible output")
parts3 = parts2[1].split('"')
if len(parts3) != 3: err("unexpected ansible output")
return parts3[1]
def get_master_ip():
vars = make_extra_vars()
vars['extended_role'] = 'master'
res = subprocess.check_output([ansible_playbook_cmd,
"--extra-vars", repr(vars),
"get_ip.yml"])
return parse_host_ip(res)
def get_ip(role):
vars = make_extra_vars()
vars['extended_role'] = role
res = subprocess.check_output([ansible_playbook_cmd,
"--extra-vars", repr(vars),
"get_ip.yml"])
return parse_host_ip(res)
def ssh_output(host, cmd):
return subprocess.check_output(["ssh", "-q", "-t", "-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-i", args.identity_file, "ubuntu@" + host, cmd])
def ssh_first_slave(master_ip, cmd):
#can't do `head -n1 /opt/spark/conf/slaves` since it's not deployed yet
return ssh_output(master_ip, "ssh %s-slave-1 '%s'" % (args.cluster_name, cmd.replace("'", "'\\''")))
#FIXME: copied from https://github.com/amplab/spark-ec2/blob/branch-1.5/deploy_templates.py
def get_worker_mem_mb(master_ip):
if args.spark_worker_mem_mb is not None:
return args.spark_worker_mem_mb
mem_command = "cat /proc/meminfo | grep MemTotal | awk '{print $2}'"
ssh_first_slave_ = ssh_first_slave(master_ip, mem_command)
    # The ssh output should be MemTotal in kB; print the raw output for debugging if it is not numeric.
    if not ssh_first_slave_.strip().isdigit():
        print(ssh_first_slave_)
slave_ram_kb = int(ssh_first_slave_)
slave_ram_mb = slave_ram_kb // 1024
# Leave some RAM for the OS, Hadoop daemons, and system caches
if slave_ram_mb > 100*1024:
slave_ram_mb = slave_ram_mb - 15 * 1024 # Leave 15 GB RAM
elif slave_ram_mb > 60*1024:
slave_ram_mb = slave_ram_mb - 10 * 1024 # Leave 10 GB RAM
elif slave_ram_mb > 40*1024:
slave_ram_mb = slave_ram_mb - 6 * 1024 # Leave 6 GB RAM
elif slave_ram_mb > 20*1024:
slave_ram_mb = slave_ram_mb - 3 * 1024 # Leave 3 GB RAM
elif slave_ram_mb > 10*1024:
slave_ram_mb = slave_ram_mb - 2 * 1024 # Leave 2 GB RAM
else:
slave_ram_mb = max(512, slave_ram_mb - 1300) # Leave 1.3 GB RAM
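    # e.g. a slave with 64 GB (65536 MB) of RAM falls into the "> 60*1024" bucket above and
    # is left with 65536 - 10240 = 55296 MB for the Spark worker.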
return slave_ram_mb
def get_master_mem(master_ip):
mem_command = "cat /proc/meminfo | grep MemTotal | awk '{print $2}'"
master_ram_kb = int(ssh_output(master_ip, mem_command))
master_ram_mb = master_ram_kb // 1024
# Leave some RAM for the OS, Hadoop daemons, and system caches
if master_ram_mb > 100*1024:
master_ram_mb = master_ram_mb - 15 * 1024 # Leave 15 GB RAM
elif master_ram_mb > 60*1024:
master_ram_mb = master_ram_mb - 10 * 1024 # Leave 10 GB RAM
elif master_ram_mb > 40*1024:
master_ram_mb = master_ram_mb - 6 * 1024 # Leave 6 GB RAM
elif master_ram_mb > 20*1024:
master_ram_mb = master_ram_mb - 3 * 1024 # Leave 3 GB RAM
elif master_ram_mb > 10*1024:
master_ram_mb = master_ram_mb - 2 * 1024 # Leave 2 GB RAM
else:
master_ram_mb = max(512, master_ram_mb - 1300) # Leave 1.3 GB RAM
return "%s" % master_ram_mb
def get_slave_cpus(master_ip):
return int(ssh_first_slave(master_ip, "nproc"))
cmdline = [ansible_playbook_cmd]
cmdline.extend(unknown)
extra_vars = make_extra_vars()
if args.act == "launch":
cmdline_create = cmdline[:]
cmdline_create.extend(["-v", "main.yml", "--extra-vars", repr(extra_vars)])
subprocess.call(cmdline_create)
master_ip = get_master_ip()
print("Cluster launched successfully; Master IP is %s"%(master_ip))
elif args.act == "destroy":
res = subprocess.check_output([ansible_cmd,
"--extra-vars", repr(make_extra_vars()),
"-m", "debug", "-a", "var=groups['%s_slaves']" % args.cluster_name,
args.cluster_name + "-master"])
extra_vars = make_extra_vars()
cmdline_create = cmdline[:]
cmdline_create.extend(["main.yml", "--extra-vars", repr(extra_vars)])
subprocess.call(cmdline_create)
elif args.act == "get-master":
print(get_master_ip())
elif args.act == "config":
extra_vars = make_extra_vars()
extra_vars['roles_dir'] = '../roles'
cmdline_inventory = cmdline[:]
if args.option == 'restart-spark': #Skip installation tasks, run only detect_conf tasks
cmdline_inventory.extend(("--skip-tags", "spark_install"))
elif args.option == 'restart-cassandra':
cmdline_inventory.extend(("--skip-tags", "spark_install,cassandra"))
cmdline_inventory.extend(["%s.yml" % args.option, "--extra-vars", repr(extra_vars)])
subprocess.call(cmdline_inventory)
elif args.act == "runner":
cmdline_create = cmdline[:]
cmdline_create.extend(["prepare_internal_runner.yml", "--extra-vars", repr(extra_vars)])
subprocess.call(cmdline_create)
runner_ip = get_ip('runner')
print("Runner ready; IP is %s"%(runner_ip))
else:
err("unknown action: " + args.act)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
import sys
from yapsy.PluginManager import PluginManager
from modules.util.log import LogFactory
from exception import ParameterNotFoundException, InvalidConfigValueException
import constants
from plugins.contracts import ICartridgeAgentPlugin, IArtifactCommitPlugin, IArtifactCheckoutPlugin, \
IHealthStatReaderPlugin
class Config:
"""
Handles the configuration information of the particular Cartridge Agent
"""
def __init__(self):
pass
AGENT_PLUGIN_EXT = "agent-plugin"
ARTIFACT_CHECKOUT_PLUGIN = "ArtifactCheckoutPlugin"
ARTIFACT_COMMIT_PLUGIN = "ArtifactCommitPlugin"
CARTRIDGE_AGENT_PLUGIN = "CartridgeAgentPlugin"
HEALTH_STAT_PLUGIN = "HealthStatReaderPlugin"
# set log level
log = LogFactory().get_log(__name__)
payload_params = {}
properties = None
""" :type : ConfigParser.SafeConfigParser """
plugins = {}
""" :type dict{str: [PluginInfo]} : """
artifact_checkout_plugin = None
artifact_commit_plugin = None
health_stat_plugin = None
extension_executor = None
application_id = None
""" :type : str """
service_group = None
""" :type : str """
is_clustered = False
""" :type : bool """
service_name = None
""" :type : str """
cluster_id = None
""" :type : str """
cluster_instance_id = None
""" :type : str """
member_id = None
""" :type : str """
instance_id = None
""" :type : str """
network_partition_id = None
""" :type : str """
partition_id = None
""" :type : str """
cartridge_key = None
""" :type : str """
app_path = None
""" :type : str """
repo_url = None
""" :type : str """
ports = []
""" :type : list[str] """
log_file_paths = []
""" :type : list[str] """
is_multiTenant = False
""" :type : bool """
persistence_mappings = None
""" :type : str """
is_commits_enabled = False
""" :type : bool """
is_checkout_enabled = False
""" :type : bool """
listen_address = None
""" :type : str """
is_internal_repo = False
""" :type : bool """
tenant_id = None
""" :type : str """
lb_cluster_id = None
""" :type : str """
min_count = None
""" :type : str """
lb_private_ip = None
""" :type : str """
lb_public_ip = None
""" :type : str """
tenant_repository_path = None
""" :type : str """
super_tenant_repository_path = None
""" :type : str """
deployment = None
""" :type : str """
manager_service_name = None
""" :type : str """
worker_service_name = None
""" :type : str """
dependant_cluster_id = None
""" :type : str """
export_metadata_keys = None
""" :type : str """
import_metadata_keys = None
""" :type : str """
is_primary = False
""" :type : bool """
artifact_update_interval = None
""" :type : str """
lvs_virtual_ip = None
""" :type : str """
initialized = False
""" :type : bool """
activated = False
""" :type : bool """
started = False
""" :type : bool """
ready_to_shutdown = False
""" :type : bool """
maintenance = False
""" :type : bool """
mb_urls = []
""" :type : list """
mb_username = None
""" :type : str """
mb_password = None
""" :type : str """
mb_publisher_timeout = None
""" :type : int """
cep_username = None
""" :type : str """
cep_password = None
""" :type : str """
cep_urls = []
""" :type : list """
artifact_clone_retry_count = None
""" :type : str """
artifact_clone_retry_interval = None
""" :type : str """
port_check_timeout = None
""" :type : str """
@staticmethod
def read_conf_file():
"""
Reads and stores the agent's configuration file
:return: properties object
:rtype: ConfigParser.SafeConfigParser()
"""
conf_file_path = os.path.abspath(os.path.dirname(__file__)) + "/agent.conf"
Config.log.debug("Config file path : %r" % conf_file_path)
properties = ConfigParser.SafeConfigParser()
properties.read(conf_file_path)
# set calculated values
param_file = os.path.abspath(os.path.dirname(__file__)) + "/payload/launch-params"
Config.log.debug("param_file: %r" % param_file)
properties.set("agent", constants.PARAM_FILE_PATH, param_file)
plugins_dir = os.path.abspath(os.path.dirname(__file__)) + "/plugins"
Config.log.debug("plugins_dir: %r" % plugins_dir)
properties.set("agent", constants.PLUGINS_DIR, plugins_dir)
plugins_dir = os.path.abspath(os.path.dirname(__file__)) + "/extensions/py"
properties.set("agent", constants.EXTENSIONS_DIR, plugins_dir)
return properties
@staticmethod
def read_payload_file(param_file_path):
"""
Reads the payload file of the cartridge and stores the values in a dictionary
:param param_file_path: payload parameter file path
:return: Payload parameter dictionary of values
:rtype: dict
"""
Config.log.debug("Param file path : %r" % param_file_path)
try:
payload_params = {}
if param_file_path is not None:
param_file = open(param_file_path)
payload_content = param_file.read()
for param in payload_content.split(","):
if param.strip() != "":
param_value = param.strip().split("=")
try:
if str(param_value[1]).strip().lower() == "null" or str(param_value[1]).strip() == "":
payload_params[param_value[0]] = None
else:
payload_params[param_value[0]] = param_value[1]
except IndexError:
# If an index error comes when reading values, keep on reading
pass
param_file.close()
return payload_params
else:
raise RuntimeError("Payload parameter file not found: %r" % param_file_path)
except Exception as e:
Config.log.exception("Could not read payload parameter file: %s" % e)
@staticmethod
def convert_to_type(value_string):
"""
        Determine the appropriately typed value to return for the provided string
        :param value_string: raw string value read from the configuration or payload
        :return: None for empty/"null" strings, a bool for "true"/"false", otherwise the original string
"""
if value_string is None:
return None
value_string = str(value_string).strip()
if value_string == "" or value_string.lower() == "null":
# converted as a null value
return None
if value_string.lower() == "true":
# boolean TRUE
return True
if value_string.lower() == "false":
# boolean FALSE
return False
#
# value_split = value_string.split("|")
# if len(value_split) > 1:
# # can be split using the delimiter, array returned
# return value_split
return value_string
@staticmethod
def read_property(property_key, critical=True):
"""
Returns the value of the provided property
:param critical: If absence of this value should throw an error
:param str property_key: the name of the property to be read
:return: Value of the property
:exception: ParameterNotFoundException if the provided property cannot be found
"""
if Config.properties.has_option("agent", property_key):
temp_str = Config.properties.get("agent", property_key)
Config.log.debug("Reading property: %s = %s", property_key, temp_str)
real_value = Config.convert_to_type(temp_str)
if real_value is not None:
return real_value
if property_key in Config.payload_params:
temp_str = Config.payload_params[property_key]
Config.log.debug("Reading payload parameter: %s = %s", property_key, temp_str)
real_value = Config.convert_to_type(temp_str)
if real_value is not None:
return real_value
# real value is None
if critical:
raise ParameterNotFoundException("Cannot find the value of required parameter: %r" % property_key)
else:
return None
@staticmethod
def get_payload_params():
return Config.payload_params
@staticmethod
def initialize_config():
"""
        Read the agent configuration file and the payload parameter file and load the values into the Config fields
:return: void
"""
Config.properties = Config.read_conf_file()
param_file_path = Config.properties.get("agent", constants.PARAM_FILE_PATH)
Config.payload_params = Config.read_payload_file(param_file_path)
try:
Config.application_id = Config.read_property(constants.APPLICATION_ID)
Config.service_name = Config.read_property(constants.SERVICE_NAME)
Config.cluster_id = Config.read_property(constants.CLUSTER_ID)
Config.ports = Config.read_property(constants.PORTS).replace("'", "").split("|")
Config.is_multiTenant = Config.read_property(constants.MULTITENANT)
Config.tenant_id = Config.read_property(constants.TENANT_ID)
try:
Config.is_clustered = Config.read_property(constants.CLUSTERING, False)
except ParameterNotFoundException:
Config.is_clustered = False
try:
Config.is_commits_enabled = Config.read_property(constants.COMMIT_ENABLED, False)
except ParameterNotFoundException:
try:
Config.is_commits_enabled = Config.read_property(constants.AUTO_COMMIT, False)
except ParameterNotFoundException:
Config.is_commits_enabled = False
try:
Config.is_internal_repo = Config.read_property(constants.INTERNAL)
except ParameterNotFoundException:
Config.is_internal_repo = False
try:
Config.artifact_update_interval = Config.read_property(constants.ARTIFACT_UPDATE_INTERVAL)
except ParameterNotFoundException:
Config.artifact_update_interval = 10
Config.service_group = Config.read_property(constants.SERVICE_GROUP, False)
Config.cluster_instance_id = Config.read_property(constants.CLUSTER_INSTANCE_ID, False)
Config.member_id = Config.read_property(constants.MEMBER_ID, False)
Config.network_partition_id = Config.read_property(constants.NETWORK_PARTITION_ID, False)
Config.partition_id = Config.read_property(constants.PARTITION_ID, False)
Config.app_path = Config.read_property(constants.APPLICATION_PATH, False)
Config.repo_url = Config.read_property(constants.REPO_URL, False)
if Config.repo_url is not None:
Config.cartridge_key = Config.read_property(constants.CARTRIDGE_KEY)
else:
Config.cartridge_key = Config.read_property(constants.CARTRIDGE_KEY, False)
Config.dependant_cluster_id = Config.read_property(constants.DEPENDENCY_CLUSTER_IDS, False)
Config.export_metadata_keys = Config.read_property(constants.EXPORT_METADATA_KEYS, False)
Config.import_metadata_keys = Config.read_property(constants.IMPORT_METADATA_KEYS, False)
Config.lvs_virtual_ip = Config.read_property(constants.LVS_VIRTUAL_IP, False)
try:
Config.log_file_paths = Config.read_property(constants.LOG_FILE_PATHS).split("|")
except ParameterNotFoundException:
Config.log_file_paths = None
Config.persistence_mappings = Config.read_property(constants.PERSISTENCE_MAPPING, False)
Config.is_checkout_enabled = Config.read_property(constants.AUTO_CHECKOUT, False)
Config.listen_address = Config.read_property(constants.LISTEN_ADDRESS, False)
Config.lb_cluster_id = Config.read_property(constants.LB_CLUSTER_ID, False)
Config.min_count = Config.read_property(constants.MIN_INSTANCE_COUNT, False)
Config.lb_private_ip = Config.read_property(constants.LB_PRIVATE_IP, False)
Config.lb_public_ip = Config.read_property(constants.LB_PUBLIC_IP, False)
Config.tenant_repository_path = Config.read_property(constants.TENANT_REPO_PATH, False)
Config.super_tenant_repository_path = Config.read_property(constants.SUPER_TENANT_REPO_PATH, False)
Config.is_primary = Config.read_property(constants.CLUSTERING_PRIMARY_KEY, False)
Config.mb_username = Config.read_property(constants.MB_USERNAME, False)
Config.mb_password = Config.read_property(constants.MB_PASSWORD, False)
Config.mb_urls = Config.read_property(constants.MB_URLS)
Config.mb_publisher_timeout = int(Config.read_property(constants.MB_PUBLISHER_TIMEOUT))
Config.cep_username = Config.read_property(constants.CEP_SERVER_ADMIN_USERNAME)
Config.cep_password = Config.read_property(constants.CEP_SERVER_ADMIN_PASSWORD)
Config.cep_urls = Config.read_property(constants.CEP_RECEIVER_URLS)
try:
Config.artifact_clone_retry_count = Config.read_property(constants.ARTIFACT_CLONE_RETRIES)
except ParameterNotFoundException:
Config.artifact_clone_retry_count = "5"
try:
Config.artifact_clone_retry_interval = Config.read_property(constants.ARTIFACT_CLONE_INTERVAL)
except ParameterNotFoundException:
Config.artifact_clone_retry_interval = "10"
try:
Config.port_check_timeout = Config.read_property(constants.PORT_CHECK_TIMEOUT)
except ParameterNotFoundException:
Config.port_check_timeout = "600000"
Config.validate_config()
except ParameterNotFoundException as ex:
raise RuntimeError(ex)
Config.log.info("Cartridge agent configuration initialized")
Config.log.debug("service-name: %r" % Config.service_name)
Config.log.debug("cluster-id: %r" % Config.cluster_id)
Config.log.debug("cluster-instance-id: %r" % Config.cluster_instance_id)
Config.log.debug("member-id: %r" % Config.member_id)
Config.log.debug("network-partition-id: %r" % Config.network_partition_id)
Config.log.debug("partition-id: %r" % Config.partition_id)
Config.log.debug("cartridge-key: %r" % Config.cartridge_key)
Config.log.debug("app-path: %r" % Config.app_path)
Config.log.debug("repo-url: %r" % Config.repo_url)
Config.log.debug("ports: %r" % str(Config.ports))
Config.log.debug("lb-private-ip: %r" % Config.lb_private_ip)
Config.log.debug("lb-public-ip: %r" % Config.lb_public_ip)
Config.log.debug("dependant_cluster_id: %r" % Config.dependant_cluster_id)
Config.log.debug("export_metadata_keys: %r" % Config.export_metadata_keys)
Config.log.debug("import_metadata_keys: %r" % Config.import_metadata_keys)
Config.log.debug("artifact.update.interval: %r" % Config.artifact_update_interval)
Config.log.debug("lvs-virtual-ip: %r" % Config.lvs_virtual_ip)
Config.log.debug("log_file_paths: %s" % Config.log_file_paths)
Config.log.info("Initializing plugins")
Config.initialize_plugins()
Config.extension_executor = Config.initialize_extensions()
@staticmethod
def validate_config():
try:
Config.validate_url_list(Config.mb_urls, constants.MB_URLS)
Config.validate_int(Config.mb_publisher_timeout, constants.MB_PUBLISHER_TIMEOUT)
Config.validate_url_list(Config.cep_urls, constants.CEP_RECEIVER_URLS)
Config.validate_int(Config.artifact_update_interval, constants.ARTIFACT_UPDATE_INTERVAL)
Config.validate_int(Config.artifact_clone_retry_count, constants.ARTIFACT_CLONE_RETRIES)
Config.validate_int(Config.artifact_clone_retry_interval, constants.ARTIFACT_CLONE_INTERVAL)
Config.validate_int(Config.port_check_timeout, constants.PORT_CHECK_TIMEOUT)
except ValueError as err:
raise InvalidConfigValueException("Invalid configuration for Cartridge Agent", err)
@staticmethod
def validate_url_list(urls, field_name):
"""
        Validate a comma-separated list of URLs of the form host1:port1,host2:port2
        :param urls: comma-separated host:port pairs
        :param field_name: name of the configuration field being validated
        :return:
"""
url_list = str(urls).split(",")
if len(url_list) < 1:
raise ValueError("Invalid value [field] \"%s\"" % field_name)
for single_url in url_list:
try:
url_ip, url_port = single_url.split(":")
except ValueError:
raise ValueError("Invalid host or port number value for [field] %s", field_name)
@staticmethod
def validate_int(int_value, field_name):
"""
valid integer value
:param int_value:
:param field_name:
:return:
"""
try:
int(int_value)
except ValueError:
raise ValueError("Invalid int value for [field] %s " % field_name)
@staticmethod
def initialize_plugins():
""" Find, load, activate and group plugins for Python CA
        :return: None. Discovered plugins are stored in Config.plugins, Config.artifact_checkout_plugin,
                 Config.artifact_commit_plugin and Config.health_stat_plugin
"""
Config.log.info("Collecting and loading plugins")
try:
# TODO: change plugin descriptor ext, plugin_manager.setPluginInfoExtension(AGENT_PLUGIN_EXT)
plugins_dir = Config.read_property(constants.PLUGINS_DIR)
category_filter = {Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin,
Config.ARTIFACT_CHECKOUT_PLUGIN: IArtifactCheckoutPlugin,
Config.ARTIFACT_COMMIT_PLUGIN: IArtifactCommitPlugin,
Config.HEALTH_STAT_PLUGIN: IHealthStatReaderPlugin}
plugin_manager = Config.create_plugin_manager(category_filter, plugins_dir)
# activate cartridge agent plugins
plugins = plugin_manager.getPluginsOfCategory(Config.CARTRIDGE_AGENT_PLUGIN)
grouped_ca_plugins = {}
for plugin_info in plugins:
Config.log.debug("Found plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
plugin_manager.activatePluginByName(plugin_info.name)
Config.log.info("Activated plugin [%s]" % plugin_info.name)
mapped_events = plugin_info.description.split(",")
for mapped_event in mapped_events:
if mapped_event.strip() != "":
if grouped_ca_plugins.get(mapped_event) is None:
grouped_ca_plugins[mapped_event] = []
grouped_ca_plugins[mapped_event].append(plugin_info)
Config.plugins = grouped_ca_plugins
# activate artifact management plugins
artifact_checkout_plugins = plugin_manager.getPluginsOfCategory(Config.ARTIFACT_CHECKOUT_PLUGIN)
for plugin_info in artifact_checkout_plugins:
Config.log.debug("Found artifact checkout plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
# if multiple artifact management plugins are registered, halt agent execution. This is to avoid any
# undesired outcome due to errors made in deployment
if Config.is_checkout_enabled:
if len(artifact_checkout_plugins) == 0:
Config.log.exception(
"No plugins registered for artifact checkout extension. Stratos agent failed to start")
sys.exit(1)
elif len(artifact_checkout_plugins) == 1:
plugin_info = artifact_checkout_plugins[0]
Config.log.debug("Found artifact checkout plugin [%s] at [%s]" %
(plugin_info.name, plugin_info.path))
plugin_manager.activatePluginByName(plugin_info.name)
Config.log.info("Activated artifact checkout plugin [%s]" % plugin_info.name)
Config.artifact_checkout_plugin = plugin_info
elif len(artifact_checkout_plugins) > 1:
Config.log.exception(
"Multiple plugins registered for artifact checkout. Stratos agent failed to start.")
sys.exit(1)
artifact_commit_plugins = plugin_manager.getPluginsOfCategory(Config.ARTIFACT_COMMIT_PLUGIN)
for plugin_info in artifact_commit_plugins:
Config.log.debug("Found artifact commit plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
if Config.is_commits_enabled:
if len(artifact_commit_plugins) == 0:
Config.log.exception(
"No plugins registered for artifact commit extension. Stratos agent failed to start")
sys.exit(1)
elif len(artifact_commit_plugins) == 1:
plugin_info = artifact_commit_plugins[0]
Config.log.debug("Found artifact commit plugin [%s] at [%s]" %
(plugin_info.name, plugin_info.path))
plugin_manager.activatePluginByName(plugin_info.name)
Config.log.info("Activated artifact commit plugin [%s]" % plugin_info.name)
Config.artifact_commit_plugin = plugin_info
elif len(artifact_commit_plugins) > 1:
Config.log.exception(
"Multiple plugins registered for artifact checkout. Stratos agent failed to start.")
sys.exit(1)
health_stat_plugins = plugin_manager.getPluginsOfCategory(Config.HEALTH_STAT_PLUGIN)
for plugin_info in health_stat_plugins:
Config.log.debug("Found health stats reader plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
# If multiple health stat reader plugins are registered, halt agent execution. This is to avoid any
# undesired outcome due to errors made in deployment
if len(health_stat_plugins) == 0:
Config.log.exception(
"No plugins registered for health statistics reader. Stratos agent failed to start.")
sys.exit(1)
elif len(health_stat_plugins) == 1:
plugin_info = health_stat_plugins[0]
Config.log.debug("Found health statistics reader plugin [%s] at [%s]" %
(plugin_info.name, plugin_info.path))
plugin_manager.activatePluginByName(plugin_info.name)
Config.log.info("Activated health statistics reader plugin [%s]" % plugin_info.name)
Config.health_stat_plugin = plugin_info
elif len(health_stat_plugins) > 1:
Config.log.exception(
"Multiple plugins registered for health statistics reader. Stratos agent failed to start.")
sys.exit(1)
except ParameterNotFoundException as e:
Config.log.exception("Could not load plugins. Plugins directory not set: %s" % e)
Config.log.error("Stratos agent failed to start")
sys.exit(1)
except Exception as e:
Config.log.exception("Error while loading plugins: %s" % e)
Config.log.error("Stratos agent failed to start")
sys.exit(1)
@staticmethod
def initialize_extensions():
""" Find, load and activate extension scripts for Python CA. The extensions are mapped to the event by the
name used in the plugin descriptor.
:return:a tuple of (PluginManager, extensions)
"""
Config.log.info("Collecting and loading extensions")
try:
extensions_dir = Config.read_property(constants.EXTENSIONS_DIR)
category_filter = {Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin}
extension_manager = Config.create_plugin_manager(category_filter, extensions_dir)
all_extensions = extension_manager.getPluginsOfCategory(Config.CARTRIDGE_AGENT_PLUGIN)
for plugin_info in all_extensions:
try:
Config.log.debug("Found extension executor [%s] at [%s]" % (plugin_info.name, plugin_info.path))
extension_manager.activatePluginByName(plugin_info.name)
extension_executor = plugin_info
Config.log.info("Activated extension executor [%s]" % plugin_info.name)
# extension executor found. break loop and return
return extension_executor
except Exception as ignored:
pass
# no extension executor plugin could be loaded or activated
raise RuntimeError("Couldn't activated any ExtensionExecutor plugin")
except ParameterNotFoundException as e:
Config.log.exception("Could not load extensions. Extensions directory not set: %s" % e)
return None
except Exception as e:
Config.log.exception("Error while loading extension: %s" % e)
return None
@staticmethod
def create_plugin_manager(category_filter, plugin_place):
""" Creates a PluginManager object from the given folder according to the given filter
:param category_filter:
:param plugin_place:
:return:
:rtype: PluginManager
"""
plugin_manager = PluginManager()
plugin_manager.setCategoriesFilter(category_filter)
plugin_manager.setPluginPlaces([plugin_place])
plugin_manager.collectPlugins()
return plugin_manager
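# Illustrative sketch (not part of the agent) of the yapsy flow that create_plugin_manager()
# above wraps: build a manager with a category filter, collect plugins from a directory and
# activate whatever was found. The "plugins" directory and the "CartridgeAgentPlugin"
# category name here are assumptions for this example only.
if __name__ == "__main__":
    from yapsy.IPlugin import IPlugin
    from yapsy.PluginManager import PluginManager

    manager = PluginManager()
    manager.setCategoriesFilter({"CartridgeAgentPlugin": IPlugin})
    manager.setPluginPlaces(["plugins"])
    manager.collectPlugins()
    for info in manager.getPluginsOfCategory("CartridgeAgentPlugin"):
        manager.activatePluginByName(info.name)
        print("Activated plugin [%s] at [%s]" % (info.name, info.path))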
|
|
#!flask/bin/python
import unittest
import os
from datetime import datetime, time
from sqlalchemy.exc import IntegrityError
from app import app, db
from config import basedir
from app.models import (Address, ColoredPin, FoodResource, FoodResourceType,
    OpenMonthPair, TimeSlot, User, Role, PhoneNumber)
from app.utils import *
class TestCase(unittest.TestCase):
# Run at the beginning of every test.
def setUp(self):
app.config['TESTING'] = True
app.config['CSRF_ENABLED'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(basedir, 'test.db')
self.app = app.test_client()
db.create_all()
self.create_vars()
# Run at the end of every test.
def tearDown(self):
db.session.remove()
db.drop_all()
# Sets up global variables that will be used in several tests.
def create_vars(self):
self.a1 = Address(line1 = '1234 MB 1234', line2 = '3700 Spruce St',
city = 'Philadelphia', state = 'PA', zip_code = '14109')
self.a2 = Address(line1 = '4567 MB 1234', line2 = '3600 Spruce St',
city = 'Philadelphia', state = 'PA', zip_code = '14109')
self.timeslots_list = \
[TimeSlot(day_of_week = 0, start_time = time(8,0),
end_time = time(18,30)),
TimeSlot(day_of_week = 1, start_time = time(7,0),
end_time = time(19,30)),
TimeSlot(day_of_week = 2, start_time = time(7,30),
end_time = time(18,30)),
TimeSlot(day_of_week = 3, start_time = time(8,0),
end_time = time(19,30)),
TimeSlot(day_of_week = 4, start_time = time(10,0),
end_time = time(15,30)),
TimeSlot(day_of_week = 5, start_time = time(8,15),
end_time = time(18,45)),
TimeSlot(day_of_week = 6, start_time = time(9,0),
end_time = time(20,45))]
self.timeslots_list2 = \
[TimeSlot(day_of_week = 0, start_time = time(8,0),
end_time = time(18,30)),
TimeSlot(day_of_week = 1, start_time = time(7,0),
end_time = time(19,30)),
TimeSlot(day_of_week = 3, start_time = time(8,0),
end_time = time(19,30)),
TimeSlot(day_of_week = 4, start_time = time(10,0),
end_time = time(15,30)),
TimeSlot(day_of_week = 5, start_time = time(8,15),
end_time = time(18,45)),
TimeSlot(day_of_week = 6, start_time = time(9,0),
end_time = time(20,45))]
self.desc = """Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Donec sem neque, vehicula ac nisl at, porta porttitor enim.
Suspendisse nibh eros, pulvinar nec risus a, dignissim efficitur
diam. Phasellus vestibulum posuere ex, vel hendrerit turpis molestie
sit amet. Nullam ornare magna quis urna sodales, vel iaculis purus
consequat. Mauris laoreet enim ipsum. Cum sociis natoque penatibus
et magnis dis parturient montes, nascetur ridiculus mus. Nulla
facilisi. In et dui ante. Morbi elementum dolor ligula, et mollis
magna interdum non. Mauris ligula mi, mattis at ex ut, pellentesque
porttitor elit. Integer volutpat elementum tristique. Ut interdum,
mauris a feugiat euismod, tortor."""
self.u1 = User(email='[email protected]', password = 'pass123',
first_name = 'Ben', last_name = 'Sandler',
roles=[Role(name = 'User')], is_enabled = True)
self.u2 = User(email = '[email protected]', password = 'p@$$w0rd',
first_name = 'Steve', last_name = 'Smith',
roles = [Role(name = 'User')], is_enabled = True)
self.u3 = User(email = '[email protected]',
password = '139rjf9i#@$#R$#!#!!!48939832984893rfcnj3@#%***^%$#@#$@#',
first_name = 'Sarah', last_name = 'Smith',
roles = [Role(name = 'Admin')], is_enabled = True)
self.p1 = PhoneNumber(number = '1234567898')
self.p2 = PhoneNumber(number = '9876543215')
# Adds two valid addresses to the database and then checks that both can be
# retrieved and that a bad query returns no results.
def test_writing_reading_address(self):
db.session.add(self.a1)
db.session.add(self.a2)
db.session.commit()
assert len(Address.query.filter_by(zip_code = '14109').all()) == 2
assert len(Address.query.filter_by(zip_code = '14109', city = 'New York').all()) == 0
# Adds a valid Address to the database and then makes sure it can be
# retrieved by name and address.
def test_create_valid_food_resource(self):
# Create yellow colored pin.
cp_yellow = ColoredPin(
color_name="Yellow",
hex_color="fdd800",
pin_image_name="mb_yellow.png"
)
db.session.add(cp_yellow)
# Create farmers' market food resource type.
frt_farmers_market = FoodResourceType(
name_singular="Farmers' Market",
name_plural="Farmers' Markets",
colored_pin=cp_yellow)
db.session.add(frt_farmers_market)
# Create a farmers' market food resource.
r1 = FoodResource(name = 'Test Food Resource 1', address = self.a1,
phone_numbers=[self.p2], timeslots = self.timeslots_list,
description = self.desc, food_resource_type = frt_farmers_market)
db.session.add(r1)
db.session.commit()
assert len(FoodResource.query.filter_by(name = 'Test Food Resource 1')
.all()) == 1
assert len(FoodResource.query.filter_by(address = self.a1)
.all()) == 1
assert len(FoodResource.query.filter_by(name = 'Test Food Resource 1')
.first().timeslots) == 7
def test_create_user(self):
db.session.add(self.u1)
db.session.commit()
assert len(Role.query.filter_by(name = 'User').all()) == 1
u = User.query.filter_by(email = '[email protected]').first()
assert u
assert u.verify_password('pass123')
assert not(u.verify_password('pass124'))
def test_create_multiple_users(self):
db.session.add(self.u1)
db.session.add(self.u2)
db.session.add(self.u3)
db.session.commit()
assert len(Role.query.filter_by(name = 'User').all()) == 2
assert len(Role.query.filter_by(name = 'Admin').all()) == 1
assert len(Role.query.filter_by(name = 'N/A').all()) == 0
u = User.query.filter_by(email = '[email protected]').first()
assert u.verify_password('139rjf9i#@$#R$#!#!!!48939832984893rfcnj3@#%***^%$#@#$@#')
assert not(u.verify_password('239rjf9i#@$#R$#!#!!!48939832984893rfcnj3@#%***^%$#@#$@#'))
def test_is_open(self):
# Create yellow colored pin.
cp_yellow = ColoredPin(
color_name="Yellow",
hex_color="fdd800",
pin_image_name="mb_yellow.png"
)
db.session.add(cp_yellow)
# Create farmers' market food resource type.
frt_farmers_market = FoodResourceType(
name_singular="Farmers' Market",
name_plural="Farmers' Markets",
colored_pin=cp_yellow)
db.session.add(frt_farmers_market)
open_pairs = \
[OpenMonthPair(start_month = 1, end_month = 3),
OpenMonthPair(start_month = 5, end_month = 7),
OpenMonthPair(start_month = 10, end_month = 11)]
r1 = FoodResource(name = 'Test Food Resource 1', address = self.a1,
phone_numbers=[self.p2], timeslots = self.timeslots_list,
description = self.desc, food_resource_type = frt_farmers_market
)
r2 = FoodResource(name = 'Test Food Resource 1', address = self.a1,
phone_numbers=[self.p2], timeslots = self.timeslots_list2,
description = self.desc, food_resource_type = frt_farmers_market
)
r1.open_month_pairs.append(OpenMonthPair(start_month = 1, end_month = 3))
r1.open_month_pairs.append(OpenMonthPair(start_month = 5, end_month = 7))
r1.open_month_pairs.append(OpenMonthPair(start_month = 10, end_month = 11))
r2.open_month_pairs.append(OpenMonthPair(start_month = 1, end_month = 3))
r2.open_month_pairs.append(OpenMonthPair(start_month = 5, end_month = 7))
r2.open_month_pairs.append(OpenMonthPair(start_month = 10, end_month = 11))
# Right month right time
assert is_open(resource = r1,
current_date = datetime(year = 2014, month = 11, day = 24, hour = 12, minute = 30))
assert is_open(resource = r1,
current_date = datetime(year = 2014, month = 2, day = 24, hour = 10, minute = 31))
# Wrong month wrong time
assert not is_open(resource = r1,
current_date = datetime(year = 2014, month = 9, day = 13, hour = 21, minute = 30))
# Wrong month right time
assert not is_open(resource = r1,
current_date = datetime(year = 2014, month = 9, day = 13, hour = 10, minute = 22))
# Right month wrong time
assert not is_open(resource = r1,
current_date = datetime(year = 2014, month = 2, day = 13, hour = 21, minute = 30))
# Right month, closed on Tuesdays
assert not is_open(resource = r2,
current_date = datetime(year = 2014, month = 11, day = 25, hour = 10, minute = 30))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2015-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import uuid
sys.path[0:0] = [""]
from test import client_context, unittest
from test.test_client import IntegrationTest
from test.utils import rs_or_single_client
from bson import decode, encode
from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation
from bson.codec_options import CodecOptions
from bson.errors import InvalidBSON
from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument
from bson.son import SON
class TestRawBSONDocument(IntegrationTest):
# {'_id': ObjectId('556df68b6e32ab21a95e0785'),
# 'name': 'Sherlock',
# 'addresses': [{'street': 'Baker Street'}]}
bson_string = (
b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t"
b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e"
b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00"
)
document = RawBSONDocument(bson_string)
@classmethod
def setUpClass(cls):
super(TestRawBSONDocument, cls).setUpClass()
client_context.client = rs_or_single_client()
cls.client = client_context.client
def tearDown(self):
if client_context.connected:
self.client.pymongo_test.test_raw.drop()
def test_decode(self):
self.assertEqual("Sherlock", self.document["name"])
first_address = self.document["addresses"][0]
self.assertIsInstance(first_address, RawBSONDocument)
self.assertEqual("Baker Street", first_address["street"])
def test_raw(self):
self.assertEqual(self.bson_string, self.document.raw)
def test_empty_doc(self):
doc = RawBSONDocument(encode({}))
with self.assertRaises(KeyError):
doc["does-not-exist"]
def test_invalid_bson_sequence(self):
bson_byte_sequence = encode({"a": 1}) + encode({})
with self.assertRaisesRegex(InvalidBSON, "invalid object length"):
RawBSONDocument(bson_byte_sequence)
def test_invalid_bson_eoo(self):
invalid_bson_eoo = encode({"a": 1})[:-1] + b"\x01"
with self.assertRaisesRegex(InvalidBSON, "bad eoo"):
RawBSONDocument(invalid_bson_eoo)
@client_context.require_connection
def test_round_trip(self):
db = self.client.get_database(
"pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument)
)
db.test_raw.insert_one(self.document)
result = db.test_raw.find_one(self.document["_id"])
assert result is not None
self.assertIsInstance(result, RawBSONDocument)
self.assertEqual(dict(self.document.items()), dict(result.items()))
@client_context.require_connection
def test_round_trip_raw_uuid(self):
coll = self.client.get_database("pymongo_test").test_raw
uid = uuid.uuid4()
doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)}
raw = RawBSONDocument(encode(doc))
coll.insert_one(raw)
self.assertEqual(coll.find_one(), doc)
uuid_coll = coll.with_options(
codec_options=coll.codec_options.with_options(
uuid_representation=UuidRepresentation.STANDARD
)
)
self.assertEqual(
uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)}
)
# Test that the raw bytes haven't changed.
raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS)
self.assertEqual(raw_coll.find_one(), raw)
def test_with_codec_options(self):
# {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000),
# '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')}
# encoded with JAVA_LEGACY uuid representation.
bson_string = (
b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02"
b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM"
b"\x01\x00\x00\x00"
)
document = RawBSONDocument(
bson_string,
codec_options=CodecOptions(
uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument
),
)
self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"])
@client_context.require_connection
def test_round_trip_codec_options(self):
doc = {
"date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000),
"_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"),
}
db = self.client.pymongo_test
coll = db.get_collection(
"test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY)
)
coll.insert_one(doc)
raw_java_legacy = CodecOptions(
uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument
)
coll = db.get_collection("test_raw", codec_options=raw_java_legacy)
self.assertEqual(
RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), coll.find_one()
)
@client_context.require_connection
def test_raw_bson_document_embedded(self):
doc = {"embedded": self.document}
db = self.client.pymongo_test
db.test_raw.insert_one(doc)
result = db.test_raw.find_one()
assert result is not None
self.assertEqual(decode(self.document.raw), result["embedded"])
# Make sure that CodecOptions are preserved.
# {'embedded': [
# {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000),
# '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')}
# ]}
# encoded with JAVA_LEGACY uuid representation.
bson_string = (
b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00"
b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00"
b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00"
b"\x00"
)
rbd = RawBSONDocument(
bson_string,
codec_options=CodecOptions(
uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument
),
)
db.test_raw.drop()
db.test_raw.insert_one(rbd)
result = db.get_collection(
"test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY)
).find_one()
assert result is not None
self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"])
@client_context.require_connection
def test_write_response_raw_bson(self):
coll = self.client.get_database(
"pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument)
).test_raw
# No Exceptions raised while handling write response.
coll.insert_one(self.document)
coll.delete_one(self.document)
coll.insert_many([self.document])
coll.delete_many(self.document)
coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True)
coll.update_many(self.document, {"$set": {"b": "c"}})
def test_preserve_key_ordering(self):
keyvaluepairs = [
("a", 1),
("b", 2),
("c", 3),
]
rawdoc = RawBSONDocument(encode(SON(keyvaluepairs)))
for rkey, elt in zip(rawdoc, keyvaluepairs):
self.assertEqual(rkey, elt[0])
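# Illustrative helper (an assumption for this sketch, not used by the suite above): shows the
# core RawBSONDocument behaviour the tests exercise, without needing a running server.
def _raw_bson_sketch():
    source = {"name": "Sherlock", "addresses": [{"street": "Baker Street"}]}
    raw = RawBSONDocument(encode(source))
    assert raw["name"] == "Sherlock"   # fields are decoded lazily on access
    assert decode(raw.raw) == source   # the original bytes are preserved in .raw
    return raw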
if __name__ == "__main__":
unittest.main()
|
|
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""
Contains functions that invoke the playbook.
"""
import sys
import json
import traceback
import argparse
from collections import namedtuple
import errno
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.color import stringc
from ansible import constants as CONST
verbosity = CONST.DEFAULT_VERBOSITY or 0
# Overrides the default logger from ansible/utils/display.py.
# fabric_ansible_logger customizes log message formatting
# Note that some internal ansible code inherits "display" from __main__,
# which is this file.
# Also note that CONST is from ansible.cfg
#
from job_manager.fabric_logger import fabric_ansible_logger
logger = fabric_ansible_logger("ansible")
# Overrides the default display processing from ansible/utils/display.py
# The default display method suppresses certain output for remote hosts
# while we want this information sent to the logs
#
def fabric_ansible_display(self, msg, color=None, stderr=False,
screen_only=False, log_only=False):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if color:
msg = stringc(msg, color)
if not log_only:
if not msg.endswith(u'\n'):
msg2 = msg + u'\n'
else:
msg2 = msg
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr),
errors='replace')
# Note: After Display() class is refactored need to update the
# log capture code in 'bin/ansible-connection' (and other
# relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger:
msg2 = nocolor.lstrip(u'\n')
msg2 = to_bytes(msg2)
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
if color == CONST.COLOR_ERROR:
logger.error(msg2)
else:
logger.info(msg2)
import ansible.utils.display as default_display
default_display.logger = logger
default_display.Display.display = fabric_ansible_display
from ansible.utils.display import Display
display = Display(verbosity)
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.executor.playbook_executor import PlaybookExecutor
from job_manager.job_messages import MsgBundle
from job_manager.job_manager_logger import job_mgr_logger
JM_LOGGER = job_mgr_logger("FabricAnsible")
class PlaybookHelper(object):
def get_plugin_output(self, pbex):
output_json = pbex._tqm._variable_manager._nonpersistent_fact_cache[
'localhost'].get('output')
return output_json
def execute_playbook(self, playbook_info):
try:
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=['localhost'])
variable_manager = VariableManager(loader=loader,
inventory=inventory)
Options = namedtuple('Options',
['listtags', 'listtasks', 'listhosts',
'syntax', 'connection', 'module_path',
'forks', 'remote_user', 'private_key_file',
'ssh_common_args', 'ssh_extra_args',
'sftp_extra_args', 'scp_extra_args',
'become', 'become_method', 'become_user',
'verbosity', 'check', 'diff'])
options = Options(listtags=False, listtasks=False, listhosts=False,
syntax=False, connection='ssh', module_path=None,
forks=100, remote_user=None,
private_key_file=None, ssh_common_args=None,
ssh_extra_args=None, sftp_extra_args=None,
scp_extra_args=None, become=None,
become_method=None, become_user=None,
verbosity=None, check=False, diff=False)
variable_manager.extra_vars = playbook_info['extra_vars']
pbex = PlaybookExecutor(playbooks=[playbook_info['uri']],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options, passwords=None)
ret_val = pbex.run()
if ret_val != 0:
msg = MsgBundle.getMessage(MsgBundle.
PLAYBOOK_RETURN_WITH_ERROR)
raise Exception(msg)
output = self.get_plugin_output(pbex)
if output is None or output.get('status') is None:
msg = MsgBundle.getMessage(MsgBundle.
PLAYBOOK_OUTPUT_MISSING)
raise Exception(msg)
if output.get('status').lower() == "failure":
msg = MsgBundle.getMessage(MsgBundle.
PLAYBOOK_STATUS_FAILED)
raise Exception(msg)
return output
except Exception as exp:
msg = MsgBundle.getMessage(MsgBundle.PLAYBOOK_EXECUTE_ERROR,
playbook_uri=playbook_info['uri'],
execution_id=playbook_info['extra_vars']
['playbook_input']['job_execution_id'],
exc_msg=repr(exp))
if getattr(exp, 'message', None):
    msg = msg + "\n" + exp.message
JM_LOGGER.error(msg)
# after handling exception, write an END
# to stop listening to the file if created
unique_pb_id = playbook_info['extra_vars'][
'playbook_input']['unique_pb_id']
exec_id = playbook_info['extra_vars']['playbook_input'][
'job_execution_id']
with open("/tmp/"+exec_id, "a") as f:
f.write(unique_pb_id + 'END' + '\n')
sys.exit(msg)
def parse_args():
parser = argparse.ArgumentParser(description='Ansible playbook input '
'parameters')
parser.add_argument('-i', '--playbook_input', nargs=1,
help='Playbook input json')
return parser.parse_args()
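# Minimal sketch of the single JSON argument this helper expects (all values below are
# illustrative assumptions): the job manager passes it via -i/--playbook_input, and the
# job_execution_id/unique_pb_id are later used to write the PLAYBOOK_OUTPUT## and END
# marker lines to /tmp/<job_execution_id>.
def _example_playbook_input():
    return json.dumps({
        "uri": "/opt/playbooks/example.yml",  # hypothetical playbook path
        "extra_vars": {
            "playbook_input": {
                "job_execution_id": "1234",   # names the /tmp/<exec_id> marker file
                "unique_pb_id": "abcd"        # prefixes the PLAYBOOK_OUTPUT/END lines
            }
        }
    })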
if __name__ == "__main__":
playbook_input_json = None
try:
playbook_params = parse_args()
playbook_input_json = json.loads(playbook_params.playbook_input[0])
if playbook_input_json is None:
sys.exit(MsgBundle.getMessage(MsgBundle.NO_PLAYBOOK_INPUT_DATA))
except Exception as exp:
ERR_MSG = "Failed to start playbook due "\
"to Exception: %s" % traceback.print_stack()
JM_LOGGER.error(ERR_MSG)
sys.exit(MsgBundle.getMessage(MsgBundle.PLAYBOOK_INPUT_PARSING_ERROR,
exc_msg=repr(exp)))
playbook_helper = PlaybookHelper()
pb_output = playbook_helper.execute_playbook(playbook_input_json)
# if it comes here, it implies the pb_output is of correct
# format and is present with status Success. So derive the
# playbook output to be written to file and finally write END to the file
try:
unique_pb_id = playbook_input_json['extra_vars'][
'playbook_input']['unique_pb_id']
exec_id = playbook_input_json['extra_vars']['playbook_input'][
'job_execution_id']
# messages to be given to next playbook(s)
JM_LOGGER.info("Printing pb output results from pb_helper.py -->>>")
JM_LOGGER.info(pb_output)
line_in_file = unique_pb_id + 'PLAYBOOK_OUTPUT##'\
+ json.dumps(pb_output) + 'PLAYBOOK_OUTPUT##'\
+ '\n'
with open("/tmp/"+exec_id, "a") as f:
f.write(line_in_file + unique_pb_id + 'END' + '\n')
except Exception as exc:
ERR_MSG = "Error while trying to parse output"\
" from playbook due to exception: %s"\
% str(exc)
with open("/tmp/"+exec_id, "a") as f:
f.write(unique_pb_id + 'END' + '\n')
JM_LOGGER.error(ERR_MSG)
# not stopping execution just because of parsing error
# no sys.exit therefore
|
|
# Pixie - last updated for NodeBox 1.9.3
# Author: Tom De Smedt <[email protected]>
# Manual: http://nodebox.net/code/index.php/Pixie
# Copyright (c) 2007 by Tom De Smedt.
# See LICENSE.txt for details.
# Remember to install the fonts before running the library.
from random import choice
from nodebox.graphics import CENTER, CORNER, CMYK
from nodebox.util import random, choice
SPACING = 1.2
def spacing(x=1.2):
"""Sets the lineheight or linespacing
for a pixie text paragraph().
"""
global SPACING
SPACING = x
lineheight = spacing
COLOR = (0.8, 0.4, 0.0, 0.4)
def color(c=0.8, m=0.4, y=0.0, k=0.4):
"""Sets the text color
for a pixie text paragraph().
"""
global COLOR
COLOR = (c, m, y, k)
KEYWORDS = []
KEYWORDS_ALL = False
def keywords(list=[], all=False):
"""Sets a list of keywords pixie should mark as important.
The paragraph() command uses this list to determine which
words should be drawn within a border. When all=True,
it draws every occurrence of a keyword in a border,
but by default only the first one.
"""
#If list is a string, convert it to a list.
from types import StringType
if type(list) == StringType: list = [list]
global KEYWORDS, KEYWORDS_ALL
KEYWORDS = list
KEYWORDS_ALL = all
def border(x, y, width, pt=20, slant=0.5):
"""Draws a handrwritten rectangle at (x,y) with given width.
This command is usually called from within
the pixie text paragraph() command as a callback
that draws borders around words.
"""
_ctx.transform(CENTER)
#Somewhere in the Pixie-Dingbats font are rectangle characters.
glyph = choice(("A", "B"))
#A thing about borders is that you
#usually draw one, and then move your pen around on those lines,
#so the border becomes really thick and visible.
#The same is done here with a for-loop.
for i in range(random(1,5)):
glyphwidth = random(2.3, 2.4)
_ctx.push()
_ctx.scale(width/(glyphwidth*pt), random(1.1, 1.2))
_ctx.skew(slant*7 + random(8))
_ctx.rotate(random(-slant,0))
f = width/(glyphwidth*pt)
f = f*glyphwidth*pt - glyphwidth*pt
f = f/2
_ctx.font("Pixie-Dingbats", pt)
_ctx.text(glyph, x+f, y, width)
_ctx.pop()
def mistake(txt, x, y, pt=20, slant=0.5):
"""Draws a piece of pixie text in strikethrough.
This command is used as a callback from paragraph(),
inserting some errors into the paragraph flow
here and there.
"""
#Somewhere in the Pixie-Dingbats font are strikethrough characters.
glyph = choice(("C", "D"))
#Guess the length of txt set in the paragraph() command.
#The dx is a visual correction, so the typo appears not
#too close to the previous word, and not too close to the next.
width = len(txt) * random(pt*0.4, pt*0.5)
dx = width/len(txt) * 0.5
#Create a typo in the txt string
#At the current position, draw the typo txt in paragraph().
char = choice(txt[min(len(txt)-1,1):])
txt = txt.replace(char, choice("abcdefghijklmnopqrstuvwxyz"))
paragraph(txt, x-dx, y, width*2, pt, slant)
#Now, strikethrough the typo.
_ctx.push()
_ctx.scale(width/(pt*2.5), random(0.7, 1.3))
_ctx.rotate(random(-3,3))
_ctx.font("Pixie-Dingbats")
_ctx.text(glyph, x-dx, y)
_ctx.pop()
return width
DISTRACTION = 0.05
def distraction(d=0.05):
"""Determine how many writing errors pixie makes.
Distraction ranges between 0.0 and 1.0,
setting how often mistake() is called from paragraph().
The more distracted pixie gets, the more words will be
scribbled through.
Making mistakes is a recursive process: corrections of
mistakes might become mistakes themselves, and so on.
Setting a high distraction on a long paragraph may take
a while to draw.
"""
global DISTRACTION
DISTRACTION = max(0,min(d,1))
def underline(x, y, width, pt=20):
"""Draws a horizontal line.
Draws a horizontal line at (x,y) with the given width.
This command serves as a callback for paragraph()
to underline a paragraph of text, just as Tom does.
"""
_ctx.font("Pixie-Dingbats", pt)
y += random(pt*0.5)
#In handwriting, when you stress something by underlining it,
#you never just put one measly line. This is simulated here
#by drawing several lines on top of each other, like scratching.
for i in range(random(1,2)):
line(x, y+random(pt*0.1), x+width+random(pt), y+random(pt*0.1))
#Little construction banners to top off
#that really you-just-gotta-have Tom handwriting!
if random(100)>94:
_ctx.scale(random(0.9,1.1))
under_construction = "L"
_ctx.text(under_construction, x+width-1.5*pt, y-pt*0.2)
#Return the vertical offset of the cursor.
return y
def height(txt, width, pt=20):
"""Returns the height of paragraph() with txt, width and pt.
This command gives "some" idea of the height,
with an average deviation of 20%.
"""
dx = 0
dy = 0
for i in range(len(txt)):
dx += random(pt*0.3, pt*0.5)
dy += random(-pt*0.05, pt*0.05)
if dx > width and txt[i] in (" ", ".", ","):
dx = random(-pt*0.1, pt*0.2)
dy += random(pt*0.7, pt*0.9) * SPACING
dy += random(pt*0.7, pt*0.9) * SPACING
return dy
textheight = height
def paragraph(txt, x, y, width, pt=20, slant=0.5, line=False, serif=False):
"""Draws a paragraph of Tom's handwriting.
Draws the string txt in Tom's handwriting,
positioned at x, y with the given width, font size pt.
The slant argument controls the speed at which Tom writes.
The lineheight setting of spacing() is taken into account,
and words supplied to keywords() are framed in a border.
The text is drawn in color().
The serif parameter defines serif characters
to be used in the paragraph.
"""
#Ensure that all pixiekeywords() are found,
#even in a single word pixie text.
txt += " "
keywords_done = []
keyword_end = -1
_ctx.transform(CENTER)
dx = x
dy = y
for i in range(len(txt)):
_ctx.push()
#There is a world of difference between handwritten glyphs in a font,
#and handwriting. Handwriting doesn't stay on a straight line,
#two characters never look identical, the pen presses down harder
#now and then. The same is simulated with scale, skew and rotate.
_ctx.scale(random(0.9, 1.1), random(0.9, 1.1))
_ctx.skew(slant*10 + random(8))
_ctx.rotate(slant*-2)
#Set color to pixiecolor(), varying the K value
#to simulate pen pressure.
_ctx.colormode(CMYK)
c, m, y, k = COLOR
_ctx.fill(c, m, y, k+random(-0.2,0.2))
#Draw the current character in txt in the given fontsize.
#Use a bold font for text in a border (see below).
fonts = ("Pixie","Pixie-SemiBold")
if serif: fonts += ("Pixie-Light",)
_ctx.font(choice(fonts), pt)
if i <= keyword_end: _ctx.font("Pixie-Bold", pt)
try: _ctx.text(txt[i].upper(), dx, dy+random(slant*pt*0.1))
except: pass
_ctx.pop()
#Traverse the list of keywords,
#if we are at the beginning of one of those words,
#set a x-coordinate flag.
for keyword in KEYWORDS:
j = i+len(keyword)
#No need to continue if only the first encounter of a keyword
#needs to be processed.
if KEYWORDS_ALL == False and keyword in keywords_done: pass
elif txt[i:j].lower() == keyword.lower():
keywords_done.append(keyword)
keyword_x = dx
keyword_end = j
#When the end of that word is reached,
#we know its width and can draw a border around it.
if i == keyword_end:
border(keyword_x, dy, dx-keyword_x, pt, slant)
#Advance the cursor to the next character in txt.
dx += random(pt*0.3, pt*0.5)
dy += random(-pt*0.05, pt*0.05)
#Advance to a new line if this line exceeds the width,
#and is at the end of a word.
#The spacing() lineheight is taken into account.
if txt[i] == "\n" or (dx-x > width and txt[i] in (" ", ".", ",")):
dx = x + random(-pt*0.1, pt*0.2)
dy += random(pt*0.7, pt*0.9) * SPACING
#Before drawing a nice new word, it is possible
#that a small error is made; after all, when writing
#a text by hand, some things have to be corrected as well.
if txt[i] in (" ", ".", ",") and random()<DISTRACTION/2.0:
dx += mistake(txt[i:i+random(3,5)], dx, dy, pt, slant)
if line:
#Draw a line underneath the paragraph of text.
dy = underline(x, dy, width, pt)
#Return the offset of the cursor.
dy += (random(pt*0.7, pt*0.9) * SPACING) * 0.75
return (dx,dy)
text = paragraph
def heading(txt, x, y, width, pt=30, slant=0.0):
"""Draws a title heading in Tom's handwriting.
Draws the string txt in Tom's handwriting,
positioned at x, y with the given width, font size pt.
The slant argument controls the speed at which Tom writes.
The lineheight setting of spacing() is taken into account.
The text is drawn in color().
"""
txt = txt.upper()
_ctx.transform(CENTER)
dx = x
dy = y
for i in range(len(txt)):
_ctx.push()
#Vary each individual character
#to simulate handwriting.
_ctx.scale(random(0.9, 1.5), random(0.9, 1.5))
_ctx.skew(slant*10 + random(8))
_ctx.rotate(slant*-2)
#Set color to pixiecolor(), varying the K value
#to simulate pen pressure.
_ctx.colormode(CMYK)
c, m, y, k = COLOR
_ctx.fill(c, m, y, k+random(-0.0,0.3))
#Draw the current character in txt in the given fontsize.
_ctx.font("Pixie-Fat", pt)
_ctx.text(txt[i], dx, dy, width)
#Advance the cursor to the next character in txt.
dx += random(pt*1.4, pt*1.5)
dy += random(-pt*0.1, pt*0.1)
#Advance to a new line if this line exceeds the width,
#and is at the end of a word.
#The spacing() lineheight is taken into account.
if txt[i] == "\n" or (dx-x > width and txt[i] in (" ", ".", ",")):
dx = x + random(-pt*0.1, pt*0.2)
dy += random(pt*1.3, pt*1.6) * SPACING
_ctx.pop()
#To top it off, draw a cool doodle next to the heading.
if random(100) > 97:
sprite(dx+pt*0.3, dy, pt=pt*0.9)
#Return the offset of the cursor.
dy += random(pt*0.7, pt*0.9) * SPACING
return (dx,dy)
def list(title, list, x, y, width, pt=20, slant=0.5):
"""Draws a small list scheme.
Draws a list scheme with the title centrally placed in a border.
Arrows branch from the title to words in the given list.
"""
from math import radians, sin, cos
#Draw the title in a border using pixie().
#A space is added to the title to ensure it draws a border around it.
spacing(1.0)
keywords(title.split(" "))
txt = title+" "
x_end, y = paragraph(txt, x, y, width, pt, slant)
y -= pt/1.25
for i in range(len(list)):
_ctx.push()
#Somewhere in the Pixie-Dingbats font are arrow characters.
glyph = choice(("E", "F", "G"))
#Set color to pixiecolor(), varying the K value
#to simulate pen pressure.
_ctx.colormode(CMYK)
cc, mc, yc, kc = COLOR
_ctx.fill(cc, mc, yc, kc+random(-0.1,0.2))
#Draw an arrow branching from the title.
_ctx.transform(CORNER)
_ctx.push()
_ctx.translate(x+pt/2,y)
a = random(-40,-35)*(i+1)
_ctx.rotate(a)
f = random(1.0,1.5)
_ctx.scale(f, max(1,f/2))
_ctx.font("Pixie-Dingbats", pt)
_ctx.text(glyph, pt/3, pt*0.35)
_ctx.pop()
#Good old-fashioned geometry to
#calculate where we put the list item next to an arrow.
#Play around with the numbers to get the position exactly right.
glyphwidth = 4.5
dx = cos(radians(a)) * pt * glyphwidth * f
dy = sin(radians(a)) * pt * glyphwidth * (f/1.5)
if a % 360 > 0 and a % 360 < 180: dy += pt*1.5
if a % 360 > 240 and a % 360 < 360: dy -= pt/2
if a % 360 > 80 and a % 360 < 110: dy += pt/2
_ctx.transform(CENTER)
paragraph(list[i], x+dx, y-dy+pt, width/2, pt*0.75)
def sprite(x, y, pt=40):
"""Draws an imbecile.
Draws a doodle sprite at (x,y),
varying legs, faces, bodies, and more.
"""
_ctx.transform(CENTER)
#Set color to pixiecolor(), varying the K value
#to simulate pen pressure.
_ctx.colormode(CMYK)
cc, mc, yc, kc = COLOR
_ctx.fill(cc, mc, yc, kc+random(-0.2,0.2))
#Somewhere in the Pixie-Dingbats font are arms, legs, ...
body = choice(("a","b","c","j","k","l","t","u"))
face = choice(("d","e","f","m","n","o","v","w"))
legs = choice(("g","h","i","p","q","r","x","y"))
balloons = choice(("s","z"))
_ctx.fill(cc, mc, yc, kc+random(-0.2,0.2))
#Draw a body.
_ctx.rotate(random(-20,20))
_ctx.skew(random(-20,20),random(-20,20))
_ctx.font("Pixie-Dingbats", pt * random(0.8,1.4))
_ctx.text(body, x, y)
_ctx.reset()
_ctx.fill(cc, mc, yc, kc+random(-0.2,0.2))
#Draw a face.
_ctx.rotate(random(-20,20))
_ctx.skew(random(-20,20),random(-20,20))
_ctx.font("Pixie-Dingbats", pt * random(0.8,1.4))
_ctx.text(face, x, y)
_ctx.reset()
_ctx.fill(cc, mc, yc, kc+random(-0.2,0.2))
#Draw legs.
_ctx.rotate(random(-20,20))
_ctx.font("Pixie-Dingbats", pt * random(0.9,1.5))
_ctx.text(legs, x, y)
_ctx.reset()
if random(100)>90:
_ctx.fill(cc, mc, yc, kc+random(-0.2,0.2))
#Draw balloon text.
if random(100)>90:
_ctx.rotate(random(-20,20))
_ctx.font("Pixie-Dingbats", pt * random(0.9,1.5))
_ctx.text(balloons, x, y)
_ctx.reset()
def line(x1, y1, x2, y2):
"""Draws a pixie line from coordinates (x1,y1) to (x2,y2)
"""
_ctx.push()
#Set color to pixiecolor(), varying the K value
#to simulate pen pressure.
_ctx.colormode(CMYK)
cc, mc, yc, kc = COLOR
_ctx.fill(cc, mc, yc, kc+random(-0.2,0.2))
#Calculate the length of the line as c.
from math import sqrt, pow, asin, degrees
a = x2 - x1
b = y2 - y1
c = sqrt(pow(a,2) + pow(b,2))
#Choose line glyphs, according to the size of c.
#This ensures that lines of different lengths
#have more or less a same thickness after scaling them.
pt = 30
_ctx.font("Pixie-Dingbats", pt)
if c < 150:
glyphs = ("S","T")
glyphwidth = 1.5*pt
elif c > 400:
glyphs = ("U","V","W","X")
glyphwidth = 10.0*pt
else:
glyphs = ("M", "N", "O")
glyphwidth = 5.5*pt
#Determine the line's angle.
d = degrees(asin(b/(c+0.0001)))
if x2<x1: d = 180 - d
#Scale the glyph to the length of the line.
_ctx.transform(CENTER)
f = c/glyphwidth+0.0001
_ctx.scale(f,f)
#Rotate the glyph to the line's angle.
_ctx.transform(CORNER)
_ctx.translate(x1/f,y1/f)
_ctx.rotate(-d)
#Draw the line glyph.
_ctx.text(choice(glyphs), 0, 0)
_ctx.pop()
def node(x, y, d):
"""Draws a small pixie circle.
This function is expected to work with line(),
together creating a network of nodes.
"""
_ctx.push()
#Set color to pixiecolor(), varying the K value
#to simulate pen pressure.
_ctx.colormode(CMYK)
cc, mc, yc, kc = COLOR
_ctx.fill(cc, mc, yc, kc+random(-0.2,0.2))
pt = 30
_ctx.font("Pixie-Dingbats", pt)
glyphs = ("P","Q","R")
#Scale the glyph to diameter d.
_ctx.transform(CENTER)
f = d/(0.6*pt)
_ctx.scale(f)
for i in range(random(1,3)):
#Draw the glyph, with its center roughly at (x,y).
_ctx.text(choice(glyphs), x-pt/2, y+pt/4)
_ctx.pop()
def tree(root, nodes, x, y, width, height, pt=20, max=10, grow=False, border=False):
"""Draws a tree network scheme.
Draws a tree scheme exploding from a central root.
The nodes list is expected to contain, for example:
[ ("node1",["leaf1a","leaf1b"]), ("node2",["leaf2a"]) ].
Branches connect the nodes, and each node has its
leaves drawn around it.
Nodes grow smaller and smaller if grow is True.
Uses line() and node() as callback to draw the network.
"""
_ctx.push()
#The number of nodes to draw
count = min(max, len(nodes))
#Create a path on which nodes can
#placed later on.
path = [(x,y)]
_ctx.beginpath(x,y)
x0 = x
y0 = y
for i in range(count):
xradius = width/count*(i+0.1)
yradius = height/count*(i+0.1)
#A random location.
#These "grow" further and further away
#from the centre
#dx = x+random(-xradius,xradius)/2
#dy = y+random(-yradius,yradius)/2
dx = x + xradius * random(0.25,0.5) * choice((-1,1))
dy = y + yradius * random(0.25,0.5) * choice((-1,1))
line(x0, y0, dx, dy)
path.append((dx,dy))
x0 = dx
y0 = dy
for x in range(count):
#Get coordinates on path.
dx = path[x+1][0]
dy = path[x+1][1]
#For each node, get its leaves.
nodename, leaves = nodes[x]
#Draw the leaves of a node around an oval.
#The maximum of leaves is limited,
#if you draw more, the tree starts to look very unhumanlike.
#I don't think you would draw trees with, say ten or twenty
#leaves by hand.
limit = 3
angle = 15
for i in range(min(limit,len(leaves))):
w = random(-width/16,width/16)
h = random(-width/16,width/16)
line(dx, dy, dx+w, dy+h)
paragraph(leaves[i], dx+w-30, dy+h+pt/4, width/5, pt*0.65)
#Draw the node oval.
#Oval grow smaller and smaller if grow was set to True.
if grow: size = 1.0 - 0.5 * x/count
else: size = 1
node(dx, dy, pt*size)
#Draw the example text on the oval.
#Delay the first node,
#we'll put that one on top later.
if random(100)>95: keywords(nodename, all=choice((True,False)))
paragraph(nodename, dx+pt/3, dy-pt/3, width/2, pt)
#Draw the first central example text.
dx = path[0][0]
dy = path[0][1]
node(dx, dy, pt)
paragraph(root, dx+pt/3, dy-pt/3, width/2)
#Draw a border around the diagram:
if border:
dx -= width/2
dy -= height/2
line(dx-pt/2, dy, dx+width-pt/2, dy)
line(dx+width, dy-pt/2, dx+width-random(pt), dy+height-pt/2)
line(dx+width, dy+height, dx, dy+height-random(pt))
line(dx, dy+height, dx, dy)
_ctx.pop()
def tornado(str, x, y):
"""Experimental tornade-style pixie text.
Text that whirls around like a gust of wind.
Provided as-is for now.
"""
from math import sin, cos, tan, log10
cX = random(1,10)
cY = random(1,10)
for i in range(len(str)):
s = cos(cX)*20
x += cos(cY)*s*1.2
y += log10(cX)*1.2 + sin(cX) * 8
_ctx.push()
paragraph(str[i], x-s/2, y-s/2, 100, pt=max(15,abs(s*1.5)))
_ctx.pop()
cX += random(0.45)
cY += random(0.15)
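#Minimal NodeBox usage sketch (assumption: run inside NodeBox with the Pixie fonts
#installed, as noted in the header; the text and coordinates are illustrative only):
#
#    pixie = ximport("pixie")
#    size(550, 400)
#    pixie.color(0.1, 0.3, 0.6, 0.8)
#    pixie.keywords(["handwriting"])
#    pixie.heading("Pixie", 40, 80, 480)
#    pixie.paragraph("A library that fakes Tom's handwriting; "
#                    "keywords get a border and lines get underlined.",
#                    40, 140, 480, line=True)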
|
|
import unittest
import datetime
import decimal
import Spartacus.Database
class TestPostgreSQL94(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.v_database = Spartacus.Database.PostgreSQL(
"127.0.0.1", 5494, "spartacus", "spartacus", "spartacus"
)
cls.v_database.Open()
cls.v_database.Execute(
"""
DROP TABLE IF EXISTS departments;
DROP TABLE IF EXISTS employees;
"""
)
cls.v_database.Execute(
"""
CREATE TABLE departments (
dept_no char(4) not null,
dept_name varchar(40) not null
);
"""
)
cls.v_database.Execute(
"""
INSERT INTO departments VALUES('d009','Customer Service');
INSERT INTO departments VALUES('d005','Development');
INSERT INTO departments VALUES('d002','Finance');
INSERT INTO departments VALUES('d003','Human Resources');
INSERT INTO departments VALUES('d001','Marketing');
INSERT INTO departments VALUES('d004','Production');
INSERT INTO departments VALUES('d006','Quality Management');
INSERT INTO departments VALUES('d008','Research');
INSERT INTO departments VALUES('d007','Sales');
"""
)
cls.v_database.Execute(
"""
CREATE TABLE employees (
emp_no integer not null,
birth_date text not null,
first_name varchar(14) not null,
last_name varchar(16) not null,
gender varchar(500) not null,
hire_date text not null
);
"""
)
cls.v_database.Close()
@classmethod
def tearDownClass(cls):
cls.v_database.Open()
cls.v_database.Execute(
"""
DROP TABLE IF EXISTS departments;
DROP TABLE IF EXISTS employees;
"""
)
cls.v_database.Close()
def test_open_close(self):
self.assertIsInstance(self.v_database, Spartacus.Database.PostgreSQL)
self.v_database.Open()
self.assertIsNot(self.v_database.v_con, None)
self.v_database.Close()
self.assertIs(self.v_database.v_con, None)
def test_getconstatus(self):
self.assertEqual(self.v_database.GetConStatus(), 0)
self.v_database.Open()
self.assertEqual(self.v_database.GetConStatus(), 1)
self.v_database.Close()
self.assertEqual(self.v_database.GetConStatus(), 0)
def test_open_autocommit_enabled(self):
self.v_database.Open(p_autocommit=True)
self.assertIsNot(self.v_database.v_con, None)
self.assertTrue(self.v_database.v_con.autocommit)
self.v_database.Close()
def test_open_autocommit_disabled(self):
self.v_database.Open(p_autocommit=False)
self.assertIsNot(self.v_database.v_con, None)
self.assertFalse(self.v_database.v_con.autocommit)
self.v_database.Close()
def test_executescalar(self):
v_result = self.v_database.ExecuteScalar(
"select dept_name from departments where dept_no = 'd005'"
)
self.assertEqual(v_result, "Development")
def test_execute(self):
self.v_database.Open()
self.v_database.Execute(
"insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
)
v_result = self.v_database.ExecuteScalar(
"select dept_name from departments where dept_no = 'd000'"
)
self.v_database.Execute("delete from departments where dept_no = 'd000'")
self.v_database.Close()
self.assertEqual(v_result, "Spartacus")
def test_commit(self):
self.v_database.Open(p_autocommit=False)
self.v_database.Execute(
"insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
)
self.v_database.Commit()
self.v_database.Close()
self.v_database.Open()
v_result = self.v_database.ExecuteScalar(
"select dept_name from departments where dept_no = 'd000'"
)
self.v_database.Execute("delete from departments where dept_no = 'd000'")
self.v_database.Close()
self.assertEqual(v_result, "Spartacus")
def test_rollback(self):
self.v_database.Open(p_autocommit=False)
self.v_database.Execute(
"insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
)
self.v_database.Rollback()
self.v_database.Close()
v_result = self.v_database.ExecuteScalar(
"select dept_name from departments where dept_no = 'd000'"
)
self.assertIs(v_result, None)
def test_close_commit(self):
self.v_database.Open(p_autocommit=False)
self.v_database.Execute(
"insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
)
self.v_database.Close(p_commit=True)
self.v_database.Open()
v_result = self.v_database.ExecuteScalar(
"select dept_name from departments where dept_no = 'd000'"
)
self.v_database.Execute("delete from departments where dept_no = 'd000'")
self.v_database.Close()
self.assertEqual(v_result, "Spartacus")
def test_close_rollback(self):
self.v_database.Open(p_autocommit=False)
self.v_database.Execute(
"insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
)
self.v_database.Close(p_commit=False)
v_result = self.v_database.ExecuteScalar(
"select dept_name from departments where dept_no = 'd000'"
)
self.assertIs(v_result, None)
def test_getfields(self):
v_result = self.v_database.GetFields(
"""
SELECT 1 AS id,
'Spartacus'::text AS name,
'1988-05-08 17:00:00'::timestamp without time zone AS birth_date,
9.8 AS grade
"""
)
self.assertEqual(len(v_result), 4)
for r in v_result:
self.assertIsInstance(r, Spartacus.Database.DataField)
self.assertEqual(v_result[0].v_name, "id")
self.assertIs(v_result[0].v_type, int)
self.assertEqual(v_result[0].v_dbtype, "int4")
self.assertEqual(v_result[1].v_name, "name")
self.assertIs(v_result[1].v_type, str)
self.assertEqual(v_result[1].v_dbtype, "text")
self.assertEqual(v_result[2].v_name, "birth_date")
self.assertIs(v_result[2].v_type, datetime.datetime)
self.assertEqual(v_result[2].v_dbtype, "timestamp")
self.assertEqual(v_result[3].v_name, "grade")
self.assertIs(v_result[3].v_type, decimal.Decimal)
self.assertEqual(v_result[3].v_dbtype, "numeric")
def test_query(self):
v_result = self.v_database.Query("select * from departments order by dept_no")
self.assertIsInstance(v_result, Spartacus.Database.DataTable)
v_template = ["dept_no", "dept_name"]
self.assertListEqual(v_result.Columns, v_template)
self.assertEqual(len(v_result.Rows), 9)
self.assertEqual(v_result.Rows[0]["dept_no"], "d001")
self.assertEqual(v_result.Rows[0]["dept_name"], "Marketing")
self.assertEqual(v_result.Rows[1]["dept_no"], "d002")
self.assertEqual(v_result.Rows[1]["dept_name"], "Finance")
self.assertEqual(v_result.Rows[2]["dept_no"], "d003")
self.assertEqual(v_result.Rows[2]["dept_name"], "Human Resources")
self.assertEqual(v_result.Rows[3]["dept_no"], "d004")
self.assertEqual(v_result.Rows[3]["dept_name"], "Production")
self.assertEqual(v_result.Rows[4]["dept_no"], "d005")
self.assertEqual(v_result.Rows[4]["dept_name"], "Development")
self.assertEqual(v_result.Rows[5]["dept_no"], "d006")
self.assertEqual(v_result.Rows[5]["dept_name"], "Quality Management")
self.assertEqual(v_result.Rows[6]["dept_no"], "d007")
self.assertEqual(v_result.Rows[6]["dept_name"], "Sales")
self.assertEqual(v_result.Rows[7]["dept_no"], "d008")
self.assertEqual(v_result.Rows[7]["dept_name"], "Research")
self.assertEqual(v_result.Rows[8]["dept_no"], "d009")
self.assertEqual(v_result.Rows[8]["dept_name"], "Customer Service")
def test_query_simple(self):
v_result = self.v_database.Query(
"select * from departments order by dept_no", p_simple=True
)
self.assertIsInstance(v_result, Spartacus.Database.DataTable)
v_template = ["dept_no", "dept_name"]
self.assertListEqual(v_result.Columns, v_template)
self.assertEqual(len(v_result.Rows), 9)
self.assertEqual(v_result.Rows[0][0], "d001")
self.assertEqual(v_result.Rows[0][1], "Marketing")
self.assertEqual(v_result.Rows[1][0], "d002")
self.assertEqual(v_result.Rows[1][1], "Finance")
self.assertEqual(v_result.Rows[2][0], "d003")
self.assertEqual(v_result.Rows[2][1], "Human Resources")
self.assertEqual(v_result.Rows[3][0], "d004")
self.assertEqual(v_result.Rows[3][1], "Production")
self.assertEqual(v_result.Rows[4][0], "d005")
self.assertEqual(v_result.Rows[4][1], "Development")
self.assertEqual(v_result.Rows[5][0], "d006")
self.assertEqual(v_result.Rows[5][1], "Quality Management")
self.assertEqual(v_result.Rows[6][0], "d007")
self.assertEqual(v_result.Rows[6][1], "Sales")
self.assertEqual(v_result.Rows[7][0], "d008")
self.assertEqual(v_result.Rows[7][1], "Research")
self.assertEqual(v_result.Rows[8][0], "d009")
self.assertEqual(v_result.Rows[8][1], "Customer Service")
def test_query_types(self):
v_result = self.v_database.Query(
"""
SELECT 1 AS id,
'Spartacus'::text AS name,
'1988-05-08 17:00:00'::timestamp without time zone AS birth_date,
9.8 AS grade
"""
)
self.assertIsInstance(v_result, Spartacus.Database.DataTable)
v_template = ["id", "name", "birth_date", "grade"]
self.assertListEqual(v_result.Columns, v_template)
self.assertEqual(len(v_result.Rows), 1)
self.assertEqual(v_result.Rows[0]["id"], 1)
self.assertIsInstance(v_result.Rows[0]["id"], int)
self.assertEqual(v_result.Rows[0]["name"], "Spartacus")
self.assertIsInstance(v_result.Rows[0]["name"], str)
self.assertEqual(
v_result.Rows[0]["birth_date"],
datetime.datetime.strptime("1988-05-08 17:00:00", "%Y-%m-%d %H:%M:%S"),
)
self.assertIsInstance(v_result.Rows[0]["birth_date"], datetime.datetime)
self.assertEqual(float(v_result.Rows[0]["grade"]), 9.8)
self.assertIsInstance(v_result.Rows[0]["grade"], decimal.Decimal)
def test_query_alltypesstr(self):
v_result = self.v_database.Query(
"""
SELECT 1 AS id,
'Spartacus'::text AS name,
'1988-05-08 17:00:00'::timestamp without time zone AS birth_date,
9.8 AS grade
""",
p_alltypesstr=True,
)
self.assertIsInstance(v_result, Spartacus.Database.DataTable)
v_template = ["id", "name", "birth_date", "grade"]
self.assertListEqual(v_result.Columns, v_template)
self.assertEqual(len(v_result.Rows), 1)
self.assertEqual(v_result.Rows[0]["id"], "1")
self.assertIsInstance(v_result.Rows[0]["id"], str)
self.assertEqual(v_result.Rows[0]["name"], "Spartacus")
self.assertIsInstance(v_result.Rows[0]["name"], str)
self.assertEqual(v_result.Rows[0]["birth_date"], "1988-05-08 17:00:00")
self.assertIsInstance(v_result.Rows[0]["birth_date"], str)
self.assertEqual(v_result.Rows[0]["grade"], "9.8")
self.assertIsInstance(v_result.Rows[0]["grade"], str)
def test_queryblock_connection_not_open(self):
with self.assertRaises(Spartacus.Database.Exception):
v_result = self.v_database.QueryBlock(
"select * from departments order by dept_no", 4
)
def test_queryblock(self):
self.v_database.Open()
self.assertTrue(self.v_database.v_start)
v_result = self.v_database.QueryBlock(
"select * from departments order by dept_no", 4
)
self.assertFalse(self.v_database.v_start)
self.assertEqual(len(v_result.Rows), 4)
v_result = self.v_database.QueryBlock(
"select * from departments order by dept_no", 4
)
self.assertFalse(self.v_database.v_start)
self.assertEqual(len(v_result.Rows), 4)
v_result = self.v_database.QueryBlock(
"select * from departments order by dept_no", 4
)
self.assertTrue(self.v_database.v_start)
self.assertEqual(len(v_result.Rows), 1)
self.v_database.Close()
self.assertTrue(self.v_database.v_start)
def test_insertblock(self):
v_table = Spartacus.Database.DataTable()
v_table.AddColumn("dept_no")
v_table.AddColumn("dept_name")
v_table.AddRow(["d010", "Spartacus"])
v_table.AddRow(["d011", "Python"])
self.v_database.InsertBlock(v_table, "departments")
v_result = self.v_database.Query(
"select * from departments where dept_no in ('d010', 'd011')"
)
self.assertEqual(len(v_result.Rows), 2)
self.assertEqual(v_result.Rows[0]["dept_no"], "d010")
self.assertEqual(v_result.Rows[0]["dept_name"], "Spartacus")
self.assertEqual(v_result.Rows[1]["dept_no"], "d011")
self.assertEqual(v_result.Rows[1]["dept_name"], "Python")
self.v_database.Execute(
"delete from departments where dept_no in ('d010', 'd011')"
)
def test_insertblock_fields(self):
v_fields = self.v_database.GetFields("select * from employees limit 1")
v_table = Spartacus.Database.DataTable()
for f in v_fields:
v_table.AddColumn(f.v_name)
v_table.AddRow([500000, "1988-05-08", "Spartacus", "Python", "M", "2006-01-01"])
v_table.AddRow([500001, "1988-05-08", "Spartacus", "Python", "M", "2006-01-01"])
self.v_database.InsertBlock(v_table, "employees", v_fields)
v_result = self.v_database.Query(
"select * from employees where emp_no in (500000, 500001)"
)
self.assertEqual(len(v_result.Rows), 2)
self.assertEqual(v_result.Rows[0]["emp_no"], 500000)
self.assertEqual(v_result.Rows[0]["first_name"], "Spartacus")
self.assertEqual(v_result.Rows[1]["emp_no"], 500001)
self.assertEqual(v_result.Rows[1]["first_name"], "Spartacus")
self.v_database.Execute(
"delete from employees where emp_no in (500000, 500001)"
)
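# Standalone usage sketch of the Spartacus.Database API exercised above (the connection
# parameters are the same illustrative ones used in setUpClass); not run by the tests.
def _spartacus_usage_sketch():
    v_database = Spartacus.Database.PostgreSQL(
        "127.0.0.1", 5494, "spartacus", "spartacus", "spartacus"
    )
    v_database.Open()
    v_table = v_database.Query("select dept_no, dept_name from departments order by dept_no")
    for v_row in v_table.Rows:
        print(v_row["dept_no"], v_row["dept_name"])
    v_database.Close()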
if __name__ == "__main__":
unittest.main()
|
|
from itertools import cycle, islice
import re
ascii_alphabet = 'abcdefghijklmnopqrstuvwxyz'
alpha_numerics = 'abcdefghijklmnopqrstuvwxyz0123456789'
vowels = 'aeiou'
consonants = 'bcdfghjklmnpqrstvwxyz'
vowels_and_consonants = (vowels, consonants)
def number_to_multi_base(n, b):
"""
Convert a number to a multi-base (generalization of base projection).
Args:
n: The number to convert
b: The base to convert it to
Returns: A list representing the number in the desired base.
# When b is just one number, it's the base (for e.g. b=2 means binary base)
>>> number_to_multi_base(3, 2)
[1, 1]
>>> number_to_multi_base(4, 2)
[1, 0, 0]
>>> number_to_multi_base(5, 2)
[1, 0, 1]
# But the cool thing about number_to_multi_base is that you can have a more complex base (any iterable, really)
>>> number_to_multi_base(11, [2, 3])
[1, 2, 1]
>>> number_to_multi_base(12, [2, 3])
[1, 0, 0, 0]
>>> number_to_multi_base(13, [2, 3])
[1, 0, 0, 1]
>>> number_to_multi_base(14, [2, 3])
[1, 0, 1, 0]
>>> number_to_multi_base(15, [2, 3])
[1, 0, 1, 1]
>>> number_to_multi_base(16, [2, 3])
[1, 0, 2, 0]
"""
if isinstance(b, (int, float)):
b = [b]
base = cycle(b)
if n == 0:
return [0]
digits = []
while n:
b = next(base)
digits.append(int(n % b))
n //= b
return digits[::-1]
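def multi_base_to_number(digits, b):
    # Hedged inverse sketch (assumed helper, not in the original module):
    # folds a digit list produced by number_to_multi_base back into the
    # integer it came from, cycling the same base sequence from the least
    # significant digit upward, e.g. [1, 0, 2, 0] with b=[2, 3] -> 16.
    if isinstance(b, (int, float)):
        b = [b]
    base = cycle(b)
    n, place = 0, 1
    for d in reversed(digits):
        n += d * place
        place *= next(base)
    return n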
def str_from_num_list(coord, symbols_for_base_idx=vowels_and_consonants, base_phase=0):
"""
    Make a string from the coordinates (coord) of a number in a given base system (inferred from
    symbols_for_base_idx and base_phase).
NOTE: symbols_for_base_idx sets should (in most cases) all be disjoint (but this is not validated!)
Args:
coord: An array of integers. Coordinates of a number in a given base system
base_phase: Which base (of symbols_for_base_idx) to start with (and then cycle)
symbols_for_base_idx: Sets of symbols for each base
Returns:
        A string (the mapping of the number represented by coord).
>>> str_from_num_list([1,2,1,2], ['ae', 'xyz'])
'ezez'
>>> str_from_num_list([1,2,1,0], ['ae', 'xyz'])
'ezex'
>>>
>>> # [1,2,0,1] is [1,2,1,0], with the last two digits flipped, but you don't get ezxe in the following:
>>> str_from_num_list([1,2,0,1], ['ae', 'xyz'])
'ezay'
"""
n = len(symbols_for_base_idx)
s = ''
for letter_idx, collection_idx in zip(coord, islice(cycle(range(n)), base_phase, None)):
# print(f"{letter_idx} === {collection_idx}")
s += symbols_for_base_idx[collection_idx][letter_idx]
return s
# TODO: Look into coverage. Couldn't produce 'magic' with ['ai', 'mgc'] or ['mgc', 'ai']
def text_for_num(num, symbols_for_base_idx=vowels_and_consonants):
"""
Map a number to a string.
    The map is bijective (a.k.a. "1-to-1") if the sets of symbols in symbols_for_base_idx are non-overlapping.
Args:
num: A number to map to text
symbols_for_base_idx: The sets of symbols to use: A list of strings, each string representing a
collection of symbols to use in each base.
Returns:
A string representing the input number.
>>> # using the default symbols_for_base_idx (vowels and consonants):
>>> text_for_num(1060)
'caca'
>>> text_for_num(14818)
'sapu'
>>> text_for_num(335517)
'tecon'
>>>
>>> # using custom ones:
>>> text_for_num(153, ['ai', 'gcm'])
'magic'
"""
base_cardinalities = list(map(len, symbols_for_base_idx))
n_bases = len(base_cardinalities)
base_phase = num % n_bases
num = (num - base_phase) // n_bases
base = list(islice(cycle(base_cardinalities), base_phase, n_bases + base_phase))
coord = number_to_multi_base(num, base)
return str_from_num_list(coord[::-1], symbols_for_base_idx, base_phase)[::-1]
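def _check_text_for_num_injective(n=10000, symbols_for_base_idx=vowels_and_consonants):
    # Hedged sanity-check sketch (assumed helper): empirically verifies that
    # text_for_num produces no collisions over the first n integers, which is
    # expected whenever the symbol sets are pairwise disjoint, as the
    # docstring above claims.
    seen = {}
    for i in range(n):
        t = text_for_num(i, symbols_for_base_idx)
        assert t not in seen, f"collision: {i} and {seen[t]} both map to {t!r}"
        seen[t] = i
    return True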
inf = float('infinity')
def text_to_pronounceable_text(text,
symbols_for_base_idx=vowels_and_consonants,
captured_alphabet=alpha_numerics,
case_sensitive=False,
max_word_length=30,
artificial_word_sep='_',
assert_no_word_sep_in_text=False
):
"""
Args:
text: text you want to map
symbols_for_base_idx: symbols you want to map TO (default is vowels and consonants)
captured_alphabet: the symbols of the words you want to map FROM (essentially, in contrast to filler characters)
case_sensitive: Whether the input text should be lower cased before being processed
max_word_length: The maximum length of a pronounceable word
artificial_word_sep: The separator to separate pronounceable words when the word is too long
assert_no_word_sep_in_text: Whether to assert that artificial_word_sep is not already in the input text
(to avoid clashing and non-invertibility)
Returns:
A more pronounceable text, where pronounceable is defined by you, so not my fault if it's not.
>>> text_to_pronounceable_text('asd8098 098df')
'izokagamuta osuhoju'
>>> text_to_pronounceable_text('asd8098 098df', max_word_length=4, artificial_word_sep='_')
'izo_kaga_muta osu_hoju'
"""
if not case_sensitive:
text = text.lower()
p = re.compile(f'[{captured_alphabet}]+') # to match the text to be mapped
anti_p = re.compile(f'[^{captured_alphabet}]+') # to match the chunks of separator (not matched) text
matched_text = anti_p.split(text)
num_of_character = {c: i for i, c in enumerate(captured_alphabet)} # the numerical mapping of alphabet
base_n = len(captured_alphabet)
# function to get the (base_n) number for a chk
num_of_chk = lambda chk: sum(num_of_character[c] * (base_n ** i) for i, c in enumerate(chk))
_text_for_num = lambda num: text_for_num(num, symbols_for_base_idx)
pronounceable_words = [_text_for_num(num_of_chk(chk)) for chk in matched_text]
if max_word_length < inf:
def post_process_word(word):
if len(word) > max_word_length:
if assert_no_word_sep_in_text:
assert artificial_word_sep not in text, \
f"Your artificial_word_sep ({artificial_word_sep}) was in the text (so no bijective mapping)"
r = (len(word) % max_word_length)
word_suffix = word[:r]
word_prefix = word[r:]
word = artificial_word_sep.join(map(''.join, zip(*([iter(word_prefix)] * max_word_length))))
if word_suffix:
word = word_suffix + artificial_word_sep + word
return word
else:
return word
pronounceable_words = list(map(post_process_word, pronounceable_words))
separator_text = p.split(text)
if len(pronounceable_words) < len(separator_text):
return ''.join(map(''.join, zip(separator_text, pronounceable_words)))
else:
return ''.join(map(''.join, zip(pronounceable_words, separator_text)))
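def _demo_long_word_split():
    # Hedged sketch (assumed demo) of the max_word_length handling above: the
    # remainder chunk (len(word) % max_word_length characters) is kept as a
    # prefix, and the rest is cut into fixed-size chunks joined by
    # artificial_word_sep, e.g. 'izokagamuta' with max_word_length=4 becomes
    # 'izo_kaga_muta'.
    word, max_len, sep = 'izokagamuta', 4, '_'
    r = len(word) % max_len
    suffix, prefix = word[:r], word[r:]
    chunks = [prefix[i:i + max_len] for i in range(0, len(prefix), max_len)]
    out = sep.join(([suffix] if suffix else []) + chunks)
    assert out == 'izo_kaga_muta'
    return out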
class FunTests:
@staticmethod
def print_sequences_in_columns(start_num=3000, end_num=3060):
for i in range(start_num, end_num):
# print(f"-----{i}")
if i % 2:
print("".join(map(str, (text_for_num(i)))))
else:
print("\t" + "".join(map(str, (text_for_num(i)))))
if __name__ == '__main__':
try:
import argh
except ImportError:
raise ImportError("You don't have argh. You can install it by doing:\n"
" pip install argh\n"
"In your terminal/environment,")
argh.dispatch_command(text_to_pronounceable_text)
|
|
"""Alarm Script."""
import sys
import time
from . import config as cg
from . import all_off, fade, lcd
from .context import IO
# FIXME: Re-implement as a thread with an interrupt Exception
###########################
# Configuration:
###########################
# Electronic Pin Numbering Globals:
off_button = cg.get_pin('Input_Pins', 'off_button')
pin_buzzer = cg.get_pin('Haptics', 'pin_buzzer')
pin_shaker = cg.get_pin('Haptics', 'pin_shaker')
pin_blue = cg.get_pin('RGB_Strip', 'pin_blue')
pin_red = cg.get_pin('RGB_Strip', 'pin_red')
pin_green = cg.get_pin('RGB_Strip', 'pin_green')
# # TODO: Add second LED Strip
# pin_blue2 = cg.get_pin('RGB_Strip', 'pin_blue2')
# pin_red2 = cg.get_pin('RGB_Strip', 'pin_red2')
# pin_green2 = cg.get_pin('RGB_Strip', 'pin_green2')
# Allow shorter run time for testing with ANY argument
if len(sys.argv) > 1:
# arg = cg.parse_argv(sys)
alarm_stage_time = [0, 5, 10, 15]
else:
alarm_stage_time = [30, 180, 80, 60]
step_size = 0.2
alarm_on = True
_running = False
cg.quiet_logging(False)
# Settings for fade_led_strip()
max_brightness = 0.6
last_beep, fade_stage = 0, 0
fade_stages = [pin_green, pin_red, pin_blue,
pin_green, pin_red, pin_blue]
a_s_t = alarm_stage_time[3]
l_f_s = len(fade_stages)
if a_s_t < l_f_s:
    raise ValueError('alarm_stage_time[3] ({}) must be >= len(fade_stages) ({})'.format(a_s_t, l_f_s))
time_total = a_s_t / l_f_s
###########################
# Functions and Stuff
###########################
def alarm_deactivate(pin_num):
"""Button callback on rising edge."""
global alarm_on
if IO.input(pin_num):
cg.send('Deactivating Alarm on {}'.format(IO.input(pin_num)))
alarm_on = False
def gen_button_cb(pin_num):
"""For testing the callback function."""
if IO.input(pin_num):
cg.send('Triggered on a rising edge from pin: {}'.format(pin_num))
else:
cg.send('Triggered on a falling edge from pin: {}'.format(pin_num))
def beep(counter):
"""Cycle through different low frequencies."""
global last_beep
if counter % 2 <= 1 and last_beep == 0:
cg.set_pwm(pin_buzzer, 0.2)
last_beep = 0.2
elif counter % 2 > 1 and last_beep == 0.2:
cg.set_pwm(pin_buzzer, 0.0)
last_beep = 0
def fade_led_strip(counter):
"""Cycle the LED Strip through various colors."""
global fade_stage
if time_total < 0.1:
time_step = 1
else:
time_step = (counter % time_total) + 1.0
# Increment the LED value
if fade_stage % 2 == 0:
value = 1 - (1 / time_step)
# Decrement the LED value
elif fade_stage % 2 == 1:
value = 1 / time_step
# Update the Alarm Electronics
if fade_stage < len(fade_stages):
# cg.set_pwm(pin_buzzer, ((counter % 2) + 1.0) / 4)
cg.set_pwm(fade_stages[fade_stage], max_brightness * value)
if time_step == time_total:
fade_stage += 1
else:
# cg.set_pwm(pin_buzzer, 0.5)
fade.all_on(max_brightness)
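def preview_fade_values(stage_time=time_total, step=step_size, rising=True):
    # Hedged preview sketch (assumed helper, no GPIO/PWM side effects):
    # reproduces the brightness ramp that fade_led_strip computes for a single
    # colour stage -- ramping up (rising=True, as on even stages) or down
    # (rising=False, as on odd stages) while the counter advances by `step`.
    values = []
    counter = 0.0
    while counter < stage_time:
        time_step = 1 if stage_time < 0.1 else (counter % stage_time) + 1.0
        value = 1 - (1 / time_step) if rising else 1 / time_step
        values.append(round(max_brightness * value, 3))
        counter += step
    return values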
###########################
# Alarm logic!
###########################
def stop():
"""Halt execution."""
global _running
_running = False
cg.send('\nAlarm Cycles Finished\n')
cg.ifttt('PiAlarm_SendText', {'value1': 'PiAlarm Completed'})
# Cleanup tasks:
all_off.deactivate()
IO.remove_event_detect(off_button)
#
# IO.cleanup() # Removed to avoid interference with clock
#
# release_pwm(pin_shaker)
# etc...
# # Then stop pi-blaster for good measure:
# stopPiB = "sudo kill $(ps aux | grep [b]laster | awk '{print $2}')"
# subprocess.call(stopPiB, shell=True)
def start(user_home):
"""Start alarm sequence."""
global fade_stage, _running, alarm_on
_running = True
stage, stage3_rep_counter = 1, 0
cg.send('Set IO mode and event detection')
IO.setwarnings(False)
IO.setmode(IO.BCM)
IO.setup(off_button, IO.IN)
IO.add_event_detect(off_button, IO.RISING, callback=alarm_deactivate, bouncetime=300)
while stage < 4 and stage3_rep_counter < 3 and user_home:
all_off.deactivate()
cg.send('\nStarting Stage: {}'.format(stage) +
' for {} seconds'.format(alarm_stage_time[stage]))
current_time = 0
# Stage 1 - Green LED Strip for 1 minute
if stage == 1 and alarm_on:
cg.send('Configuring Stage 1')
cg.set_pwm(pin_green, 0.2)
cg.set_pwm(pin_red, 0.2)
cb = False
# Stage 2 - Purple LED Strip and Buzzer
if stage == 2 and alarm_on:
cg.send('Configuring Stage 2')
cg.set_pwm(pin_blue, 0.5)
cg.set_pwm(pin_red, 0.5)
cg.set_pwm(pin_buzzer, 0.1)
cb = beep
# Stage 3 - LED Strip, Bed Shaker, and Buzzer
if stage == 3 and alarm_on:
cg.send('Configuring Stage 3')
cg.set_pwm(pin_shaker, 1)
cg.set_pwm(pin_buzzer, 0.5)
cb = fade_led_strip
# Run alarm and check for button interrupt:
while alarm_on and current_time < alarm_stage_time[stage]:
time.sleep(step_size)
current_time += step_size
if cb:
cb(current_time)
cg.send('Completed Step #{0}'.format(stage))
# Prep for the next loop:
if stage == 3 and alarm_on:
all_off.deactivate()
cg.send('\nLooping back through Stage 3')
time.sleep(7)
fade_stage = 0
stage3_rep_counter += 1
else:
stage += 1
current_time = 0
user_home = cg.check_status()
cg.send('Checking home (= {}) before next loop'.format(user_home))
stop()
def run():
"""Check state and start alarm if ready."""
global _running, alarm_on
user_home = cg.check_status()
alarm_on = True
if _running:
_err = 'ERROR: ALARM IS ALREADY RUNNING!'
cg.send(_err)
cg.ifttt('PiAlarm_SendText', {'value1': _err})
elif user_home:
lcd.brightness('alt')
cg.ifttt('PiAlarm_SendText', {'value1': '** PiAlarm Started! **'})
time.sleep(alarm_stage_time[0]) # let text alert go out
start(cg.check_status())
else:
cg.ifttt('PiAlarm_SendText', {'value1': 'User away, no PiAlarm'})
if __name__ == '__main__':
run()
|
|
from functools import reduce
from operator import mul
import numpy as np
import pytest
from devito import (Grid, Function, TimeFunction, SparseTimeFunction, SpaceDimension,
Dimension, SubDimension, Eq, Inc, Operator, info)
from devito.exceptions import InvalidArgument
from devito.ir.iet import Call, Iteration, Conditional, FindNodes, retrieve_iteration_tree
from devito.passes.iet.languages.openmp import OmpRegion
from devito.tools import as_tuple
from devito.types import Scalar
def get_blocksizes(op, opt, grid, blockshape, level=0):
blocksizes = {'%s0_blk%d_size' % (d, level): v
for d, v in zip(grid.dimensions, blockshape)}
blocksizes = {k: v for k, v in blocksizes.items() if k in op._known_arguments}
# Sanity check
if grid.dim == 1 or len(blockshape) == 0:
assert len(blocksizes) == 0
return {}
try:
if opt[1].get('blockinner'):
assert len(blocksizes) >= 1
if grid.dim == len(blockshape):
assert len(blocksizes) == len(blockshape)
else:
assert len(blocksizes) <= len(blockshape)
return blocksizes
except AttributeError:
assert len(blocksizes) == 0
return {}
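def _example_blocksize_kwargs(dim_names=('x', 'y', 'z'), blockshape=(8, 8, 8), level=0):
    # Hedged illustration (assumed helper, not used by the tests): the kwarg
    # names that get_blocksizes builds for a 3D grid before filtering against
    # op._known_arguments, e.g. {'x0_blk0_size': 8, 'y0_blk0_size': 8,
    # 'z0_blk0_size': 8} for level-0 blocking.
    return {'%s0_blk%d_size' % (d, level): v for d, v in zip(dim_names, blockshape)}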
def _new_operator2(shape, time_order, blockshape=None, opt=None):
blockshape = as_tuple(blockshape)
grid = Grid(shape=shape, dtype=np.int32)
infield = TimeFunction(name='infield', grid=grid, time_order=time_order)
infield.data[:] = np.arange(reduce(mul, shape), dtype=np.int32).reshape(shape)
outfield = TimeFunction(name='outfield', grid=grid, time_order=time_order)
stencil = Eq(outfield.forward.indexify(),
outfield.indexify() + infield.indexify()*3.0)
op = Operator(stencil, opt=opt)
blocksizes = get_blocksizes(op, opt, grid, blockshape)
op(infield=infield, outfield=outfield, t=10, **blocksizes)
return outfield, op
def _new_operator3(shape, blockshape0=None, blockshape1=None, opt=None):
blockshape0 = as_tuple(blockshape0)
blockshape1 = as_tuple(blockshape1)
grid = Grid(shape=shape, extent=shape, dtype=np.float64)
# Allocate the grid and set initial condition
# Note: This should be made simpler through the use of defaults
u = TimeFunction(name='u', grid=grid, time_order=1, space_order=(2, 2, 2))
u.data[0, :] = np.linspace(-1, 1, reduce(mul, shape)).reshape(shape)
# Derive the stencil according to devito conventions
op = Operator(Eq(u.forward, 0.5 * u.laplace + u), opt=opt)
blocksizes0 = get_blocksizes(op, opt, grid, blockshape0, 0)
blocksizes1 = get_blocksizes(op, opt, grid, blockshape1, 1)
op.apply(u=u, t=10, **blocksizes0, **blocksizes1)
return u.data[1, :], op
@pytest.mark.parametrize("shape", [(41,), (20, 33), (45, 31, 45)])
def test_composite_transformation(shape):
wo_blocking, _ = _new_operator2(shape, time_order=2, opt='noop')
w_blocking, _ = _new_operator2(shape, time_order=2, opt='advanced')
assert np.equal(wo_blocking.data, w_blocking.data).all()
@pytest.mark.parametrize("blockinner,exp_calls,exp_iters", [
(False, 4, 5),
(True, 8, 6)
])
def test_cache_blocking_structure(blockinner, exp_calls, exp_iters):
# Check code structure
_, op = _new_operator2((10, 31, 45), time_order=2,
opt=('blocking', {'blockinner': blockinner,
'par-collapse-ncores': 1}))
calls = FindNodes(Call).visit(op)
assert len(calls) == exp_calls
trees = retrieve_iteration_tree(op._func_table['bf0'].root)
assert len(trees) == 1
tree = trees[0]
assert len(tree) == exp_iters
if blockinner:
assert all(tree[i].dim.is_Incr for i in range(exp_iters))
else:
assert all(tree[i].dim.is_Incr for i in range(exp_iters-1))
assert not tree[-1].dim.is_Incr
# Check presence of openmp pragmas at the right place
_, op = _new_operator2((10, 31, 45), time_order=2,
opt=('blocking', {'openmp': True,
'blockinner': blockinner,
'par-collapse-ncores': 1}))
trees = retrieve_iteration_tree(op._func_table['bf0'].root)
assert len(trees) == 1
tree = trees[0]
assert len(tree.root.pragmas) == 1
assert 'omp for' in tree.root.pragmas[0].value
# Also, with omp parallelism enabled, the step increment must be != 0
# to avoid omp segfaults at scheduling time (only certain omp implementations,
# including Intel's)
conditionals = FindNodes(Conditional).visit(op._func_table['bf0'].root)
assert len(conditionals) == 1
conds = conditionals[0].condition.args
expected_guarded = tree[:2+blockinner]
assert len(conds) == len(expected_guarded)
assert all(i.lhs == j.step for i, j in zip(conds, expected_guarded))
def test_cache_blocking_structure_subdims():
"""
Test that:
    * With local SubDimensions, no blocking is expected.
* With non-local SubDimensions, blocking is expected.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
xi, yi, zi = grid.interior.dimensions
t = grid.stepping_dim
xl = SubDimension.left(name='xl', parent=x, thickness=4)
f = TimeFunction(name='f', grid=grid)
assert xl.local
# Local SubDimension -> no blocking expected
op = Operator(Eq(f[t+1, xl, y, z], f[t, xl, y, z] + 1))
assert len(op._func_table) == 0
# Non-local SubDimension -> blocking expected
op = Operator(Eq(f.forward, f + 1, subdomain=grid.interior))
trees = retrieve_iteration_tree(op._func_table['bf0'].root)
assert len(trees) == 1
tree = trees[0]
assert len(tree) == 5
assert tree[0].dim.is_Incr and tree[0].dim.parent is xi and tree[0].dim.root is x
assert tree[1].dim.is_Incr and tree[1].dim.parent is yi and tree[1].dim.root is y
assert tree[2].dim.is_Incr and tree[2].dim.parent is tree[0].dim and\
tree[2].dim.root is x
assert tree[3].dim.is_Incr and tree[3].dim.parent is tree[1].dim and\
tree[3].dim.root is y
assert not tree[4].dim.is_Incr and tree[4].dim is zi and tree[4].dim.parent is z
@pytest.mark.parallel(mode=[(1, 'full')]) # Shortcut to put loops in nested efuncs
def test_cache_blocking_structure_multiple_efuncs():
"""
Test cache blocking in multiple nested elemental functions.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
u = TimeFunction(name="u", grid=grid, space_order=2)
U = TimeFunction(name="U", grid=grid, space_order=2)
src = SparseTimeFunction(name="src", grid=grid, nt=3, npoint=1,
coordinates=np.array([(0.5, 0.5, 0.5)]))
eqns = [Eq(u.forward, u.dx)]
eqns += src.inject(field=u.forward, expr=src)
eqns += [Eq(U.forward, U.dx + u.forward)]
op = Operator(eqns)
for i in ['bf0', 'bf1']:
assert i in op._func_table
iters = FindNodes(Iteration).visit(op._func_table[i].root)
assert len(iters) == 5
assert iters[0].dim.parent is x
assert iters[1].dim.parent is y
assert iters[4].dim is z
assert iters[2].dim.parent is iters[0].dim
assert iters[3].dim.parent is iters[1].dim
@pytest.mark.parametrize("shape", [(10,), (10, 45), (20, 33), (10, 31, 45), (45, 31, 45)])
@pytest.mark.parametrize("time_order", [2])
@pytest.mark.parametrize("blockshape", [2, (3, 3), (9, 20), (2, 9, 11), (7, 15, 23)])
@pytest.mark.parametrize("blockinner", [False, True])
def test_cache_blocking_time_loop(shape, time_order, blockshape, blockinner):
wo_blocking, _ = _new_operator2(shape, time_order, opt='noop')
w_blocking, _ = _new_operator2(shape, time_order, blockshape,
opt=('blocking', {'blockinner': blockinner}))
assert np.equal(wo_blocking.data, w_blocking.data).all()
@pytest.mark.parametrize("shape,blockshape", [
((25, 25, 46), (25, 25, 46)),
((25, 25, 46), (7, 25, 46)),
((25, 25, 46), (25, 25, 7)),
((25, 25, 46), (25, 7, 46)),
((25, 25, 46), (5, 25, 7)),
((25, 25, 46), (10, 3, 46)),
((25, 25, 46), (25, 7, 11)),
((25, 25, 46), (8, 2, 4)),
((25, 25, 46), (2, 4, 8)),
((25, 25, 46), (4, 8, 2)),
((25, 46), (25, 7)),
((25, 46), (7, 46))
])
def test_cache_blocking_edge_cases(shape, blockshape):
time_order = 2
wo_blocking, _ = _new_operator2(shape, time_order, opt='noop')
w_blocking, _ = _new_operator2(shape, time_order, blockshape,
opt=('blocking', {'blockinner': True}))
assert np.equal(wo_blocking.data, w_blocking.data).all()
@pytest.mark.parametrize("shape,blockshape", [
((3, 3), (3, 3)),
((4, 4), (3, 4)),
((5, 5), (3, 4)),
((6, 6), (3, 4)),
((7, 7), (3, 4)),
((8, 8), (3, 4)),
((9, 9), (3, 4)),
((10, 10), (3, 4)),
((11, 11), (3, 4)),
((12, 12), (3, 4)),
((13, 13), (3, 4)),
((14, 14), (3, 4)),
((15, 15), (3, 4))
])
def test_cache_blocking_edge_cases_highorder(shape, blockshape):
wo_blocking, a = _new_operator3(shape, opt='noop')
w_blocking, b = _new_operator3(shape, blockshape, opt=('blocking',
{'blockinner': True}))
assert np.allclose(wo_blocking, w_blocking, rtol=1e-12)
@pytest.mark.parametrize("blockshape0,blockshape1,exception", [
((24, 24, 40), (24, 24, 40), False),
((24, 24, 40), (4, 4, 4), False),
((24, 24, 40), (8, 8, 8), False),
((20, 20, 12), (4, 4, 4), False),
((28, 32, 16), (14, 16, 8), False),
((12, 12, 60), (4, 12, 4), False),
((12, 12, 60), (4, 5, 4), True), # not a perfect divisor
((12, 12, 60), (24, 4, 4), True), # bigger than outer block
])
def test_cache_blocking_hierarchical(blockshape0, blockshape1, exception):
shape = (51, 102, 71)
wo_blocking, a = _new_operator3(shape, opt='noop')
try:
w_blocking, b = _new_operator3(shape, blockshape0, blockshape1,
opt=('blocking', {'blockinner': True,
'blocklevels': 2}))
assert not exception
assert np.allclose(wo_blocking, w_blocking, rtol=1e-12)
except InvalidArgument:
assert exception
except:
assert False
@pytest.mark.parametrize("blockinner", [False, True])
def test_cache_blocking_imperfect_nest(blockinner):
"""
Test that a non-perfect Iteration nest is blocked correctly.
"""
grid = Grid(shape=(4, 4, 4), dtype=np.float64)
u = TimeFunction(name='u', grid=grid, space_order=2)
v = TimeFunction(name='v', grid=grid, space_order=2)
eqns = [Eq(u.forward, v.laplace),
Eq(v.forward, u.forward.dz)]
op0 = Operator(eqns, opt='noop')
op1 = Operator(eqns, opt=('advanced', {'blockinner': blockinner}))
# First, check the generated code
trees = retrieve_iteration_tree(op1._func_table['bf0'].root)
assert len(trees) == 2
assert len(trees[0]) == len(trees[1])
assert all(i is j for i, j in zip(trees[0][:4], trees[1][:4]))
assert trees[0][4] is not trees[1][4]
assert trees[0].root.dim.is_Incr
assert trees[1].root.dim.is_Incr
assert op1.parameters[7] is trees[0][0].step
assert op1.parameters[10] is trees[0][1].step
u.data[:] = 0.2
v.data[:] = 1.5
op0(time_M=0)
u1 = TimeFunction(name='u1', grid=grid, space_order=2)
v1 = TimeFunction(name='v1', grid=grid, space_order=2)
u1.data[:] = 0.2
v1.data[:] = 1.5
op1(u=u1, v=v1, time_M=0)
assert np.all(u.data == u1.data)
assert np.all(v.data == v1.data)
@pytest.mark.parametrize("blockinner", [False, True])
def test_cache_blocking_imperfect_nest_v2(blockinner):
"""
Test that a non-perfect Iteration nest is blocked correctly. This
is slightly different than ``test_cache_blocking_imperfect_nest``
as here only one Iteration gets blocked.
"""
shape = (16, 16, 16)
grid = Grid(shape=shape, dtype=np.float64)
u = TimeFunction(name='u', grid=grid, space_order=4)
u.data[:] = np.linspace(0, 1, reduce(mul, shape), dtype=np.float64).reshape(shape)
eq = Eq(u.forward, 0.01*u.dy.dy)
op0 = Operator(eq, opt='noop')
op1 = Operator(eq, opt=('cire-sops', {'blockinner': blockinner}))
op2 = Operator(eq, opt=('advanced-fsg', {'blockinner': blockinner}))
# First, check the generated code
trees = retrieve_iteration_tree(op2._func_table['bf0'].root)
assert len(trees) == 2
assert len(trees[0]) == len(trees[1])
assert all(i is j for i, j in zip(trees[0][:2], trees[1][:2]))
assert trees[0][2] is not trees[1][2]
assert trees[0].root.dim.is_Incr
assert trees[1].root.dim.is_Incr
assert op2.parameters[6] is trees[0].root.step
op0(time_M=0)
u1 = TimeFunction(name='u1', grid=grid, space_order=4)
u1.data[:] = np.linspace(0, 1, reduce(mul, shape), dtype=np.float64).reshape(shape)
op1(time_M=0, u=u1)
u2 = TimeFunction(name='u2', grid=grid, space_order=4)
u2.data[:] = np.linspace(0, 1, reduce(mul, shape), dtype=np.float64).reshape(shape)
op2(time_M=0, u=u2)
assert np.allclose(u.data, u1.data, rtol=1e-07)
assert np.allclose(u.data, u2.data, rtol=1e-07)
class TestNodeParallelism(object):
def test_nthreads_generation(self):
grid = Grid(shape=(10, 10))
f = TimeFunction(name='f', grid=grid)
eq = Eq(f.forward, f + 1)
op0 = Operator(eq, openmp=True)
# `nthreads` must appear among the Operator parameters
assert op0.nthreads in op0.parameters
# `nthreads` is bindable to a runtime value
assert op0.nthreads._arg_values()
@pytest.mark.parametrize('exprs,expected', [
# trivial 1D
(['Eq(fa[x], fa[x] + fb[x])'],
(True,)),
# trivial 1D
(['Eq(t0, fa[x] + fb[x])', 'Eq(fa[x], t0 + 1)'],
(True,)),
# trivial 2D
(['Eq(t0, fc[x,y] + fd[x,y])', 'Eq(fc[x,y], t0 + 1)'],
(True, False)),
# outermost parallel, innermost sequential
(['Eq(t0, fc[x,y] + fd[x,y])', 'Eq(fc[x,y+1], t0 + 1)'],
(True, False)),
# outermost sequential, innermost parallel
(['Eq(t0, fc[x,y] + fd[x,y])', 'Eq(fc[x+1,y], t0 + 1)'],
(False, True)),
# outermost sequential, innermost parallel
(['Eq(fc[x,y], fc[x+1,y+1] + fc[x-1,y])'],
(False, True)),
# outermost parallel w/ repeated dimensions (hence irregular dependencies)
# both `x` and `y` are parallel-if-atomic loops
(['Inc(t0, fc[x,x] + fd[x,y+1])', 'Eq(fc[x,x], t0 + 1)'],
(True, False)),
# outermost sequential, innermost sequential (classic skewing example)
(['Eq(fc[x,y], fc[x,y+1] + fc[x-1,y])'],
(False, False)),
# skewing-like over two Eqs
(['Eq(t0, fc[x,y+2] + fc[x-1,y+2])', 'Eq(fc[x,y+1], t0 + 1)'],
(False, False)),
# outermost parallel, innermost sequential w/ double tensor write
(['Eq(fc[x,y], fc[x,y+1] + fd[x-1,y])', 'Eq(fd[x-1,y+1], fd[x-1,y] + fc[x,y+1])'],
(True, False, False)),
# outermost sequential, innermost parallel w/ mixed dimensions
(['Eq(fc[x+1,y], fc[x,y+1] + fc[x,y])', 'Eq(fc[x+1,y], 2. + fc[x,y+1])'],
(False, True)),
])
def test_iterations_ompized(self, exprs, expected):
grid = Grid(shape=(4, 4))
x, y = grid.dimensions # noqa
fa = Function(name='fa', grid=grid, dimensions=(x,), shape=(4,)) # noqa
fb = Function(name='fb', grid=grid, dimensions=(x,), shape=(4,)) # noqa
fc = Function(name='fc', grid=grid) # noqa
fd = Function(name='fd', grid=grid) # noqa
t0 = Scalar(name='t0') # noqa
eqns = []
for e in exprs:
eqns.append(eval(e))
op = Operator(eqns, opt='openmp')
iterations = FindNodes(Iteration).visit(op)
assert len(iterations) == len(expected)
# Check for presence of pragma omp
for i, j in zip(iterations, expected):
pragmas = i.pragmas
if j is True:
assert len(pragmas) == 1
pragma = pragmas[0]
assert 'omp for' in pragma.value
else:
for k in pragmas:
assert 'omp for' not in k.value
def test_dynamic_nthreads(self):
grid = Grid(shape=(16, 16, 16))
f = TimeFunction(name='f', grid=grid)
sf = SparseTimeFunction(name='sf', grid=grid, npoint=1, nt=5)
eqns = [Eq(f.forward, f + 1)]
eqns += sf.interpolate(f)
op = Operator(eqns, opt='openmp')
parregions = FindNodes(OmpRegion).visit(op)
assert len(parregions) == 2
# Check suitable `num_threads` appear in the generated code
# Not very elegant, but it does the trick
assert 'num_threads(nthreads)' in str(parregions[0].header[0])
assert 'num_threads(nthreads_nonaffine)' in str(parregions[1].header[0])
# Check `op` accepts the `nthreads*` kwargs
op.apply(time=0)
op.apply(time_m=1, time_M=1, nthreads=4)
op.apply(time_m=1, time_M=1, nthreads=4, nthreads_nonaffine=2)
op.apply(time_m=1, time_M=1, nthreads_nonaffine=2)
assert np.all(f.data[0] == 2.)
# Check the actual value assumed by `nthreads` and `nthreads_nonaffine`
assert op.arguments(time=0, nthreads=123)['nthreads'] == 123
assert op.arguments(time=0, nthreads_nonaffine=100)['nthreads_nonaffine'] == 100
@pytest.mark.parametrize('eqns,expected,blocking', [
('[Eq(f, 2*f)]', [2, 0, 0], False),
('[Eq(u, 2*u)]', [0, 2, 0, 0], False),
('[Eq(u, 2*u)]', [3, 0, 0, 0, 0, 0], True),
('[Eq(u, 2*u), Eq(f, u.dzr)]', [0, 2, 0, 0, 0], False)
])
def test_collapsing(self, eqns, expected, blocking):
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid) # noqa
u = TimeFunction(name='u', grid=grid) # noqa
eqns = eval(eqns)
if blocking:
op = Operator(eqns, opt=('blocking', 'simd', 'openmp',
{'blockinner': True, 'par-collapse-ncores': 1,
'par-collapse-work': 0}))
iterations = FindNodes(Iteration).visit(op._func_table['bf0'])
else:
op = Operator(eqns, opt=('simd', 'openmp', {'par-collapse-ncores': 1,
'par-collapse-work': 0}))
iterations = FindNodes(Iteration).visit(op)
assert len(iterations) == len(expected)
# Check for presence of pragma omp + collapse clause
for i, j in zip(iterations, expected):
if j > 0:
assert len(i.pragmas) == 1
pragma = i.pragmas[0]
assert 'omp for collapse(%d)' % j in pragma.value
else:
for k in i.pragmas:
assert 'omp for collapse' not in k.value
def test_collapsing_v2(self):
"""
MFE from issue #1478.
"""
n = 8
m = 8
nx, ny, nchi, ncho = 12, 12, 1, 1
x, y = SpaceDimension("x"), SpaceDimension("y")
ci, co = Dimension("ci"), Dimension("co")
i, j = Dimension("i"), Dimension("j")
grid = Grid((nx, ny), dtype=np.float32, dimensions=(x, y))
X = Function(name="xin", dimensions=(ci, x, y),
shape=(nchi, nx, ny), grid=grid, space_order=n//2)
dy = Function(name="dy", dimensions=(co, x, y),
shape=(ncho, nx, ny), grid=grid, space_order=n//2)
dW = Function(name="dW", dimensions=(co, ci, i, j), shape=(ncho, nchi, n, m),
grid=grid)
eq = [Eq(dW[co, ci, i, j],
dW[co, ci, i, j] + dy[co, x, y]*X[ci, x+i-n//2, y+j-m//2])
for i in range(n) for j in range(m)]
op = Operator(eq, opt=('advanced', {'openmp': True}))
iterations = FindNodes(Iteration).visit(op)
assert len(iterations) == 4
assert iterations[0].ncollapsed == 1
assert iterations[1].is_Vectorized
assert iterations[2].is_Sequential
assert iterations[3].is_Sequential
def test_scheduling(self):
"""
Affine iterations -> #pragma omp ... schedule(dynamic,1) ...
Non-affine iterations -> #pragma omp ... schedule(dynamic,chunk_size) ...
"""
grid = Grid(shape=(11, 11))
u = TimeFunction(name='u', grid=grid, time_order=2, save=5, space_order=0)
sf1 = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=5)
eqns = [Eq(u.forward, u + 1)]
eqns += sf1.interpolate(u)
op = Operator(eqns, opt=('openmp', {'par-dynamic-work': 0}))
iterations = FindNodes(Iteration).visit(op)
assert len(iterations) == 4
assert iterations[1].is_Affine
assert 'schedule(dynamic,1)' in iterations[1].pragmas[0].value
assert not iterations[3].is_Affine
assert 'schedule(dynamic,chunk_size)' in iterations[3].pragmas[0].value
@pytest.mark.parametrize('so', [0, 1, 2])
@pytest.mark.parametrize('dim', [0, 1, 2])
def test_array_reduction(self, so, dim):
"""
Test generation of OpenMP reduction clauses involving Function's.
"""
grid = Grid(shape=(3, 3, 3))
d = grid.dimensions[dim]
f = Function(name='f', shape=(3,), dimensions=(d,), grid=grid, space_order=so)
u = TimeFunction(name='u', grid=grid)
op = Operator(Inc(f, u + 1), opt=('openmp', {'par-collapse-ncores': 1}))
iterations = FindNodes(Iteration).visit(op)
assert "reduction(+:f[0:f_vec->size[0]])" in iterations[1].pragmas[0].value
try:
op(time_M=1)
except:
# Older gcc <6.1 don't support reductions on array
info("Un-supported older gcc version for array reduction")
assert True
return
assert np.allclose(f.data, 18)
def test_incs_no_atomic(self):
"""
Test that `Inc`'s don't get a `#pragma omp atomic` if performing
an increment along a fully parallel loop.
"""
grid = Grid(shape=(8, 8, 8))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid)
v = TimeFunction(name='v', grid=grid)
# Format: u(t, x, nastyness) += 1
uf = u[t, x, f, z]
# All loops get collapsed, but the `y` and `z` loops are PARALLEL_IF_ATOMIC,
# hence an atomic pragma is expected
op0 = Operator(Inc(uf, 1), opt=('advanced', {'openmp': True,
'par-collapse-ncores': 1}))
assert 'collapse(3)' in str(op0)
assert 'atomic' in str(op0)
# Now only `x` is parallelized
op1 = Operator([Eq(v[t, x, 0, 0], v[t, x, 0, 0] + 1), Inc(uf, 1)],
opt=('advanced', {'openmp': True, 'par-collapse-ncores': 1}))
assert 'collapse(1)' in str(op1)
assert 'atomic' not in str(op1)
class TestNestedParallelism(object):
def test_basic(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
op = Operator(Eq(u.forward, u + 1),
opt=('blocking', 'openmp', {'par-nested': 0,
'par-collapse-ncores': 10000,
'par-dynamic-work': 0}))
# Does it compile? Honoring the OpenMP specification isn't trivial
assert op.cfunction
# Does it produce the right result
op.apply(t_M=9)
assert np.all(u.data[0] == 10)
# Try again but this time supplying specific values for the num_threads
u.data[:] = 0.
op.apply(t_M=9, nthreads=1, nthreads_nested=2)
assert np.all(u.data[0] == 10)
assert op.arguments(t_M=9, nthreads_nested=2)['nthreads_nested'] == 2
iterations = FindNodes(Iteration).visit(op._func_table['bf0'])
assert iterations[0].pragmas[0].value == 'omp for collapse(1) schedule(dynamic,1)'
assert iterations[2].pragmas[0].value == ('omp parallel for collapse(1) '
'schedule(dynamic,1) '
'num_threads(nthreads_nested)')
def test_collapsing(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
op = Operator(Eq(u.forward, u + 1),
opt=('blocking', 'openmp', {'par-nested': 0,
'par-collapse-ncores': 1,
'par-collapse-work': 0,
'par-dynamic-work': 0}))
# Does it compile? Honoring the OpenMP specification isn't trivial
assert op.cfunction
# Does it produce the right result
op.apply(t_M=9)
assert np.all(u.data[0] == 10)
iterations = FindNodes(Iteration).visit(op._func_table['bf0'])
assert iterations[0].pragmas[0].value == 'omp for collapse(2) schedule(dynamic,1)'
assert iterations[2].pragmas[0].value == ('omp parallel for collapse(2) '
'schedule(dynamic,1) '
'num_threads(nthreads_nested)')
def test_multiple_subnests_v0(self):
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + 1))
op = Operator(eqn, opt=('advanced', {'openmp': True,
'cire-mincost-sops': 1,
'par-nested': 0,
'par-collapse-ncores': 1,
'par-dynamic-work': 0}))
trees = retrieve_iteration_tree(op._func_table['bf0'].root)
assert len(trees) == 2
assert trees[0][0] is trees[1][0]
assert trees[0][0].pragmas[0].value ==\
'omp for collapse(2) schedule(dynamic,1)'
assert trees[0][2].pragmas[0].value == ('omp parallel for collapse(2) '
'schedule(dynamic,1) '
'num_threads(nthreads_nested)')
assert trees[1][2].pragmas[0].value == ('omp parallel for collapse(2) '
'schedule(dynamic,1) '
'num_threads(nthreads_nested)')
def test_multiple_subnests_v1(self):
"""
        Unlike ``test_multiple_subnests_v0``, here we use the ``cire-rotate=True``
option, which trades some of the inner parallelism for a smaller working set.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + 1))
op = Operator(eqn, opt=('advanced', {'openmp': True,
'cire-mincost-sops': 1,
'cire-rotate': True,
'par-nested': 0,
'par-collapse-ncores': 1,
'par-dynamic-work': 0}))
trees = retrieve_iteration_tree(op._func_table['bf0'].root)
assert len(trees) == 2
assert trees[0][0] is trees[1][0]
assert trees[0][0].pragmas[0].value ==\
'omp for collapse(2) schedule(dynamic,1)'
assert not trees[0][2].pragmas
assert not trees[0][3].pragmas
assert trees[0][4].pragmas[0].value == ('omp parallel for collapse(1) '
'schedule(dynamic,1) '
'num_threads(nthreads_nested)')
assert not trees[1][2].pragmas
assert trees[1][3].pragmas[0].value == ('omp parallel for collapse(1) '
'schedule(dynamic,1) '
'num_threads(nthreads_nested)')
@pytest.mark.parametrize('blocklevels', [1, 2])
def test_nested_cache_blocking_structure_subdims(self, blocklevels):
"""
Test that:
* With non-local SubDimensions, nested blocking works fine when expected.
* With non-local SubDimensions, hierarchical nested blocking works fine
when expected.
"""
grid = Grid(shape=(4, 4, 4))
x, y, z = grid.dimensions
xi, yi, zi = grid.interior.dimensions
xl = SubDimension.left(name='xl', parent=x, thickness=4)
f = TimeFunction(name='f', grid=grid)
assert xl.local
        # Non-local SubDimension -> nested blocking works as expected
op = Operator(Eq(f.forward, f + 1, subdomain=grid.interior),
opt=('blocking', 'openmp',
{'par-nested': 0, 'blocklevels': blocklevels,
'par-collapse-ncores': 2,
'par-dynamic-work': 0}))
trees = retrieve_iteration_tree(op._func_table['bf0'].root)
assert len(trees) == 1
tree = trees[0]
assert len(tree) == 5 + (blocklevels - 1) * 2
assert tree[0].dim.is_Incr and tree[0].dim.parent is xi and tree[0].dim.root is x
assert tree[1].dim.is_Incr and tree[1].dim.parent is yi and tree[1].dim.root is y
assert tree[2].dim.is_Incr and tree[2].dim.parent is tree[0].dim and\
tree[2].dim.root is x
assert tree[3].dim.is_Incr and tree[3].dim.parent is tree[1].dim and\
tree[3].dim.root is y
if blocklevels == 1:
assert not tree[4].dim.is_Incr and tree[4].dim is zi and\
tree[4].dim.parent is z
elif blocklevels == 2:
assert tree[3].dim.is_Incr and tree[3].dim.parent is tree[1].dim and\
tree[3].dim.root is y
assert tree[4].dim.is_Incr and tree[4].dim.parent is tree[2].dim and\
tree[4].dim.root is x
assert tree[5].dim.is_Incr and tree[5].dim.parent is tree[3].dim and\
tree[5].dim.root is y
assert not tree[6].dim.is_Incr and tree[6].dim is zi and\
tree[6].dim.parent is z
assert trees[0][0].pragmas[0].value ==\
'omp for collapse(2) schedule(dynamic,1)'
assert trees[0][2].pragmas[0].value == ('omp parallel for collapse(2) '
'schedule(dynamic,1) '
'num_threads(nthreads_nested)')
|
|
import contextlib
import logging
import math
import shutil
import tempfile
import uuid
import warnings
import numpy as np
import pandas as pd
import tlz as toolz
from .. import base, config
from ..base import compute, compute_as_if_collection, is_dask_collection, tokenize
from ..highlevelgraph import HighLevelGraph
from ..layers import ShuffleLayer, SimpleShuffleLayer
from ..sizeof import sizeof
from ..utils import M, digit
from . import methods
from .core import DataFrame, Series, _Frame, map_partitions, new_dd_object
from .dispatch import group_split_dispatch, hash_object_dispatch
logger = logging.getLogger(__name__)
def _calculate_divisions(
df,
partition_col,
repartition,
npartitions,
upsample=1.0,
partition_size=128e6,
):
"""
Utility function to calculate divisions for calls to `map_partitions`
"""
sizes = df.map_partitions(sizeof) if repartition else []
divisions = partition_col._repartition_quantiles(npartitions, upsample=upsample)
mins = partition_col.map_partitions(M.min)
maxes = partition_col.map_partitions(M.max)
divisions, sizes, mins, maxes = base.compute(divisions, sizes, mins, maxes)
divisions = methods.tolist(divisions)
if type(sizes) is not list:
sizes = methods.tolist(sizes)
mins = methods.tolist(mins)
maxes = methods.tolist(maxes)
empty_dataframe_detected = pd.isnull(divisions).all()
if repartition or empty_dataframe_detected:
total = sum(sizes)
npartitions = max(math.ceil(total / partition_size), 1)
npartitions = min(npartitions, df.npartitions)
n = len(divisions)
try:
divisions = np.interp(
x=np.linspace(0, n - 1, npartitions + 1),
xp=np.linspace(0, n - 1, n),
fp=divisions,
).tolist()
except (TypeError, ValueError): # str type
indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)
divisions = [divisions[i] for i in indexes]
mins = remove_nans(mins)
maxes = remove_nans(maxes)
if pd.api.types.is_categorical_dtype(partition_col.dtype):
dtype = partition_col.dtype
mins = pd.Categorical(mins, dtype=dtype).codes.tolist()
maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()
return divisions, mins, maxes
def sort_values(
df,
by,
npartitions=None,
ascending=True,
na_position="last",
upsample=1.0,
partition_size=128e6,
sort_function=None,
sort_function_kwargs=None,
**kwargs,
):
"""See DataFrame.sort_values for docstring"""
if na_position not in ("first", "last"):
raise ValueError("na_position must be either 'first' or 'last'")
if not isinstance(by, list):
by = [by]
if len(by) > 1 and df.npartitions > 1 or any(not isinstance(b, str) for b in by):
raise NotImplementedError(
"Dataframes only support sorting by named columns which must be passed as a "
"string or a list of strings; multi-partition dataframes only support sorting "
"by a single column.\n"
"You passed %s" % str(by)
)
sort_kwargs = {
"by": by,
"ascending": ascending,
"na_position": na_position,
}
if sort_function is None:
sort_function = M.sort_values
if sort_function_kwargs is not None:
sort_kwargs.update(sort_function_kwargs)
if df.npartitions == 1:
return df.map_partitions(sort_function, **sort_kwargs)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
sort_by_col = df[by[0]]
divisions, mins, maxes = _calculate_divisions(
df, sort_by_col, repartition, npartitions, upsample, partition_size
)
if len(divisions) == 2:
return df.repartition(npartitions=1).map_partitions(
sort_function, **sort_kwargs
)
if not isinstance(ascending, bool):
# support [True] as input
if (
isinstance(ascending, list)
and len(ascending) == 1
and isinstance(ascending[0], bool)
):
ascending = ascending[0]
else:
raise NotImplementedError(
f"Dask currently only supports a single boolean for ascending. You passed {str(ascending)}"
)
if (
all(not pd.isna(x) for x in divisions)
and mins == sorted(mins, reverse=not ascending)
and maxes == sorted(maxes, reverse=not ascending)
and all(
mx < mn
for mx, mn in zip(
maxes[:-1] if ascending else maxes[1:],
mins[1:] if ascending else mins[:-1],
)
)
and npartitions == df.npartitions
):
# divisions are in the right place
return df.map_partitions(sort_function, **sort_kwargs)
df = rearrange_by_divisions(
df,
by,
divisions,
ascending=ascending,
na_position=na_position,
duplicates=False,
)
df = df.map_partitions(sort_function, **sort_kwargs)
return df
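def _demo_sort_values_presorted_fast_path():
    # Hedged sketch (assumed demo, not part of dask): when the existing
    # partition boundaries already respect the requested ordering (mins/maxes
    # sorted and non-overlapping), sort_values above typically skips the
    # shuffle and only sorts within each partition via map_partitions.
    from dask.dataframe import from_pandas
    pdf = pd.DataFrame({"a": range(8), "b": range(8)})
    ddf = from_pandas(pdf, npartitions=2)  # already ordered by "a"
    return sort_values(ddf, by="a").compute()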
def set_index(
df,
index,
npartitions=None,
shuffle=None,
compute=False,
drop=True,
upsample=1.0,
divisions=None,
partition_size=128e6,
**kwargs,
):
"""See _Frame.set_index for docstring"""
if isinstance(index, Series) and index._name == df.index._name:
return df
if isinstance(index, (DataFrame, tuple, list)):
# Accept ["a"], but not [["a"]]
if (
isinstance(index, list)
and len(index) == 1
and not isinstance(index[0], list) # if index = [["a"]], leave it that way
):
index = index[0]
else:
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index)
)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
if divisions is None:
divisions, mins, maxes = _calculate_divisions(
df, index2, repartition, npartitions, upsample, partition_size
)
if (
mins == sorted(mins)
and maxes == sorted(maxes)
and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
and npartitions == df.npartitions
):
divisions = mins + [maxes[-1]]
result = set_sorted_index(df, index, drop=drop, divisions=divisions)
return result.map_partitions(M.sort_index)
return set_partition(
df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs
)
def remove_nans(divisions):
"""Remove nans from divisions
These sometime pop up when we call min/max on an empty partition
Examples
--------
>>> remove_nans((np.nan, 1, 2))
[1, 1, 2]
>>> remove_nans((1, np.nan, 2))
[1, 2, 2]
>>> remove_nans((1, 2, np.nan))
[1, 2, 2]
"""
divisions = list(divisions)
for i in range(len(divisions) - 2, -1, -1):
if pd.isnull(divisions[i]):
divisions[i] = divisions[i + 1]
for i in range(len(divisions) - 1, -1, -1):
if not pd.isnull(divisions[i]):
for j in range(i + 1, len(divisions)):
divisions[j] = divisions[i]
break
return divisions
def set_partition(
df, index, divisions, max_branch=32, drop=True, shuffle=None, compute=None
):
"""Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
drop: bool, default True
Whether to delete columns to be used as the new index
shuffle: str (optional)
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
max_branch: int (optional)
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
See Also
--------
set_index
shuffle
partd
"""
meta = df._meta._constructor_sliced([0])
if isinstance(divisions, tuple):
# pd.isna considers tuples to be scalars. Convert to a list.
divisions = list(divisions)
if np.isscalar(index):
dtype = df[index].dtype
else:
dtype = index.dtype
if pd.isna(divisions).any() and pd.api.types.is_integer_dtype(dtype):
# Can't construct a Series[int64] when any / all of the divisions are NaN.
divisions = df._meta._constructor_sliced(divisions)
else:
divisions = df._meta._constructor_sliced(divisions, dtype=dtype)
if np.isscalar(index):
partitions = df[index].map_partitions(
set_partitions_pre, divisions=divisions, meta=meta
)
df2 = df.assign(_partitions=partitions)
else:
partitions = index.map_partitions(
set_partitions_pre, divisions=divisions, meta=meta
)
df2 = df.assign(_partitions=partitions, _index=index)
df3 = rearrange_by_column(
df2,
"_partitions",
max_branch=max_branch,
npartitions=len(divisions) - 1,
shuffle=shuffle,
compute=compute,
ignore_index=True,
)
if np.isscalar(index):
df4 = df3.map_partitions(
set_index_post_scalar,
index_name=index,
drop=drop,
column_dtype=df.columns.dtype,
)
else:
df4 = df3.map_partitions(
set_index_post_series,
index_name=index.name,
drop=drop,
column_dtype=df.columns.dtype,
)
df4.divisions = tuple(methods.tolist(divisions))
return df4.map_partitions(M.sort_index)
def shuffle(
df,
index,
shuffle=None,
npartitions=None,
max_branch=32,
ignore_index=False,
compute=None,
):
"""Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
the same index will be in the same partition. Note that this requires
full dataset read, serialization and shuffle. This is expensive. If
possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme. This is not
deterministic if done in parallel.
See Also
--------
set_index
set_partition
shuffle_disk
"""
list_like = pd.api.types.is_list_like(index) and not is_dask_collection(index)
if shuffle == "tasks" and (isinstance(index, str) or list_like):
# Avoid creating the "_partitions" column if possible.
# We currently do this if the user is passing in
# specific column names (and shuffle == "tasks").
if isinstance(index, str):
index = [index]
else:
index = list(index)
nset = set(index)
if nset & set(df.columns) == nset:
return rearrange_by_column(
df,
index,
npartitions=npartitions,
max_branch=max_branch,
shuffle=shuffle,
ignore_index=ignore_index,
compute=compute,
)
if not isinstance(index, _Frame):
if list_like:
# Make sure we don't try to select with pd.Series/pd.Index
index = list(index)
index = df._select_columns_or_index(index)
elif hasattr(index, "to_frame"):
# If this is an index, we should still convert to a
# DataFrame. Otherwise, the hashed values of a column
# selection will not match (important when merging).
index = index.to_frame()
partitions = index.map_partitions(
partitioning_index,
npartitions=npartitions or df.npartitions,
meta=df._meta._constructor_sliced([0]),
transform_divisions=False,
)
df2 = df.assign(_partitions=partitions)
df2._meta.index.name = df._meta.index.name
df3 = rearrange_by_column(
df2,
"_partitions",
npartitions=npartitions,
max_branch=max_branch,
shuffle=shuffle,
compute=compute,
ignore_index=ignore_index,
)
del df3["_partitions"]
return df3
def rearrange_by_divisions(
df,
column,
divisions,
max_branch=None,
shuffle=None,
ascending=True,
na_position="last",
duplicates=True,
):
"""Shuffle dataframe so that column separates along divisions"""
divisions = df._meta._constructor_sliced(divisions)
# duplicates need to be removed sometimes to properly sort null dataframes
if not duplicates:
divisions = divisions.drop_duplicates()
meta = df._meta._constructor_sliced([0])
# Assign target output partitions to every row
partitions = df[column].map_partitions(
set_partitions_pre,
divisions=divisions,
ascending=ascending,
na_position=na_position,
meta=meta,
)
df2 = df.assign(_partitions=partitions)
# Perform shuffle
df3 = rearrange_by_column(
df2,
"_partitions",
max_branch=max_branch,
npartitions=len(divisions) - 1,
shuffle=shuffle,
)
del df3["_partitions"]
return df3
def rearrange_by_column(
df,
col,
npartitions=None,
max_branch=None,
shuffle=None,
compute=None,
ignore_index=False,
):
shuffle = shuffle or config.get("shuffle", None) or "disk"
# if the requested output partitions < input partitions
# we repartition first as shuffling overhead is
# proportionate to the number of input partitions
if npartitions is not None and npartitions < df.npartitions:
df = df.repartition(npartitions=npartitions)
if shuffle == "disk":
return rearrange_by_column_disk(df, col, npartitions, compute=compute)
elif shuffle == "tasks":
df2 = rearrange_by_column_tasks(
df, col, max_branch, npartitions, ignore_index=ignore_index
)
if ignore_index:
df2._meta = df2._meta.reset_index(drop=True)
return df2
else:
raise NotImplementedError("Unknown shuffle method %s" % shuffle)
class maybe_buffered_partd:
"""
If serialized, will return non-buffered partd. Otherwise returns a buffered partd
"""
def __init__(self, buffer=True, tempdir=None):
self.tempdir = tempdir or config.get("temporary_directory", None)
self.buffer = buffer
self.compression = config.get("dataframe.shuffle-compression", None)
def __reduce__(self):
if self.tempdir:
return (maybe_buffered_partd, (False, self.tempdir))
else:
return (maybe_buffered_partd, (False,))
def __call__(self, *args, **kwargs):
import partd
path = tempfile.mkdtemp(suffix=".partd", dir=self.tempdir)
try:
partd_compression = (
getattr(partd.compressed, self.compression)
if self.compression
else None
)
except AttributeError as e:
raise ImportError(
"Not able to import and load {} as compression algorithm."
"Please check if the library is installed and supported by Partd.".format(
self.compression
)
) from e
file = partd.File(path)
partd.file.cleanup_files.append(path)
# Envelope partd file with compression, if set and available
if partd_compression:
file = partd_compression(file)
if self.buffer:
return partd.PandasBlocks(partd.Buffer(partd.Dict(), file))
else:
return partd.PandasBlocks(file)
def rearrange_by_column_disk(df, column, npartitions=None, compute=False):
"""Shuffle using local disk
See Also
--------
rearrange_by_column_tasks:
Same function, but using tasks rather than partd
Has a more informative docstring
"""
if npartitions is None:
npartitions = df.npartitions
token = tokenize(df, column, npartitions)
always_new_token = uuid.uuid1().hex
p = ("zpartd-" + always_new_token,)
dsk1 = {p: (maybe_buffered_partd(),)}
# Partition data on disk
name = "shuffle-partition-" + always_new_token
dsk2 = {
(name, i): (shuffle_group_3, key, column, npartitions, p)
for i, key in enumerate(df.__dask_keys__())
}
dependencies = []
if compute:
graph = HighLevelGraph.merge(df.dask, dsk1, dsk2)
graph = HighLevelGraph.from_collections(name, graph, dependencies=[df])
keys = [p, sorted(dsk2)]
pp, values = compute_as_if_collection(DataFrame, graph, keys)
dsk1 = {p: pp}
dsk2 = dict(zip(sorted(dsk2), values))
else:
dependencies.append(df)
# Barrier
barrier_token = "barrier-" + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = "shuffle-collect-" + token
dsk4 = {
(name, i): (collect, p, i, df._meta, barrier_token) for i in range(npartitions)
}
divisions = (None,) * (npartitions + 1)
layer = toolz.merge(dsk1, dsk2, dsk3, dsk4)
graph = HighLevelGraph.from_collections(name, layer, dependencies=dependencies)
return new_dd_object(graph, name, df._meta, divisions)
def _noop(x, cleanup_token):
"""
    A task that does nothing except pass ``x`` through; ``cleanup_token`` only
    creates a scheduling dependency.
"""
return x
def rearrange_by_column_tasks(
df, column, max_branch=32, npartitions=None, ignore_index=False
):
"""Order divisions of DataFrame so that all values within column(s) align
This enacts a task-based shuffle. It contains most of the tricky logic
around the complex network of tasks. Typically before this function is
called a new column, ``"_partitions"`` has been added to the dataframe,
containing the output partition number of every row. This function
produces a new dataframe where every row is in the proper partition. It
accomplishes this by splitting each input partition into several pieces,
and then concatenating pieces from different input partitions into output
partitions. If there are enough partitions then it does this work in
stages to avoid scheduling overhead.
Lets explain the motivation for this further. Imagine that we have 1000
input partitions and 1000 output partitions. In theory we could split each
input into 1000 pieces, and then move the 1 000 000 resulting pieces
around, and then concatenate them all into 1000 output groups. This would
be fine, but the central scheduling overhead of 1 000 000 tasks would
become a bottleneck. Instead we do this in stages so that we split each of
the 1000 inputs into 30 pieces (we now have 30 000 pieces) move those
around, concatenate back down to 1000, and then do the same process again.
This has the same result as the full transfer, but now we've moved data
twice (expensive) but done so with only 60 000 tasks (cheap).
Note that the `column` input may correspond to a list of columns (rather
than just a single column name). In this case, the `shuffle_group` and
`shuffle_group_2` functions will use hashing to map each row to an output
    partition. This approach may require the same rows to be hashed multiple
times, but avoids the need to assign a new "_partitions" column.
Parameters
----------
df: dask.dataframe.DataFrame
column: str or list
A column name on which we want to split, commonly ``"_partitions"``
which is assigned by functions upstream. This could also be a list of
columns (in which case shuffle_group will create a hash array/column).
max_branch: int
The maximum number of splits per input partition. Defaults to 32.
If there are more partitions than this then the shuffling will occur in
stages in order to avoid creating npartitions**2 tasks
Increasing this number increases scheduling overhead but decreases the
number of full-dataset transfers that we have to make.
npartitions: Optional[int]
The desired number of output partitions
Returns
-------
df3: dask.dataframe.DataFrame
See also
--------
rearrange_by_column_disk: same operation, but uses partd
rearrange_by_column: parent function that calls this or rearrange_by_column_disk
shuffle_group: does the actual splitting per-partition
"""
max_branch = max_branch or 32
if (npartitions or df.npartitions) <= max_branch:
# We are creating a small number of output partitions.
# No need for staged shuffling. Staged shuffling will
# sometimes require extra work/communication in this case.
token = tokenize(df, column, npartitions)
shuffle_name = f"simple-shuffle-{token}"
npartitions = npartitions or df.npartitions
shuffle_layer = SimpleShuffleLayer(
shuffle_name,
column,
npartitions,
df.npartitions,
ignore_index,
df._name,
df._meta,
)
graph = HighLevelGraph.from_collections(
shuffle_name, shuffle_layer, dependencies=[df]
)
return new_dd_object(graph, shuffle_name, df._meta, [None] * (npartitions + 1))
n = df.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k**stages)]
npartitions_orig = df.npartitions
token = tokenize(df, stages, column, n, k)
for stage in range(stages):
stage_name = f"shuffle-{stage}-{token}"
stage_layer = ShuffleLayer(
stage_name,
column,
inputs,
stage,
npartitions,
n,
k,
ignore_index,
df._name,
df._meta,
)
graph = HighLevelGraph.from_collections(
stage_name, stage_layer, dependencies=[df]
)
df = new_dd_object(graph, stage_name, df._meta, df.divisions)
if npartitions is not None and npartitions != npartitions_orig:
token = tokenize(df, npartitions)
repartition_group_token = "repartition-group-" + token
dsk = {
(repartition_group_token, i): (
shuffle_group_2,
k,
column,
ignore_index,
npartitions,
)
for i, k in enumerate(df.__dask_keys__())
}
repartition_get_name = "repartition-get-" + token
for p in range(npartitions):
dsk[(repartition_get_name, p)] = (
shuffle_group_get,
(repartition_group_token, p % npartitions_orig),
p,
)
graph2 = HighLevelGraph.from_collections(
repartition_get_name, dsk, dependencies=[df]
)
df2 = new_dd_object(
graph2, repartition_get_name, df._meta, [None] * (npartitions + 1)
)
else:
df2 = df
df2.divisions = (None,) * (npartitions_orig + 1)
return df2
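def _staged_shuffle_plan(npartitions_in, max_branch=32):
    # Hedged sketch (assumed helper, illustration only): reproduces the
    # stage/split arithmetic used above. For 1000 input partitions and
    # max_branch=32 this yields 2 stages of k=32 splits each, i.e. on the
    # order of 2 * 1000 * 32 = 64,000 pieces to move, instead of the
    # 1,000,000 a single all-to-all split would need.
    n = npartitions_in
    if n <= max_branch:
        return {"stages": 1, "k": n}
    stages = int(math.ceil(math.log(n) / math.log(max_branch)))
    k = int(math.ceil(n ** (1 / stages)))
    return {"stages": stages, "k": k, "pieces": stages * n * k}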
########################################################
# Various convenience functions to be run by the above #
########################################################
def partitioning_index(df, npartitions):
"""
Computes a deterministic index mapping each record to a partition.
Identical rows are mapped to the same partition.
Parameters
----------
df : DataFrame/Series/Index
npartitions : int
The number of partitions to group into.
Returns
-------
partitions : ndarray
An array of int64 values mapping each record to a partition.
"""
return hash_object_dispatch(df, index=False) % int(npartitions)
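def _demo_partitioning_index():
    # Hedged usage sketch (assumed demo): buckets a small pandas frame into 4
    # partitions with partitioning_index. Identical rows (0 and 2 below) hash
    # to the same bucket, which is what makes hash-based shuffles line up.
    pdf = pd.DataFrame({"a": [1, 2, 1, 3], "b": ["x", "y", "x", "z"]})
    buckets = partitioning_index(pdf, 4)
    assert buckets[0] == buckets[2]
    return buckets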
def barrier(args):
list(args)
return 0
def cleanup_partd_files(p, keys):
"""
Cleanup the files in a partd.File dataset.
Parameters
----------
p : partd.Interface
File or Encode wrapping a file should be OK.
keys: List
Just for scheduling purposes, not actually used.
"""
import partd
if isinstance(p, partd.Encode):
maybe_file = p.partd
else:
        maybe_file = p  # fall back to p itself; it may already be a partd.File
if isinstance(maybe_file, partd.File):
path = maybe_file.path
else:
path = None
if path:
shutil.rmtree(path, ignore_errors=True)
def collect(p, part, meta, barrier_token):
"""Collect partitions from partd, yield dataframes"""
with ensure_cleanup_on_exception(p):
res = p.get(part)
return res if len(res) > 0 else meta
def set_partitions_pre(s, divisions, ascending=True, na_position="last"):
try:
if ascending:
partitions = divisions.searchsorted(s, side="right") - 1
else:
partitions = len(divisions) - divisions.searchsorted(s, side="right") - 1
except TypeError:
# `searchsorted` fails if `s` contains nulls and strings
partitions = np.empty(len(s), dtype="int32")
not_null = s.notna()
if ascending:
partitions[not_null] = divisions.searchsorted(s[not_null], side="right") - 1
else:
partitions[not_null] = (
len(divisions) - divisions.searchsorted(s[not_null], side="right") - 1
)
partitions[(partitions < 0) | (partitions >= len(divisions) - 1)] = (
len(divisions) - 2 if ascending else 0
)
partitions[s.isna().values] = len(divisions) - 2 if na_position == "last" else 0
return partitions
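# Worked example (hypothetical values, not part of the upstream module):
# with divisions = pd.Series([0, 5, 10]) and ascending=True,
#   value 3  -> searchsorted(..., side="right") - 1 = 1 - 1 = 0   (first partition)
#   value 7  -> 2 - 1 = 1                                          (second partition)
#   value 10 -> 3 - 1 = 2, clamped to len(divisions) - 2 = 1, because the last
#               division is inclusive
# and null values are sent to the last partition when na_position == "last".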
def shuffle_group_2(df, cols, ignore_index, nparts):
if not len(df):
return {}, df
if isinstance(cols, str):
cols = [cols]
if cols and cols[0] == "_partitions":
ind = df[cols[0]].astype(np.int32)
else:
ind = (
hash_object_dispatch(df[cols] if cols else df, index=False) % int(nparts)
).astype(np.int32)
n = ind.max() + 1
result2 = group_split_dispatch(df, ind.values.view(), n, ignore_index=ignore_index)
return result2, df.iloc[:0]
def shuffle_group_get(g_head, i):
g, head = g_head
if i in g:
return g[i]
else:
return head
def shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):
"""Splits dataframe into groups
The group is determined by their final partition, and which stage we are in
in the shuffle
Parameters
----------
df: DataFrame
cols: str or list
Column name(s) on which to split the dataframe. If ``cols`` is not
"_partitions", hashing will be used to determine target partition
stage: int
        We shuffle dataframes with many partitions in a few stages to avoid
a quadratic number of tasks. This number corresponds to which stage
we're in, starting from zero up to some small integer
k: int
Desired number of splits from this dataframe
    npartitions: int
Total number of output partitions for the full dataframe
nfinal: int
Total number of output partitions after repartitioning
Returns
-------
out: Dict[int, DataFrame]
A dictionary mapping integers in {0..k} to dataframes such that the
hash values of ``df[col]`` are well partitioned.
"""
if isinstance(cols, str):
cols = [cols]
if cols and cols[0] == "_partitions":
ind = df[cols[0]]
else:
ind = hash_object_dispatch(df[cols] if cols else df, index=False)
if nfinal and nfinal != npartitions:
ind = ind % int(nfinal)
c = ind.values
typ = np.min_scalar_type(npartitions * 2)
c = np.mod(c, npartitions).astype(typ, copy=False)
np.floor_divide(c, k**stage, out=c)
np.mod(c, k, out=c)
return group_split_dispatch(df, c, k, ignore_index=ignore_index)
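# Worked example (hypothetical values, not part of the upstream module):
# with npartitions=4, k=2 and nfinal == npartitions, a row whose hash is 6 is
# destined for output partition 6 % 4 == 2 (binary 10).  At stage 0 its group is
# (2 // 2**0) % 2 == 0 (the low base-k digit); at stage 1 it is
# (2 // 2**1) % 2 == 1 (the high digit).  Each stage therefore routes rows by
# one base-k digit of their final partition number.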
@contextlib.contextmanager
def ensure_cleanup_on_exception(p):
"""Ensure a partd.File is cleaned up.
We have several tasks referring to a `partd.File` instance. We want to
ensure that the file is cleaned up if and only if there's an exception
in the tasks using the `partd.File`.
"""
try:
yield
except Exception:
# the function (e.g. shuffle_group_3) had an internal exception.
# We'll cleanup our temporary files and re-raise.
try:
p.drop()
except Exception:
logger.exception("ignoring exception in ensure_cleanup_on_exception")
raise
def shuffle_group_3(df, col, npartitions, p):
with ensure_cleanup_on_exception(p):
g = df.groupby(col)
d = {i: g.get_group(i) for i in g.groups}
p.append(d, fsync=True)
def set_index_post_scalar(df, index_name, drop, column_dtype):
df2 = df.drop("_partitions", axis=1).set_index(index_name, drop=drop)
df2.columns = df2.columns.astype(column_dtype)
return df2
def set_index_post_series(df, index_name, drop, column_dtype):
df2 = df.drop("_partitions", axis=1).set_index("_index", drop=True)
df2.index.name = index_name
df2.columns = df2.columns.astype(column_dtype)
return df2
def drop_overlap(df, index):
return df.drop(index) if index in df.index else df
def get_overlap(df, index):
return df.loc[[index]] if index in df.index else df._constructor()
def fix_overlap(ddf, mins, maxes, lens):
"""Ensures that the upper bound on each partition of ddf (except the last) is exclusive
This is accomplished by first removing empty partitions, then altering existing
partitions as needed to include all the values for a particular index value in
one partition.
"""
name = "fix-overlap-" + tokenize(ddf, mins, maxes, lens)
non_empties = [i for i, l in enumerate(lens) if l != 0]
# drop empty partitions by mapping each partition in a new graph to a particular
# partition on the old graph.
dsk = {(name, i): (ddf._name, div) for i, div in enumerate(non_empties)}
divisions = tuple(mins) + (maxes[-1],)
overlap = [i for i in range(1, len(mins)) if mins[i] >= maxes[i - 1]]
frames = []
for i in overlap:
# `frames` is a list of data from previous partitions that we may want to
# move to partition i. Here, we add "overlap" from the previous partition
# (i-1) to this list.
frames.append((get_overlap, dsk[(name, i - 1)], divisions[i]))
# Make sure that any data added from partition i-1 to `frames` is removed
# from partition i-1.
dsk[(name, i - 1)] = (drop_overlap, dsk[(name, i - 1)], divisions[i])
# We do not want to move "overlap" from the previous partition (i-1) into
# this partition (i) if the data from this partition will need to be moved
# to the next partition (i+1) anyway. If we concatenate data too early,
# we may lose rows (https://github.com/dask/dask/issues/6972).
if i == len(mins) - 2 or divisions[i] != divisions[i + 1]:
frames.append(dsk[(name, i)])
dsk[(name, i)] = (methods.concat, frames)
frames = []
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
return new_dd_object(graph, name, ddf._meta, divisions)
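# Illustrative sketch (hypothetical partitions, not part of the upstream module):
# with mins == [0, 5] and maxes == [5, 9] the divisions become (0, 5, 9) and
# partition 1 overlaps partition 0 (mins[1] >= maxes[0]).  The rows labelled 5
# at the tail of partition 0 are extracted with ``get_overlap``, removed from
# partition 0 with ``drop_overlap`` and concatenated onto partition 1, so the
# upper bound of partition 0 becomes exclusive.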
def _compute_partition_stats(column, allow_overlap=False, **kwargs) -> tuple:
"""For a given column, compute the min, max, and len of each partition.
And make sure that the partitions are sorted relative to each other.
NOTE: this does not guarantee that every partition is internally sorted.
"""
mins = column.map_partitions(M.min, meta=column)
maxes = column.map_partitions(M.max, meta=column)
lens = column.map_partitions(len, meta=column)
mins, maxes, lens = compute(mins, maxes, lens, **kwargs)
mins = remove_nans(mins)
maxes = remove_nans(maxes)
non_empty_mins = [m for m, l in zip(mins, lens) if l != 0]
non_empty_maxes = [m for m, l in zip(maxes, lens) if l != 0]
if (
sorted(non_empty_mins) != non_empty_mins
or sorted(non_empty_maxes) != non_empty_maxes
):
raise ValueError(
f"Partitions are not sorted ascending by {column.name or 'the index'}",
f"In your dataset the (min, max, len) values of {column.name or 'the index'} "
f"for each partition are : {list(zip(mins, maxes, lens))}",
)
if not allow_overlap and any(
a <= b for a, b in zip(non_empty_mins[1:], non_empty_maxes[:-1])
):
warnings.warn(
"Partitions have overlapping values, so divisions are non-unique."
"Use `set_index(sorted=True)` with no `divisions` to allow dask to fix the overlap. "
f"In your dataset the (min, max, len) values of {column.name or 'the index'} "
f"for each partition are : {list(zip(mins, maxes, lens))}",
UserWarning,
)
if not allow_overlap:
return (mins, maxes, lens)
else:
return (non_empty_mins, non_empty_maxes, lens)
def compute_divisions(df, col=None, **kwargs) -> tuple:
column = df.index if col is None else df[col]
mins, maxes, _ = _compute_partition_stats(column, allow_overlap=False, **kwargs)
return tuple(mins) + (maxes[-1],)
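# Illustrative sketch (hypothetical statistics, not part of the upstream module):
# if the per-partition minima are [0, 10, 20] and the maxima are [9, 19, 29],
# ``compute_divisions`` returns (0, 10, 20, 29): every partition contributes its
# minimum, and the final partition also contributes its maximum.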
def compute_and_set_divisions(df, **kwargs):
mins, maxes, lens = _compute_partition_stats(df.index, allow_overlap=True, **kwargs)
if len(mins) == len(df.divisions) - 1:
df.divisions = tuple(mins) + (maxes[-1],)
if not any(mins[i] >= maxes[i - 1] for i in range(1, len(mins))):
return df
return fix_overlap(df, mins, maxes, lens)
def set_sorted_index(df, index, drop=True, divisions=None, **kwargs):
if not isinstance(index, Series):
meta = df._meta.set_index(index, drop=drop)
else:
meta = df._meta.set_index(index._meta, drop=drop)
result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)
if not divisions:
return compute_and_set_divisions(result, **kwargs)
elif len(divisions) != len(df.divisions):
msg = (
"When doing `df.set_index(col, sorted=True, divisions=...)`, "
"divisions indicates known splits in the index column. In this "
"case divisions must be the same length as the existing "
"divisions in `df`\n\n"
"If the intent is to repartition into new divisions after "
"setting the index, you probably want:\n\n"
"`df.set_index(col, sorted=True).repartition(divisions=divisions)`"
)
raise ValueError(msg)
result.divisions = tuple(divisions)
return result
|
|
# coding=utf-8
"""Probabilistic Context Free Grammar."""
from collections import defaultdict
import random
import re
import sys
import os
VERBS = {"research": "researching", "mine": "mining", "craft": "crafting"}
class MaximumDepthExceeded(Exception):
"""Exception that is raised if the parse tree runs too deep."""
pass
class SymbolNotFound(Exception):
"""Fix yo grammar."""
pass
class Grammar(object):
grammars = {}
def __init__(self, grammar_string):
self.grammar = self.parse_grammar(grammar_string)
@classmethod
def load(cls, grammar_file):
with open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data",
grammar_file + ".grammar",
)
) as f:
cls.grammars[grammar_file] = cls(f.read())
return cls.grammars[grammar_file]
def weighted_choice(self, options, weights):
"""Choose a random item, according to weights.
Args:
options: list
weights: list of floats -- don't have to add up to 1
Returns:
element of options
"""
target = random.random() * sum(weights)
acc = 0
for idx, weight in enumerate(weights):
acc += weight
if acc > target:
return options[idx]
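    # Illustrative sketch (hypothetical values, not part of the original module):
    #
    #   >>> g.weighted_choice(["dog", "cat"], [3, 1])
    #
    # returns "dog" roughly 75% of the time and "cat" roughly 25% of the time;
    # the weights only need to be non-negative, they do not have to sum to 1.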
def parse_grammar(self, grammar):
"""Return a dictionary mapping symbols to extensions.
Example:
>>> grammar = '''
@s -> @n @v
@s -> @n @v @n
@n -> dog | cat
@v -> chases %3 | eats %2.0'''
>>> parse_grammar(grammar)
{
"@s": [
[ "@n @v", 0.3 ],
[ "@n @v @n", 0.7 ]
],
"@v": [
[ "chases", 0.75 ],
[ "eats", 0.25 ]
],
"@n": [
[ "dog", 0.5 ],
[ "cat", 0.5 ]
]
}
Args:
grammar: str
Returns:
dict
"""
weight_re = r"%((?:[\d]*\.)?[\d]+)"
result = defaultdict(list)
for line in grammar.splitlines():
if "->" in line:
symbol, extension = line.split("->")
for extension in extension.split("|"):
weight = re.search(weight_re, extension)
if weight:
extension = re.sub(weight_re, "", extension)
weight = float(weight.group(1))
else:
weight = 1.0
result[symbol.strip()].append((extension.strip(), weight))
# normalize
for symbol, extensions in result.items():
total_weight = sum(ext[1] for ext in extensions)
result[symbol] = [(ext[0], ext[1] / total_weight) for ext in extensions]
return dict(result)
def transform(self, parts, rule):
if rule == "gen":
if parts[-1].rstrip().endswith("s"):
parts[-1] = parts[-1].rstrip() + "'"
else:
parts[-1] = parts[-1].rstrip() + "'s"
if rule == "initial":
return [p[0].upper() for p in parts]
if rule == "title":
return [p if p in ("by", "of", "and") else p.capitalize() for p in parts]
return parts
def extend_rule(self, symbol="@s", max_depth=8):
"""Start with a symbol and returns a list of tokens.
Args:
symbol: str -- should start with @
max_depth: int -- maximum tree depth.
Returns:
list -- list of parts
Raises:
MaximumDepthExceeded
SymbolNotFound
"""
rule = None
if "~" in symbol:
symbol, rule = symbol.split("~")
if max_depth == 0:
raise MaximumDepthExceeded
if symbol not in self.grammar:
raise SymbolNotFound(symbol)
extension = self.weighted_choice(*zip(*self.grammar[symbol]))
result = self.extend_sentence(extension, max_depth)
return self.transform(result, rule)
def extend_sentence(self, sentence, max_depth=8):
result = []
for part in sentence.replace("\n", "\n ").split(" "):
if part.startswith("@"):
result.extend(self.extend_rule(part, max_depth - 1))
else:
result.append(part)
return result
# def extend_all(sentence, grammar, max_depth=8):
# if max_depth == 0:
# yield " ".join(sentence)
# else:
# if not isinstance(sentence, list):
# sentence = sentence.split()
# first_chars = [c[0] for c in sentence]
# try:
# part = first_chars.index("@")
# for extension, pr in grammar[sentence[part]]:
# for r in extend_all(sentence[:part] + [extension] + sentence[part + 1:], grammar, max_depth - 1):
# yield r
# except ValueError:
# yield " ".join(sentence)
def assemble_sentence(self, parts):
"""Clean up parts and applies some syntactic rules.
Args:
parts: list
Returns:
str
"""
sentence = " ".join(parts)
sentence = re.sub(r" ([,.!?])", r"\1", sentence)
sentence = re.sub(r"' ([A-Za-z0-9 ]+) '", r"'\1'", sentence)
sentence = re.sub(r" +", r" ", sentence)
sentence = re.sub(r"\n ", "\n", sentence)
return sentence.strip()
def generate(self, sentence=None):
"""Generate a sentence from a grammar string.
Args:
            sentence: str -- a sentence of symbols and words to expand
Returns:
str
"""
parts = None
while not parts:
try:
parts = self.extend_sentence(sentence)
except MaximumDepthExceeded:
pass
except SymbolNotFound as e:
print(f"WARNING: Symbol {e.args[0]} not found", file=sys.stderr)
return self.assemble_sentence(parts)
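# Illustrative usage (hypothetical grammar string, not one of the bundled files):
#
#   >>> g = Grammar("@s -> hello @n\n@n -> world %1 | there %1")
#   >>> g.generate("@s")
#   'hello world'      # or 'hello there', each chosen with probability 0.5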
NAMES = Grammar.load("names")
GUILDS = Grammar.load("guild_names")
LETTER = Grammar.load("letter")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ErrorResponseLinkedStorage(msrest.serialization.Model):
"""ErrorResponseLinkedStorage.
:ivar error: Error response indicates Insights service is not able to process the incoming
request. The reason is provided in the error message.
:vartype error:
~azure.mgmt.applicationinsights.v2020_06_02_preview.models.ErrorResponseLinkedStorageError
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponseLinkedStorageError'},
}
def __init__(
self,
*,
error: Optional["ErrorResponseLinkedStorageError"] = None,
**kwargs
):
"""
:keyword error: Error response indicates Insights service is not able to process the incoming
request. The reason is provided in the error message.
:paramtype error:
~azure.mgmt.applicationinsights.v2020_06_02_preview.models.ErrorResponseLinkedStorageError
"""
super(ErrorResponseLinkedStorage, self).__init__(**kwargs)
self.error = error
class ErrorResponseLinkedStorageError(msrest.serialization.Model):
"""Error response indicates Insights service is not able to process the incoming request. The reason is provided in the error message.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code.
:vartype code: str
:ivar message: Error message indicating why the operation failed.
:vartype message: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorResponseLinkedStorageError, self).__init__(**kwargs)
self.code = None
self.message = None
class LiveTokenResponse(msrest.serialization.Model):
"""The response to a live token query.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar live_token: JWT token for accessing live metrics stream data.
:vartype live_token: str
"""
_validation = {
'live_token': {'readonly': True},
}
_attribute_map = {
'live_token': {'key': 'liveToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(LiveTokenResponse, self).__init__(**kwargs)
self.live_token = None
class OperationInfo(msrest.serialization.Model):
"""Information about an operation.
:ivar provider: Name of the provider.
:vartype provider: str
:ivar resource: Name of the resource type.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword provider: Name of the provider.
:paramtype provider: str
:keyword resource: Name of the resource type.
:paramtype resource: str
:keyword operation: Name of the operation.
:paramtype operation: str
:keyword description: Description of the operation.
:paramtype description: str
"""
super(OperationInfo, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationLive(msrest.serialization.Model):
"""Represents an operation returned by the GetOperations request.
:ivar name: Name of the operation.
:vartype name: str
:ivar display: Display name of the operation.
:vartype display: ~azure.mgmt.applicationinsights.v2020_06_02_preview.models.OperationInfo
:ivar origin: Origin of the operation.
:vartype origin: str
:ivar properties: Properties of the operation.
:vartype properties: any
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationInfo'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationInfo"] = None,
origin: Optional[str] = None,
properties: Optional[Any] = None,
**kwargs
):
"""
:keyword name: Name of the operation.
:paramtype name: str
:keyword display: Display name of the operation.
:paramtype display: ~azure.mgmt.applicationinsights.v2020_06_02_preview.models.OperationInfo
:keyword origin: Origin of the operation.
:paramtype origin: str
:keyword properties: Properties of the operation.
:paramtype properties: any
"""
super(OperationLive, self).__init__(**kwargs)
self.name = name
self.display = display
self.origin = origin
self.properties = properties
class OperationsListResult(msrest.serialization.Model):
"""Result of the List Operations operation.
:ivar value: A collection of operations.
:vartype value: list[~azure.mgmt.applicationinsights.v2020_06_02_preview.models.OperationLive]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationLive]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["OperationLive"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: A collection of operations.
:paramtype value:
list[~azure.mgmt.applicationinsights.v2020_06_02_preview.models.OperationLive]
:keyword next_link: URL to get the next set of operation list results if there are any.
:paramtype next_link: str
"""
super(OperationsListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
|
|
import os
import StringIO
from datetime import datetime, date
from django.utils.translation import ugettext_lazy as _
from django.core import urlresolvers
from django.db import models
from django.contrib.auth import models as auth_models
from filer.models.filemodels import File
from filer.utils.pil_exif import get_exif_for_file, set_exif_subject_location
from filer.settings import FILER_ADMIN_ICON_SIZES, FILER_PUBLICMEDIA_PREFIX, FILER_PRIVATEMEDIA_PREFIX, FILER_STATICMEDIA_PREFIX
from django.conf import settings
from sorl.thumbnail.main import DjangoThumbnail, build_thumbnail_name
from sorl.thumbnail.fields import ALL_ARGS
from PIL import Image as PILImage
class Image(File):
SIDEBAR_IMAGE_WIDTH = 210
DEFAULT_THUMBNAILS = {
'admin_clipboard_icon': {'size': (32,32), 'options': ['crop','upscale']},
'admin_sidebar_preview': {'size': (SIDEBAR_IMAGE_WIDTH,10000), 'options': []},
'admin_directory_listing_icon': {'size': (48,48), 'options': ['crop','upscale']},
'admin_tiny_icon': {'size': (32,32), 'options': ['crop','upscale']},
}
file_type = 'image'
_icon = "image"
_height = models.IntegerField(null=True, blank=True)
_width = models.IntegerField(null=True, blank=True)
date_taken = models.DateTimeField(_('date taken'), null=True, blank=True, editable=False)
default_alt_text = models.CharField(max_length=255, blank=True, null=True)
default_caption = models.CharField(max_length=255, blank=True, null=True)
author = models.CharField(max_length=255, null=True, blank=True)
must_always_publish_author_credit = models.BooleanField(default=False)
must_always_publish_copyright = models.BooleanField(default=False)
subject_location = models.CharField(max_length=64, null=True, blank=True, default=None)
def _check_validity(self):
if not self.name:# or not self.contact:
return False
return True
def sidebar_image_ratio(self):
if self.width:
return float(self.width)/float(self.SIDEBAR_IMAGE_WIDTH)
else:
return 1.0
def save(self, *args, **kwargs):
if self.date_taken is None:
try:
exif_date = self.exif.get('DateTimeOriginal',None)
if exif_date is not None:
d, t = str.split(exif_date.values)
year, month, day = d.split(':')
hour, minute, second = t.split(':')
self.date_taken = datetime(int(year), int(month), int(day),
int(hour), int(minute), int(second))
except:
pass
if self.date_taken is None:
self.date_taken = datetime.now()
#if not self.contact:
# self.contact = self.owner
self.has_all_mandatory_data = self._check_validity()
try:
if self.subject_location:
parts = self.subject_location.split(',')
pos_x = int(parts[0])
pos_y = int(parts[1])
sl = (int(pos_x), int(pos_y) )
exif_sl = self.exif.get('SubjectLocation', None)
if self._file and not sl == exif_sl:
#self._file.open()
fd_source = StringIO.StringIO(self._file.read())
#self._file.close()
set_exif_subject_location(sl, fd_source, self._file.path)
except:
# probably the image is missing. nevermind
pass
try:
# do this more efficient somehow?
self._width, self._height = PILImage.open(self._file).size
except Exception, e:
# probably the image is missing. nevermind.
pass
super(Image, self).save(*args, **kwargs)
def _get_exif(self):
if hasattr(self, '_exif_cache'):
return self._exif_cache
else:
if self._file:
self._exif_cache = get_exif_for_file(self._file.path)
else:
self._exif_cache = {}
return self._exif_cache
exif = property(_get_exif)
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, type):
"""
Return true if the current user has permission on this
image. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated() or not user.is_staff:
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, type)
else:
return False
@property
def label(self):
if self.name in ['',None]:
return self.original_filename or 'unnamed file'
else:
return self.name
@property
def width(self):
return self._width or 0
@property
def height(self):
return self._height or 0
@property
def icons(self):
if not getattr(self, '_icon_thumbnails_cache', False):
r = {}
for size in FILER_ADMIN_ICON_SIZES:
try:
args = {'size': (int(size),int(size)), 'options': ['crop','upscale']}
# Build the DjangoThumbnail kwargs.
kwargs = {}
for k, v in args.items():
kwargs[ALL_ARGS[k]] = v
# Build the destination filename and return the thumbnail.
name_kwargs = {}
for key in ['size', 'options', 'quality', 'basedir', 'subdir',
'prefix', 'extension']:
name_kwargs[key] = args.get(key)
source = self._file
dest = build_thumbnail_name(source.name, **name_kwargs)
r[size] = unicode(DjangoThumbnail(source, relative_dest=dest, **kwargs))
except Exception, e:
pass
setattr(self, '_icon_thumbnails_cache', r)
return getattr(self, '_icon_thumbnails_cache')
def _build_thumbnail(self, args):
try:
# Build the DjangoThumbnail kwargs.
kwargs = {}
for k, v in args.items():
kwargs[ALL_ARGS[k]] = v
# Build the destination filename and return the thumbnail.
name_kwargs = {}
for key in ['size', 'options', 'quality', 'basedir', 'subdir',
'prefix', 'extension']:
name_kwargs[key] = args.get(key)
source = self._file
dest = build_thumbnail_name(source.name, **name_kwargs)
return DjangoThumbnail(source, relative_dest=dest, **kwargs)
except:
return os.path.normpath(u"%s/icons/missingfile_%sx%s.png" % (FILER_STATICMEDIA_PREFIX, 32, 32,))
@property
def thumbnails(self):
# we build an extra dict here mainly
# to prevent the default errors to
# get thrown and to add a default missing
# image (not yet)
if not hasattr(self, '_thumbnails'):
tns = {}
for name, opts in Image.DEFAULT_THUMBNAILS.items():
tns[name] = unicode(self._build_thumbnail(opts))
self._thumbnails = tns
return self._thumbnails
@property
def absolute_image_url(self):
return self.url
@property
def rel_image_url(self):
'return the image url relative to MEDIA_URL'
try:
rel_url = u"%s" % self._file.url
if rel_url.startswith(settings.MEDIA_URL):
before, match, rel_url = rel_url.partition(settings.MEDIA_URL)
return rel_url
except Exception, e:
return ''
def get_admin_url_path(self):
return urlresolvers.reverse('admin:filer_image_change', args=(self.id,))
def __unicode__(self):
# this simulates the way a file field works and
# allows the sorl thumbnail tag to use the Image model
# as if it was a image field
return self.rel_image_url
class Meta:
app_label = 'filer'
verbose_name = _('Image')
verbose_name_plural = _('Images')
|
|
import os
import sys
import json
from datetime import date, timedelta
from .normalize import slugify, get_date_year, get_eprint_id, get_object_type, has_creator_ids, has_editor_ids, has_contributor_ids, make_label, get_sort_name, get_sort_year, get_sort_subject, get_sort_publication, get_sort_collection, get_sort_event, get_lastmod_date, get_sort_lastmod, get_sort_issn, get_sort_corp_creator, get_sort_place_of_pub
class Aggregator:
"""This class models the various Eprint aggregations used across Caltech Library repositories"""
def __init__(self, c_name, objs):
self.c_name = c_name
self.objs = objs
def aggregate_creator(self):
# now build our people list and create a people, eprint_id, title list
people = {}
for obj in self.objs:
if has_creator_ids(obj):
# For each author add a reference to object
for creator in obj['creators']:
creator_id = ''
if 'id' in creator:
creator_id = creator['id']
if 'creator_id' in creator:
creator_id = creator['creator_id']
creator_name = creator['display_name']
if creator_id != '':
if not creator_id in people:
people[creator_id] = {
'key': creator_id,
'label': creator_name,
'count' : 0,
'people_id': creator_id,
'sort_name': creator_name,
'objects' : []
}
people[creator_id]['count'] += 1
people[creator_id]['objects'].append(obj)
# Now that we have a people list we need to sort it by name
people_list = []
for key in people:
people_list.append(people[key])
people_list.sort(key = get_sort_name)
return people_list
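    # Illustrative sketch (hypothetical records, not drawn from any repository):
    # two eprint objects sharing creator id "Doe-J" aggregate into one entry like
    #   {'key': 'Doe-J', 'label': 'Doe, J.', 'count': 2, 'people_id': 'Doe-J',
    #    'sort_name': 'Doe, J.', 'objects': [obj1, obj2]}
    # and the resulting list is ordered by get_sort_name.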
def aggregate_editor(self):
# now build our people list based on editors.items
people = {}
for obj in self.objs:
if has_editor_ids(obj):
# For each author add a reference to object
for editor in obj['editors']:
editor_id = ''
if 'id' in editor:
editor_id = editor['id']
if 'editor_id' in editor:
editor_id = editor['editor_id']
editor_name = editor['display_name']
if editor_id != '':
if not editor_id in people:
people[editor_id] = {
'key': editor_id,
'label': editor_name,
'count' : 0,
'people_id': editor_id,
'sort_name': editor_name,
'objects' : []
}
people[editor_id]['count'] += 1
people[editor_id]['objects'].append(obj)
# Now that we have a people list we need to sort it by name
people_list = []
for key in people:
people_list.append(people[key])
people_list.sort(key = get_sort_name)
return people_list
def aggregate_contributor(self):
# now build our people list based on contributors.items
people = {}
for obj in self.objs:
if has_contributor_ids(obj):
# For each author add a reference to object
for contributor in obj['contributors']:
contributor_id = ''
if 'id' in contributor:
contributor_id = contributor['id']
if 'contributor_id' in contributor:
contributor_id = contributor['contributor_id']
contributor_name = contributor['display_name']
if contributor_id != '':
if not contributor_id in people:
people[contributor_id] = {
'key': contributor_id,
'label': contributor_name,
'count' : 0,
'people_id': contributor_id,
'sort_name': contributor_name,
'objects' : []
}
people[contributor_id]['count'] += 1
people[contributor_id]['objects'].append(obj)
# Now that we have a people list we need to sort it by name
people_list = []
for key in people:
people_list.append(people[key])
people_list.sort(key = get_sort_name)
return people_list
def aggregate_by_view_name(self, name, subject_map):
if name == 'person-az':
return self.aggregate_person_az()
elif name == 'person':
return self.aggregate_person()
elif name == 'author':
return self.aggregate_author()
elif name == 'editor':
return self.aggregate_editor()
elif name == 'contributor':
return self.aggregate_contributor()
elif name == 'year':
return self.aggregate_year()
elif name == 'publication':
return self.aggregate_publication()
elif name == 'place_of_pub':
return self.aggregate_place_of_pub()
elif name == 'corp_creators':
return self.aggregate_corp_creators()
elif name == 'issuing_body':
return self.aggregate_issuing_body()
elif name == 'issn':
return self.aggregate_issn()
elif name == 'collection':
return self.aggregate_collection()
elif name == 'event':
return self.aggregate_event()
elif name == 'subjects':
return self.aggregate_subjects(subject_map)
elif name == 'ids':
return self.aggregate_ids()
elif name == 'types':
return self.aggregate_types()
elif name == 'latest':
return self.aggregate_latest()
else:
            print(f'WARNING: {name} is an unknown aggregation type')
return None
def aggregate_person_az(self):
return self.aggregate_creator()
def aggregate_person(self):
return self.aggregate_creator()
def aggregate_author(self):
return self.aggregate_creator()
def aggregate_year(self):
years = {}
year = ''
for obj in self.objs:
if ('date' in obj):
year = obj['date'][0:4].strip()
if not year in years:
years[year] = {
'key': str(year),
'label': str(year),
'count': 0,
'year': year,
'objects': []
}
years[year]['count'] += 1
years[year]['objects'].append(obj)
year_list = []
for key in years:
year_list.append(years[key])
year_list.sort(key = get_sort_year, reverse = True)
return year_list
def aggregate_publication(self):
publications = {}
publication = ''
for obj in self.objs:
eprint_id = get_eprint_id(obj)
year = get_date_year(obj)
if ('publication' in obj):
publication = obj['publication']
key = slugify(publication)
if not publication in publications:
publications[publication] = {
'key': key,
'label': str(publication),
'count': 0,
'year': year,
'objects': []
}
publications[publication]['count'] += 1
publications[publication]['objects'].append(obj)
publication_list = []
for key in publications:
publication_list.append(publications[key])
publication_list.sort(key = get_sort_publication)
return publication_list
def aggregate_corp_creators(self):
corp_creators = {}
for obj in self.objs:
year = get_date_year(obj)
if ('corp_creators' in obj) and ('items' in obj['corp_creators']):
for item in obj['corp_creators']['items']:
corp_creator = item['name'].strip()
if 'id' in item:
key = str(item['id'])
else:
key = slugify(corp_creator)
if not key in corp_creators:
corp_creators[key] = {
'key': key,
'label': corp_creator,
'count': 0,
'year': year,
'objects': []
}
corp_creators[key]['count'] += 1
corp_creators[key]['objects'].append(obj)
corp_creator_list = []
for key in corp_creators:
corp_creator_list.append(corp_creators[key])
corp_creator_list.sort(key = get_sort_corp_creator)
return corp_creator_list
def aggregate_issuing_body(self):
return self.aggregate_corp_creators()
def aggregate_issn(self):
issns = {}
issn = ''
for obj in self.objs:
eprint_id = get_eprint_id(obj)
year = get_date_year(obj)
if ('issn' in obj):
issn = obj['issn']
if not issn in issns:
issns[issn] = {
'key': str(issn),
'label': str(issn),
'count': 0,
'year': year,
'objects': []
}
issns[issn]['count'] += 1
issns[issn]['objects'].append(obj)
issn_list = []
for key in issns:
issn_list.append(issns[key])
issn_list.sort(key = get_sort_issn)
return issn_list
def aggregate_place_of_pub(self):
place_of_pubs = {}
place_of_pub = ''
for obj in self.objs:
eprint_id = get_eprint_id(obj)
year = get_date_year(obj)
if ('place_of_pub' in obj):
place_of_pub = obj['place_of_pub'].strip()
key = slugify(place_of_pub)
if not place_of_pub in place_of_pubs:
place_of_pubs[place_of_pub] = {
'key': key,
'label': place_of_pub,
'count': 0,
'year': year,
'objects': []
}
place_of_pubs[place_of_pub]['count'] += 1
place_of_pubs[place_of_pub]['objects'].append(obj)
place_of_pub_list = []
for key in place_of_pubs:
place_of_pub_list.append(place_of_pubs[key])
place_of_pub_list.sort(key = get_sort_place_of_pub)
return place_of_pub_list
def aggregate_collection(self):
collections = {}
collection = ''
for obj in self.objs:
eprint_id = get_eprint_id(obj)
year = get_date_year(obj)
if ('collection' in obj):
collection = obj['collection']
key = slugify(collection)
if not collection in collections:
collections[collection] = {
'key': key,
'label': collection,
'count': 0,
'year': year,
'objects': []
}
collections[collection]['count'] += 1
collections[collection]['objects'].append(obj)
collection_list = []
for key in collections:
collection_list.append(collections[key])
collection_list.sort(key = get_sort_collection)
return collection_list
def aggregate_event(self):
events = {}
event_title = ''
for obj in self.objs:
eprint_id = get_eprint_id(obj)
year = get_date_year(obj)
event_title = ''
event_location = ''
event_dates = ''
if ('event_title' in obj):
event_title = obj['event_title']
if ('event_location' in obj):
event_location = obj['event_location']
if ('event_dates' in obj):
event_dates = obj['event_dates']
if event_title != '':
if not event_title in events:
key = slugify(event_title)
events[event_title] = {
'key': key,
'label': event_title,
'count': 0,
'year': year,
'objects': []
}
events[event_title]['count'] += 1
events[event_title]['objects'].append(obj)
event_list = []
for key in events:
event_list.append(events[key])
event_list.sort(key = get_sort_event)
return event_list
def aggregate_subjects(self, subject_map):
subjects = {}
subject = ''
for obj in self.objs:
eprint_id = get_eprint_id(obj)
year = get_date_year(obj)
if ('subjects' in obj):
for subj in obj['subjects']['items']:
subject_name = subject_map.get_subject(subj)
if subject_name != None:
if not subj in subjects:
subjects[subj] = {
'key': subj,
'label': subject_name,
'count': 0,
'subject_id': subj,
'subject_name': subject_name,
'objects': []
}
subjects[subj]['count'] += 1
subjects[subj]['objects'].append(obj)
subject_list= []
for key in subjects:
subject_list.append(subjects[key])
subject_list.sort(key = get_sort_subject)
return subject_list
def aggregate_ids(self):
ids = {}
for obj in self.objs:
eprint_id = get_eprint_id(obj)
if not eprint_id in ids:
ids[eprint_id] = {
'key': eprint_id,
'label': eprint_id,
'eprint_id': eprint_id,
'count': 0,
'objects': []
}
ids[eprint_id]['count'] += 1
ids[eprint_id]['objects'].append(obj)
ids_list = []
for key in ids:
ids_list.append(ids[key])
ids_list.sort(key = lambda x: int(x['key']))
return ids_list
def aggregate_types(self):
types = {}
for obj in self.objs:
o_type = get_object_type(obj)
label = make_label(o_type)
if not o_type in types:
types[o_type] = {
'key': o_type,
'label': label,
'type': o_type,
'count': 0,
'objects': []
}
types[o_type]['count'] += 1
types[o_type]['objects'].append(obj)
type_list = []
for o_type in types:
type_list.append(types[o_type])
type_list.sort(key = lambda x: x['key'])
return type_list
def aggregate_latest(self):
latest = {}
today = date.today()
td = timedelta(days = -7)
seven_days_ago = (today - td).isoformat()
for obj in self.objs:
lastmod = get_lastmod_date(obj)
if (lastmod != '') and (lastmod >= seven_days_ago):
key = get_sort_lastmod(obj)
year = get_date_year(obj)
if not key in latest:
                    latest[key] = {
'key': key,
'label': lastmod,
'year': year,
'count': 0,
'objects': []
}
                latest[key]['count'] += 1
                latest[key]['objects'].append(obj)
latest_list = []
for key in latest:
latest_list.append(latest[key])
latest_list.sort(key = lambda x: x['key'], reverse = True)
return latest_list
|
|
# Generated from JsonSQL.g4 by ANTLR 4.5.1
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u009f")
buf.write("\u05ae\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c")
buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f")
buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3")
buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6")
buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\4\u00aa")
buf.write("\t\u00aa\4\u00ab\t\u00ab\4\u00ac\t\u00ac\4\u00ad\t\u00ad")
buf.write("\4\u00ae\t\u00ae\4\u00af\t\u00af\4\u00b0\t\u00b0\4\u00b1")
buf.write("\t\u00b1\4\u00b2\t\u00b2\4\u00b3\t\u00b3\4\u00b4\t\u00b4")
buf.write("\4\u00b5\t\u00b5\4\u00b6\t\u00b6\4\u00b7\t\u00b7\4\u00b8")
buf.write("\t\u00b8\4\u00b9\t\u00b9\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3")
buf.write("\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3")
buf.write("\f\3\f\3\f\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\20\3\20")
buf.write("\3\20\3\21\3\21\3\22\3\22\3\23\3\23\3\24\3\24\3\24\3\25")
buf.write("\3\25\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\31")
buf.write("\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35")
buf.write("\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3\"\3\"")
buf.write("\3\"\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3")
buf.write("%\3%\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'")
buf.write("\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3(\3)\3)\3)\3*\3")
buf.write("*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3-\3")
buf.write("-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3")
buf.write("/\3/\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61")
buf.write("\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3\63\3\63")
buf.write("\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65")
buf.write("\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66")
buf.write("\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67")
buf.write("\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38\38\38\38\3")
buf.write("8\38\38\38\38\39\39\39\39\39\39\39\39\3:\3:\3:\3:\3:\3")
buf.write(":\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3")
buf.write("<\3<\3<\3<\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3?\3?\3")
buf.write("?\3?\3?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3B\3")
buf.write("B\3B\3B\3B\3C\3C\3C\3C\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3")
buf.write("E\3E\3E\3E\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3G\3")
buf.write("G\3G\3G\3H\3H\3H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3J\3J\3")
buf.write("J\3J\3K\3K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3M\3M\3M\3")
buf.write("M\3M\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3")
buf.write("P\3P\3Q\3Q\3Q\3R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3")
buf.write("S\3S\3S\3S\3T\3T\3T\3U\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3")
buf.write("V\3V\3V\3W\3W\3W\3W\3W\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3")
buf.write("X\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3[\3[\3")
buf.write("[\3[\3[\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3")
buf.write("^\3^\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3`\3`\3`\3`\3a\3a\3")
buf.write("a\3a\3a\3b\3b\3b\3b\3b\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3")
buf.write("d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3g\3g\3g\3g\3h\3")
buf.write("h\3h\3h\3h\3h\3h\3h\3i\3i\3i\3i\3i\3j\3j\3j\3k\3k\3k\3")
buf.write("k\3k\3k\3k\3l\3l\3l\3m\3m\3m\3n\3n\3n\3n\3n\3n\3o\3o\3")
buf.write("o\3o\3o\3o\3p\3p\3p\3p\3p\3q\3q\3q\3q\3q\3q\3q\3r\3r\3")
buf.write("r\3r\3r\3r\3r\3r\3s\3s\3s\3s\3s\3s\3t\3t\3t\3t\3t\3t\3")
buf.write("u\3u\3u\3u\3u\3u\3u\3u\3u\3u\3v\3v\3v\3v\3v\3v\3v\3v\3")
buf.write("v\3v\3v\3w\3w\3w\3w\3w\3w\3w\3x\3x\3x\3x\3x\3x\3x\3x\3")
buf.write("y\3y\3y\3y\3y\3y\3y\3y\3z\3z\3z\3z\3z\3z\3z\3{\3{\3{\3")
buf.write("{\3{\3{\3{\3{\3|\3|\3|\3|\3|\3|\3|\3|\3|\3}\3}\3}\3}\3")
buf.write("}\3}\3~\3~\3~\3~\3~\3~\3~\3~\3~\3\177\3\177\3\177\3\177")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\3\u0080\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write("\3\u0081\3\u0081\3\u0081\3\u0082\3\u0082\3\u0082\3\u0082")
buf.write("\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0084")
buf.write("\3\u0084\3\u0084\3\u0084\3\u0084\3\u0085\3\u0085\3\u0085")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085")
buf.write("\3\u0086\3\u0086\3\u0086\3\u0086\3\u0086\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0089")
buf.write("\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089")
buf.write("\3\u008a\3\u008a\3\u008a\3\u008a\3\u008a\3\u008a\3\u008b")
buf.write("\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008c")
buf.write("\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008d")
buf.write("\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\3\u008e\3\u008e\3\u008e\3\u008f\3\u008f")
buf.write("\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f\3\u0090\3\u0090")
buf.write("\3\u0090\3\u0090\3\u0090\3\u0091\3\u0091\3\u0091\3\u0091")
buf.write("\3\u0091\3\u0091\3\u0091\3\u0091\3\u0092\3\u0092\3\u0092")
buf.write("\3\u0092\3\u0092\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093")
buf.write("\3\u0093\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0095")
buf.write("\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095")
buf.write("\3\u0096\3\u0096\3\u0096\3\u0096\7\u0096\u04f2\n\u0096")
buf.write("\f\u0096\16\u0096\u04f5\13\u0096\3\u0096\3\u0096\3\u0096")
buf.write("\3\u0096\3\u0096\7\u0096\u04fc\n\u0096\f\u0096\16\u0096")
buf.write("\u04ff\13\u0096\3\u0096\3\u0096\3\u0096\7\u0096\u0504")
buf.write("\n\u0096\f\u0096\16\u0096\u0507\13\u0096\3\u0096\3\u0096")
buf.write("\3\u0096\7\u0096\u050c\n\u0096\f\u0096\16\u0096\u050f")
buf.write("\13\u0096\5\u0096\u0511\n\u0096\3\u0097\6\u0097\u0514")
buf.write("\n\u0097\r\u0097\16\u0097\u0515\3\u0097\3\u0097\7\u0097")
buf.write("\u051a\n\u0097\f\u0097\16\u0097\u051d\13\u0097\5\u0097")
buf.write("\u051f\n\u0097\3\u0097\3\u0097\5\u0097\u0523\n\u0097\3")
buf.write("\u0097\6\u0097\u0526\n\u0097\r\u0097\16\u0097\u0527\5")
buf.write("\u0097\u052a\n\u0097\3\u0097\3\u0097\6\u0097\u052e\n\u0097")
buf.write("\r\u0097\16\u0097\u052f\3\u0097\3\u0097\5\u0097\u0534")
buf.write("\n\u0097\3\u0097\6\u0097\u0537\n\u0097\r\u0097\16\u0097")
buf.write("\u0538\5\u0097\u053b\n\u0097\5\u0097\u053d\n\u0097\3\u0098")
buf.write("\3\u0098\7\u0098\u0541\n\u0098\f\u0098\16\u0098\u0544")
buf.write("\13\u0098\3\u0098\3\u0098\5\u0098\u0548\n\u0098\3\u0099")
buf.write("\3\u0099\3\u0099\3\u0099\7\u0099\u054e\n\u0099\f\u0099")
buf.write("\16\u0099\u0551\13\u0099\3\u0099\3\u0099\3\u009a\3\u009a")
buf.write("\3\u009a\3\u009b\3\u009b\3\u009b\3\u009b\7\u009b\u055c")
buf.write("\n\u009b\f\u009b\16\u009b\u055f\13\u009b\3\u009b\3\u009b")
buf.write("\3\u009c\3\u009c\3\u009c\3\u009c\7\u009c\u0567\n\u009c")
buf.write("\f\u009c\16\u009c\u056a\13\u009c\3\u009c\3\u009c\3\u009c")
buf.write("\5\u009c\u056f\n\u009c\3\u009c\3\u009c\3\u009d\3\u009d")
buf.write("\3\u009d\3\u009d\3\u009e\3\u009e\3\u009f\3\u009f\3\u00a0")
buf.write("\3\u00a0\3\u00a1\3\u00a1\3\u00a2\3\u00a2\3\u00a3\3\u00a3")
buf.write("\3\u00a4\3\u00a4\3\u00a5\3\u00a5\3\u00a6\3\u00a6\3\u00a7")
buf.write("\3\u00a7\3\u00a8\3\u00a8\3\u00a9\3\u00a9\3\u00aa\3\u00aa")
buf.write("\3\u00ab\3\u00ab\3\u00ac\3\u00ac\3\u00ad\3\u00ad\3\u00ae")
buf.write("\3\u00ae\3\u00af\3\u00af\3\u00b0\3\u00b0\3\u00b1\3\u00b1")
buf.write("\3\u00b2\3\u00b2\3\u00b3\3\u00b3\3\u00b4\3\u00b4\3\u00b5")
buf.write("\3\u00b5\3\u00b6\3\u00b6\3\u00b7\3\u00b7\3\u00b8\3\u00b8")
buf.write("\3\u00b9\3\u00b9\3\u0568\2\u00ba\3\3\5\4\7\5\t\6\13\7")
buf.write("\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21")
buf.write("!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67")
buf.write("\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61")
buf.write("a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@\177A\u0081")
buf.write("B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH\u008fI\u0091")
buf.write("J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009fQ\u00a1")
buf.write("R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1")
buf.write("Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1")
buf.write("b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cdh\u00cfi\u00d1")
buf.write("j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00ddp\u00dfq\u00e1")
buf.write("r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00edx\u00efy\u00f1")
buf.write("z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080\u00ff")
buf.write("\u0081\u0101\u0082\u0103\u0083\u0105\u0084\u0107\u0085")
buf.write("\u0109\u0086\u010b\u0087\u010d\u0088\u010f\u0089\u0111")
buf.write("\u008a\u0113\u008b\u0115\u008c\u0117\u008d\u0119\u008e")
buf.write("\u011b\u008f\u011d\u0090\u011f\u0091\u0121\u0092\u0123")
buf.write("\u0093\u0125\u0094\u0127\u0095\u0129\u0096\u012b\u0097")
buf.write("\u012d\u0098\u012f\u0099\u0131\u009a\u0133\u009b\u0135")
buf.write("\u009c\u0137\u009d\u0139\u009e\u013b\u009f\u013d\2\u013f")
buf.write("\2\u0141\2\u0143\2\u0145\2\u0147\2\u0149\2\u014b\2\u014d")
buf.write("\2\u014f\2\u0151\2\u0153\2\u0155\2\u0157\2\u0159\2\u015b")
buf.write("\2\u015d\2\u015f\2\u0161\2\u0163\2\u0165\2\u0167\2\u0169")
buf.write("\2\u016b\2\u016d\2\u016f\2\u0171\2\3\2\'\3\2$$\3\2bb\3")
buf.write("\2__\5\2C\\aac|\6\2\62;C\\aac|\4\2--//\5\2&&<<BB\3\2)")
buf.write(")\4\2\f\f\17\17\5\2\13\r\17\17\"\"\3\2\62;\4\2CCcc\4\2")
buf.write("DDdd\4\2EEee\4\2FFff\4\2GGgg\4\2HHhh\4\2IIii\4\2JJjj\4")
buf.write("\2KKkk\4\2LLll\4\2MMmm\4\2NNnn\4\2OOoo\4\2PPpp\4\2QQq")
buf.write("q\4\2RRrr\4\2SSss\4\2TTtt\4\2UUuu\4\2VVvv\4\2WWww\4\2")
buf.write("XXxx\4\2YYyy\4\2ZZzz\4\2[[{{\4\2\\\\||\u05ad\2\3\3\2\2")
buf.write("\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2")
buf.write("\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25")
buf.write("\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3")
buf.write("\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2")
buf.write("\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2")
buf.write("\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\2")
buf.write("9\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2")
buf.write("\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2")
buf.write("\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2")
buf.write("\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3")
buf.write("\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i")
buf.write("\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2")
buf.write("s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2")
buf.write("\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2")
buf.write("\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2")
buf.write("\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091")
buf.write("\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2")
buf.write("\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f")
buf.write("\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2")
buf.write("\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad")
buf.write("\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2")
buf.write("\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb")
buf.write("\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2")
buf.write("\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9")
buf.write("\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2")
buf.write("\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7")
buf.write("\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2")
buf.write("\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5")
buf.write("\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2")
buf.write("\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3")
buf.write("\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2")
buf.write("\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2\2\2\u0101")
buf.write("\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2")
buf.write("\2\2\u0109\3\2\2\2\2\u010b\3\2\2\2\2\u010d\3\2\2\2\2\u010f")
buf.write("\3\2\2\2\2\u0111\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2")
buf.write("\2\2\u0117\3\2\2\2\2\u0119\3\2\2\2\2\u011b\3\2\2\2\2\u011d")
buf.write("\3\2\2\2\2\u011f\3\2\2\2\2\u0121\3\2\2\2\2\u0123\3\2\2")
buf.write("\2\2\u0125\3\2\2\2\2\u0127\3\2\2\2\2\u0129\3\2\2\2\2\u012b")
buf.write("\3\2\2\2\2\u012d\3\2\2\2\2\u012f\3\2\2\2\2\u0131\3\2\2")
buf.write("\2\2\u0133\3\2\2\2\2\u0135\3\2\2\2\2\u0137\3\2\2\2\2\u0139")
buf.write("\3\2\2\2\2\u013b\3\2\2\2\3\u0173\3\2\2\2\5\u0175\3\2\2")
buf.write("\2\7\u0177\3\2\2\2\t\u0179\3\2\2\2\13\u017b\3\2\2\2\r")
buf.write("\u017d\3\2\2\2\17\u017f\3\2\2\2\21\u0181\3\2\2\2\23\u0183")
buf.write("\3\2\2\2\25\u0185\3\2\2\2\27\u0187\3\2\2\2\31\u018a\3")
buf.write("\2\2\2\33\u018c\3\2\2\2\35\u018e\3\2\2\2\37\u0191\3\2")
buf.write("\2\2!\u0194\3\2\2\2#\u0196\3\2\2\2%\u0198\3\2\2\2\'\u019a")
buf.write("\3\2\2\2)\u019d\3\2\2\2+\u019f\3\2\2\2-\u01a2\3\2\2\2")
buf.write("/\u01a5\3\2\2\2\61\u01a8\3\2\2\2\63\u01ab\3\2\2\2\65\u01b1")
buf.write("\3\2\2\2\67\u01b8\3\2\2\29\u01bc\3\2\2\2;\u01c2\3\2\2")
buf.write("\2=\u01c6\3\2\2\2?\u01cc\3\2\2\2A\u01d4\3\2\2\2C\u01d8")
buf.write("\3\2\2\2E\u01db\3\2\2\2G\u01df\3\2\2\2I\u01e6\3\2\2\2")
buf.write("K\u01f4\3\2\2\2M\u01fb\3\2\2\2O\u0201\3\2\2\2Q\u0209\3")
buf.write("\2\2\2S\u020c\3\2\2\2U\u0214\3\2\2\2W\u0219\3\2\2\2Y\u021e")
buf.write("\3\2\2\2[\u0224\3\2\2\2]\u022c\3\2\2\2_\u0233\3\2\2\2")
buf.write("a\u023a\3\2\2\2c\u0243\3\2\2\2e\u024e\3\2\2\2g\u0255\3")
buf.write("\2\2\2i\u025b\3\2\2\2k\u0268\3\2\2\2m\u0275\3\2\2\2o\u0287")
buf.write("\3\2\2\2q\u0290\3\2\2\2s\u0298\3\2\2\2u\u02a3\3\2\2\2")
buf.write("w\u02ac\3\2\2\2y\u02b3\3\2\2\2{\u02b8\3\2\2\2}\u02bf\3")
buf.write("\2\2\2\177\u02c8\3\2\2\2\u0081\u02cd\3\2\2\2\u0083\u02d2")
buf.write("\3\2\2\2\u0085\u02d7\3\2\2\2\u0087\u02db\3\2\2\2\u0089")
buf.write("\u02e2\3\2\2\2\u008b\u02e9\3\2\2\2\u008d\u02f3\3\2\2\2")
buf.write("\u008f\u02fa\3\2\2\2\u0091\u0302\3\2\2\2\u0093\u0307\3")
buf.write("\2\2\2\u0095\u030b\3\2\2\2\u0097\u0313\3\2\2\2\u0099\u0318")
buf.write("\3\2\2\2\u009b\u031d\3\2\2\2\u009d\u0322\3\2\2\2\u009f")
buf.write("\u0328\3\2\2\2\u00a1\u032f\3\2\2\2\u00a3\u0332\3\2\2\2")
buf.write("\u00a5\u0339\3\2\2\2\u00a7\u0343\3\2\2\2\u00a9\u0346\3")
buf.write("\2\2\2\u00ab\u034c\3\2\2\2\u00ad\u0354\3\2\2\2\u00af\u035e")
buf.write("\3\2\2\2\u00b1\u0364\3\2\2\2\u00b3\u036b\3\2\2\2\u00b5")
buf.write("\u0373\3\2\2\2\u00b7\u037d\3\2\2\2\u00b9\u0382\3\2\2\2")
buf.write("\u00bb\u0385\3\2\2\2\u00bd\u038c\3\2\2\2\u00bf\u0391\3")
buf.write("\2\2\2\u00c1\u0395\3\2\2\2\u00c3\u039a\3\2\2\2\u00c5\u039f")
buf.write("\3\2\2\2\u00c7\u03a5\3\2\2\2\u00c9\u03ab\3\2\2\2\u00cb")
buf.write("\u03b3\3\2\2\2\u00cd\u03b6\3\2\2\2\u00cf\u03ba\3\2\2\2")
buf.write("\u00d1\u03c2\3\2\2\2\u00d3\u03c7\3\2\2\2\u00d5\u03ca\3")
buf.write("\2\2\2\u00d7\u03d1\3\2\2\2\u00d9\u03d4\3\2\2\2\u00db\u03d7")
buf.write("\3\2\2\2\u00dd\u03dd\3\2\2\2\u00df\u03e3\3\2\2\2\u00e1")
buf.write("\u03e8\3\2\2\2\u00e3\u03ef\3\2\2\2\u00e5\u03f7\3\2\2\2")
buf.write("\u00e7\u03fd\3\2\2\2\u00e9\u0403\3\2\2\2\u00eb\u040d\3")
buf.write("\2\2\2\u00ed\u0418\3\2\2\2\u00ef\u041f\3\2\2\2\u00f1\u0427")
buf.write("\3\2\2\2\u00f3\u042f\3\2\2\2\u00f5\u0436\3\2\2\2\u00f7")
buf.write("\u043e\3\2\2\2\u00f9\u0447\3\2\2\2\u00fb\u044d\3\2\2\2")
buf.write("\u00fd\u0456\3\2\2\2\u00ff\u045a\3\2\2\2\u0101\u0464\3")
buf.write("\2\2\2\u0103\u046b\3\2\2\2\u0105\u046f\3\2\2\2\u0107\u0475")
buf.write("\3\2\2\2\u0109\u047a\3\2\2\2\u010b\u0484\3\2\2\2\u010d")
buf.write("\u0489\3\2\2\2\u010f\u048c\3\2\2\2\u0111\u0498\3\2\2\2")
buf.write("\u0113\u04a0\3\2\2\2\u0115\u04a6\3\2\2\2\u0117\u04ad\3")
buf.write("\2\2\2\u0119\u04b4\3\2\2\2\u011b\u04ba\3\2\2\2\u011d\u04c1")
buf.write("\3\2\2\2\u011f\u04c8\3\2\2\2\u0121\u04cd\3\2\2\2\u0123")
buf.write("\u04d5\3\2\2\2\u0125\u04da\3\2\2\2\u0127\u04e0\3\2\2\2")
buf.write("\u0129\u04e5\3\2\2\2\u012b\u0510\3\2\2\2\u012d\u053c\3")
buf.write("\2\2\2\u012f\u0547\3\2\2\2\u0131\u0549\3\2\2\2\u0133\u0554")
buf.write("\3\2\2\2\u0135\u0557\3\2\2\2\u0137\u0562\3\2\2\2\u0139")
buf.write("\u0572\3\2\2\2\u013b\u0576\3\2\2\2\u013d\u0578\3\2\2\2")
buf.write("\u013f\u057a\3\2\2\2\u0141\u057c\3\2\2\2\u0143\u057e\3")
buf.write("\2\2\2\u0145\u0580\3\2\2\2\u0147\u0582\3\2\2\2\u0149\u0584")
buf.write("\3\2\2\2\u014b\u0586\3\2\2\2\u014d\u0588\3\2\2\2\u014f")
buf.write("\u058a\3\2\2\2\u0151\u058c\3\2\2\2\u0153\u058e\3\2\2\2")
buf.write("\u0155\u0590\3\2\2\2\u0157\u0592\3\2\2\2\u0159\u0594\3")
buf.write("\2\2\2\u015b\u0596\3\2\2\2\u015d\u0598\3\2\2\2\u015f\u059a")
buf.write("\3\2\2\2\u0161\u059c\3\2\2\2\u0163\u059e\3\2\2\2\u0165")
buf.write("\u05a0\3\2\2\2\u0167\u05a2\3\2\2\2\u0169\u05a4\3\2\2\2")
buf.write("\u016b\u05a6\3\2\2\2\u016d\u05a8\3\2\2\2\u016f\u05aa\3")
buf.write("\2\2\2\u0171\u05ac\3\2\2\2\u0173\u0174\7=\2\2\u0174\4")
buf.write("\3\2\2\2\u0175\u0176\7\60\2\2\u0176\6\3\2\2\2\u0177\u0178")
buf.write("\7*\2\2\u0178\b\3\2\2\2\u0179\u017a\7+\2\2\u017a\n\3\2")
buf.write("\2\2\u017b\u017c\7.\2\2\u017c\f\3\2\2\2\u017d\u017e\7")
buf.write("?\2\2\u017e\16\3\2\2\2\u017f\u0180\7,\2\2\u0180\20\3\2")
buf.write("\2\2\u0181\u0182\7-\2\2\u0182\22\3\2\2\2\u0183\u0184\7")
buf.write("/\2\2\u0184\24\3\2\2\2\u0185\u0186\7\u0080\2\2\u0186\26")
buf.write("\3\2\2\2\u0187\u0188\7~\2\2\u0188\u0189\7~\2\2\u0189\30")
buf.write("\3\2\2\2\u018a\u018b\7\61\2\2\u018b\32\3\2\2\2\u018c\u018d")
buf.write("\7\'\2\2\u018d\34\3\2\2\2\u018e\u018f\7>\2\2\u018f\u0190")
buf.write("\7>\2\2\u0190\36\3\2\2\2\u0191\u0192\7@\2\2\u0192\u0193")
buf.write("\7@\2\2\u0193 \3\2\2\2\u0194\u0195\7(\2\2\u0195\"\3\2")
buf.write("\2\2\u0196\u0197\7~\2\2\u0197$\3\2\2\2\u0198\u0199\7>")
buf.write("\2\2\u0199&\3\2\2\2\u019a\u019b\7>\2\2\u019b\u019c\7?")
buf.write("\2\2\u019c(\3\2\2\2\u019d\u019e\7@\2\2\u019e*\3\2\2\2")
buf.write("\u019f\u01a0\7@\2\2\u01a0\u01a1\7?\2\2\u01a1,\3\2\2\2")
buf.write("\u01a2\u01a3\7?\2\2\u01a3\u01a4\7?\2\2\u01a4.\3\2\2\2")
buf.write("\u01a5\u01a6\7#\2\2\u01a6\u01a7\7?\2\2\u01a7\60\3\2\2")
buf.write("\2\u01a8\u01a9\7>\2\2\u01a9\u01aa\7@\2\2\u01aa\62\3\2")
buf.write("\2\2\u01ab\u01ac\5\u013f\u00a0\2\u01ac\u01ad\5\u0141\u00a1")
buf.write("\2\u01ad\u01ae\5\u015b\u00ae\2\u01ae\u01af\5\u0161\u00b1")
buf.write("\2\u01af\u01b0\5\u0165\u00b3\2\u01b0\64\3\2\2\2\u01b1")
buf.write("\u01b2\5\u013f\u00a0\2\u01b2\u01b3\5\u0143\u00a2\2\u01b3")
buf.write("\u01b4\5\u0165\u00b3\2\u01b4\u01b5\5\u014f\u00a8\2\u01b5")
buf.write("\u01b6\5\u015b\u00ae\2\u01b6\u01b7\5\u0159\u00ad\2\u01b7")
buf.write("\66\3\2\2\2\u01b8\u01b9\5\u013f\u00a0\2\u01b9\u01ba\5")
buf.write("\u0145\u00a3\2\u01ba\u01bb\5\u0145\u00a3\2\u01bb8\3\2")
buf.write("\2\2\u01bc\u01bd\5\u013f\u00a0\2\u01bd\u01be\5\u0149\u00a5")
buf.write("\2\u01be\u01bf\5\u0165\u00b3\2\u01bf\u01c0\5\u0147\u00a4")
buf.write("\2\u01c0\u01c1\5\u0161\u00b1\2\u01c1:\3\2\2\2\u01c2\u01c3")
buf.write("\5\u013f\u00a0\2\u01c3\u01c4\5\u0155\u00ab\2\u01c4\u01c5")
buf.write("\5\u0155\u00ab\2\u01c5<\3\2\2\2\u01c6\u01c7\5\u013f\u00a0")
buf.write("\2\u01c7\u01c8\5\u0155\u00ab\2\u01c8\u01c9\5\u0165\u00b3")
buf.write("\2\u01c9\u01ca\5\u0147\u00a4\2\u01ca\u01cb\5\u0161\u00b1")
buf.write("\2\u01cb>\3\2\2\2\u01cc\u01cd\5\u013f\u00a0\2\u01cd\u01ce")
buf.write("\5\u0159\u00ad\2\u01ce\u01cf\5\u013f\u00a0\2\u01cf\u01d0")
buf.write("\5\u0155\u00ab\2\u01d0\u01d1\5\u016f\u00b8\2\u01d1\u01d2")
buf.write("\5\u0171\u00b9\2\u01d2\u01d3\5\u0147\u00a4\2\u01d3@\3")
buf.write("\2\2\2\u01d4\u01d5\5\u013f\u00a0\2\u01d5\u01d6\5\u0159")
buf.write("\u00ad\2\u01d6\u01d7\5\u0145\u00a3\2\u01d7B\3\2\2\2\u01d8")
buf.write("\u01d9\5\u013f\u00a0\2\u01d9\u01da\5\u0163\u00b2\2\u01da")
buf.write("D\3\2\2\2\u01db\u01dc\5\u013f\u00a0\2\u01dc\u01dd\5\u0163")
buf.write("\u00b2\2\u01dd\u01de\5\u0143\u00a2\2\u01deF\3\2\2\2\u01df")
buf.write("\u01e0\5\u013f\u00a0\2\u01e0\u01e1\5\u0165\u00b3\2\u01e1")
buf.write("\u01e2\5\u0165\u00b3\2\u01e2\u01e3\5\u013f\u00a0\2\u01e3")
buf.write("\u01e4\5\u0143\u00a2\2\u01e4\u01e5\5\u014d\u00a7\2\u01e5")
buf.write("H\3\2\2\2\u01e6\u01e7\5\u013f\u00a0\2\u01e7\u01e8\5\u0167")
buf.write("\u00b4\2\u01e8\u01e9\5\u0165\u00b3\2\u01e9\u01ea\5\u015b")
buf.write("\u00ae\2\u01ea\u01eb\5\u014f\u00a8\2\u01eb\u01ec\5\u0159")
buf.write("\u00ad\2\u01ec\u01ed\5\u0143\u00a2\2\u01ed\u01ee\5\u0161")
buf.write("\u00b1\2\u01ee\u01ef\5\u0147\u00a4\2\u01ef\u01f0\5\u0157")
buf.write("\u00ac\2\u01f0\u01f1\5\u0147\u00a4\2\u01f1\u01f2\5\u0159")
buf.write("\u00ad\2\u01f2\u01f3\5\u0165\u00b3\2\u01f3J\3\2\2\2\u01f4")
buf.write("\u01f5\5\u0141\u00a1\2\u01f5\u01f6\5\u0147\u00a4\2\u01f6")
buf.write("\u01f7\5\u0149\u00a5\2\u01f7\u01f8\5\u015b\u00ae\2\u01f8")
buf.write("\u01f9\5\u0161\u00b1\2\u01f9\u01fa\5\u0147\u00a4\2\u01fa")
buf.write("L\3\2\2\2\u01fb\u01fc\5\u0141\u00a1\2\u01fc\u01fd\5\u0147")
buf.write("\u00a4\2\u01fd\u01fe\5\u014b\u00a6\2\u01fe\u01ff\5\u014f")
buf.write("\u00a8\2\u01ff\u0200\5\u0159\u00ad\2\u0200N\3\2\2\2\u0201")
buf.write("\u0202\5\u0141\u00a1\2\u0202\u0203\5\u0147\u00a4\2\u0203")
buf.write("\u0204\5\u0165\u00b3\2\u0204\u0205\5\u016b\u00b6\2\u0205")
buf.write("\u0206\5\u0147\u00a4\2\u0206\u0207\5\u0147\u00a4\2\u0207")
buf.write("\u0208\5\u0159\u00ad\2\u0208P\3\2\2\2\u0209\u020a\5\u0141")
buf.write("\u00a1\2\u020a\u020b\5\u016f\u00b8\2\u020bR\3\2\2\2\u020c")
buf.write("\u020d\5\u0143\u00a2\2\u020d\u020e\5\u013f\u00a0\2\u020e")
buf.write("\u020f\5\u0163\u00b2\2\u020f\u0210\5\u0143\u00a2\2\u0210")
buf.write("\u0211\5\u013f\u00a0\2\u0211\u0212\5\u0145\u00a3\2\u0212")
buf.write("\u0213\5\u0147\u00a4\2\u0213T\3\2\2\2\u0214\u0215\5\u0143")
buf.write("\u00a2\2\u0215\u0216\5\u013f\u00a0\2\u0216\u0217\5\u0163")
buf.write("\u00b2\2\u0217\u0218\5\u0147\u00a4\2\u0218V\3\2\2\2\u0219")
buf.write("\u021a\5\u0143\u00a2\2\u021a\u021b\5\u013f\u00a0\2\u021b")
buf.write("\u021c\5\u0163\u00b2\2\u021c\u021d\5\u0165\u00b3\2\u021d")
buf.write("X\3\2\2\2\u021e\u021f\5\u0143\u00a2\2\u021f\u0220\5\u014d")
buf.write("\u00a7\2\u0220\u0221\5\u0147\u00a4\2\u0221\u0222\5\u0143")
buf.write("\u00a2\2\u0222\u0223\5\u0153\u00aa\2\u0223Z\3\2\2\2\u0224")
buf.write("\u0225\5\u0143\u00a2\2\u0225\u0226\5\u015b\u00ae\2\u0226")
buf.write("\u0227\5\u0155\u00ab\2\u0227\u0228\5\u0155\u00ab\2\u0228")
buf.write("\u0229\5\u013f\u00a0\2\u0229\u022a\5\u0165\u00b3\2\u022a")
buf.write("\u022b\5\u0147\u00a4\2\u022b\\\3\2\2\2\u022c\u022d\5\u0143")
buf.write("\u00a2\2\u022d\u022e\5\u015b\u00ae\2\u022e\u022f\5\u0155")
buf.write("\u00ab\2\u022f\u0230\5\u0167\u00b4\2\u0230\u0231\5\u0157")
buf.write("\u00ac\2\u0231\u0232\5\u0159\u00ad\2\u0232^\3\2\2\2\u0233")
buf.write("\u0234\5\u0143\u00a2\2\u0234\u0235\5\u015b\u00ae\2\u0235")
buf.write("\u0236\5\u0157\u00ac\2\u0236\u0237\5\u0157\u00ac\2\u0237")
buf.write("\u0238\5\u014f\u00a8\2\u0238\u0239\5\u0165\u00b3\2\u0239")
buf.write("`\3\2\2\2\u023a\u023b\5\u0143\u00a2\2\u023b\u023c\5\u015b")
buf.write("\u00ae\2\u023c\u023d\5\u0159\u00ad\2\u023d\u023e\5\u0149")
buf.write("\u00a5\2\u023e\u023f\5\u0155\u00ab\2\u023f\u0240\5\u014f")
buf.write("\u00a8\2\u0240\u0241\5\u0143\u00a2\2\u0241\u0242\5\u0165")
buf.write("\u00b3\2\u0242b\3\2\2\2\u0243\u0244\5\u0143\u00a2\2\u0244")
buf.write("\u0245\5\u015b\u00ae\2\u0245\u0246\5\u0159\u00ad\2\u0246")
buf.write("\u0247\5\u0163\u00b2\2\u0247\u0248\5\u0165\u00b3\2\u0248")
buf.write("\u0249\5\u0161\u00b1\2\u0249\u024a\5\u013f\u00a0\2\u024a")
buf.write("\u024b\5\u014f\u00a8\2\u024b\u024c\5\u0159\u00ad\2\u024c")
buf.write("\u024d\5\u0165\u00b3\2\u024dd\3\2\2\2\u024e\u024f\5\u0143")
buf.write("\u00a2\2\u024f\u0250\5\u0161\u00b1\2\u0250\u0251\5\u0147")
buf.write("\u00a4\2\u0251\u0252\5\u013f\u00a0\2\u0252\u0253\5\u0165")
buf.write("\u00b3\2\u0253\u0254\5\u0147\u00a4\2\u0254f\3\2\2\2\u0255")
buf.write("\u0256\5\u0143\u00a2\2\u0256\u0257\5\u0161\u00b1\2\u0257")
buf.write("\u0258\5\u015b\u00ae\2\u0258\u0259\5\u0163\u00b2\2\u0259")
buf.write("\u025a\5\u0163\u00b2\2\u025ah\3\2\2\2\u025b\u025c\5\u0143")
buf.write("\u00a2\2\u025c\u025d\5\u0167\u00b4\2\u025d\u025e\5\u0161")
buf.write("\u00b1\2\u025e\u025f\5\u0161\u00b1\2\u025f\u0260\5\u0147")
buf.write("\u00a4\2\u0260\u0261\5\u0159\u00ad\2\u0261\u0262\5\u0165")
buf.write("\u00b3\2\u0262\u0263\7a\2\2\u0263\u0264\5\u0145\u00a3")
buf.write("\2\u0264\u0265\5\u013f\u00a0\2\u0265\u0266\5\u0165\u00b3")
buf.write("\2\u0266\u0267\5\u0147\u00a4\2\u0267j\3\2\2\2\u0268\u0269")
buf.write("\5\u0143\u00a2\2\u0269\u026a\5\u0167\u00b4\2\u026a\u026b")
buf.write("\5\u0161\u00b1\2\u026b\u026c\5\u0161\u00b1\2\u026c\u026d")
buf.write("\5\u0147\u00a4\2\u026d\u026e\5\u0159\u00ad\2\u026e\u026f")
buf.write("\5\u0165\u00b3\2\u026f\u0270\7a\2\2\u0270\u0271\5\u0165")
buf.write("\u00b3\2\u0271\u0272\5\u014f\u00a8\2\u0272\u0273\5\u0157")
buf.write("\u00ac\2\u0273\u0274\5\u0147\u00a4\2\u0274l\3\2\2\2\u0275")
buf.write("\u0276\5\u0143\u00a2\2\u0276\u0277\5\u0167\u00b4\2\u0277")
buf.write("\u0278\5\u0161\u00b1\2\u0278\u0279\5\u0161\u00b1\2\u0279")
buf.write("\u027a\5\u0147\u00a4\2\u027a\u027b\5\u0159\u00ad\2\u027b")
buf.write("\u027c\5\u0165\u00b3\2\u027c\u027d\7a\2\2\u027d\u027e")
buf.write("\5\u0165\u00b3\2\u027e\u027f\5\u014f\u00a8\2\u027f\u0280")
buf.write("\5\u0157\u00ac\2\u0280\u0281\5\u0147\u00a4\2\u0281\u0282")
buf.write("\5\u0163\u00b2\2\u0282\u0283\5\u0165\u00b3\2\u0283\u0284")
buf.write("\5\u013f\u00a0\2\u0284\u0285\5\u0157\u00ac\2\u0285\u0286")
buf.write("\5\u015d\u00af\2\u0286n\3\2\2\2\u0287\u0288\5\u0145\u00a3")
buf.write("\2\u0288\u0289\5\u013f\u00a0\2\u0289\u028a\5\u0165\u00b3")
buf.write("\2\u028a\u028b\5\u013f\u00a0\2\u028b\u028c\5\u0141\u00a1")
buf.write("\2\u028c\u028d\5\u013f\u00a0\2\u028d\u028e\5\u0163\u00b2")
buf.write("\2\u028e\u028f\5\u0147\u00a4\2\u028fp\3\2\2\2\u0290\u0291")
buf.write("\5\u0145\u00a3\2\u0291\u0292\5\u0147\u00a4\2\u0292\u0293")
buf.write("\5\u0149\u00a5\2\u0293\u0294\5\u013f\u00a0\2\u0294\u0295")
buf.write("\5\u0167\u00b4\2\u0295\u0296\5\u0155\u00ab\2\u0296\u0297")
buf.write("\5\u0165\u00b3\2\u0297r\3\2\2\2\u0298\u0299\5\u0145\u00a3")
buf.write("\2\u0299\u029a\5\u0147\u00a4\2\u029a\u029b\5\u0149\u00a5")
buf.write("\2\u029b\u029c\5\u0147\u00a4\2\u029c\u029d\5\u0161\u00b1")
buf.write("\2\u029d\u029e\5\u0161\u00b1\2\u029e\u029f\5\u013f\u00a0")
buf.write("\2\u029f\u02a0\5\u0141\u00a1\2\u02a0\u02a1\5\u0155\u00ab")
buf.write("\2\u02a1\u02a2\5\u0147\u00a4\2\u02a2t\3\2\2\2\u02a3\u02a4")
buf.write("\5\u0145\u00a3\2\u02a4\u02a5\5\u0147\u00a4\2\u02a5\u02a6")
buf.write("\5\u0149\u00a5\2\u02a6\u02a7\5\u0147\u00a4\2\u02a7\u02a8")
buf.write("\5\u0161\u00b1\2\u02a8\u02a9\5\u0161\u00b1\2\u02a9\u02aa")
buf.write("\5\u0147\u00a4\2\u02aa\u02ab\5\u0145\u00a3\2\u02abv\3")
buf.write("\2\2\2\u02ac\u02ad\5\u0145\u00a3\2\u02ad\u02ae\5\u0147")
buf.write("\u00a4\2\u02ae\u02af\5\u0155\u00ab\2\u02af\u02b0\5\u0147")
buf.write("\u00a4\2\u02b0\u02b1\5\u0165\u00b3\2\u02b1\u02b2\5\u0147")
buf.write("\u00a4\2\u02b2x\3\2\2\2\u02b3\u02b4\5\u0145\u00a3\2\u02b4")
buf.write("\u02b5\5\u0147\u00a4\2\u02b5\u02b6\5\u0163\u00b2\2\u02b6")
buf.write("\u02b7\5\u0143\u00a2\2\u02b7z\3\2\2\2\u02b8\u02b9\5\u0145")
buf.write("\u00a3\2\u02b9\u02ba\5\u0147\u00a4\2\u02ba\u02bb\5\u0165")
buf.write("\u00b3\2\u02bb\u02bc\5\u013f\u00a0\2\u02bc\u02bd\5\u0143")
buf.write("\u00a2\2\u02bd\u02be\5\u014d\u00a7\2\u02be|\3\2\2\2\u02bf")
buf.write("\u02c0\5\u0145\u00a3\2\u02c0\u02c1\5\u014f\u00a8\2\u02c1")
buf.write("\u02c2\5\u0163\u00b2\2\u02c2\u02c3\5\u0165\u00b3\2\u02c3")
buf.write("\u02c4\5\u014f\u00a8\2\u02c4\u02c5\5\u0159\u00ad\2\u02c5")
buf.write("\u02c6\5\u0143\u00a2\2\u02c6\u02c7\5\u0165\u00b3\2\u02c7")
buf.write("~\3\2\2\2\u02c8\u02c9\5\u0145\u00a3\2\u02c9\u02ca\5\u0161")
buf.write("\u00b1\2\u02ca\u02cb\5\u015b\u00ae\2\u02cb\u02cc\5\u015d")
buf.write("\u00af\2\u02cc\u0080\3\2\2\2\u02cd\u02ce\5\u0147\u00a4")
buf.write("\2\u02ce\u02cf\5\u013f\u00a0\2\u02cf\u02d0\5\u0143\u00a2")
buf.write("\2\u02d0\u02d1\5\u014d\u00a7\2\u02d1\u0082\3\2\2\2\u02d2")
buf.write("\u02d3\5\u0147\u00a4\2\u02d3\u02d4\5\u0155\u00ab\2\u02d4")
buf.write("\u02d5\5\u0163\u00b2\2\u02d5\u02d6\5\u0147\u00a4\2\u02d6")
buf.write("\u0084\3\2\2\2\u02d7\u02d8\5\u0147\u00a4\2\u02d8\u02d9")
buf.write("\5\u0159\u00ad\2\u02d9\u02da\5\u0145\u00a3\2\u02da\u0086")
buf.write("\3\2\2\2\u02db\u02dc\5\u0147\u00a4\2\u02dc\u02dd\5\u0163")
buf.write("\u00b2\2\u02dd\u02de\5\u0143\u00a2\2\u02de\u02df\5\u013f")
buf.write("\u00a0\2\u02df\u02e0\5\u015d\u00af\2\u02e0\u02e1\5\u0147")
buf.write("\u00a4\2\u02e1\u0088\3\2\2\2\u02e2\u02e3\5\u0147\u00a4")
buf.write("\2\u02e3\u02e4\5\u016d\u00b7\2\u02e4\u02e5\5\u0143\u00a2")
buf.write("\2\u02e5\u02e6\5\u0147\u00a4\2\u02e6\u02e7\5\u015d\u00af")
buf.write("\2\u02e7\u02e8\5\u0165\u00b3\2\u02e8\u008a\3\2\2\2\u02e9")
buf.write("\u02ea\5\u0147\u00a4\2\u02ea\u02eb\5\u016d\u00b7\2\u02eb")
buf.write("\u02ec\5\u0143\u00a2\2\u02ec\u02ed\5\u0155\u00ab\2\u02ed")
buf.write("\u02ee\5\u0167\u00b4\2\u02ee\u02ef\5\u0163\u00b2\2\u02ef")
buf.write("\u02f0\5\u014f\u00a8\2\u02f0\u02f1\5\u0169\u00b5\2\u02f1")
buf.write("\u02f2\5\u0147\u00a4\2\u02f2\u008c\3\2\2\2\u02f3\u02f4")
buf.write("\5\u0147\u00a4\2\u02f4\u02f5\5\u016d\u00b7\2\u02f5\u02f6")
buf.write("\5\u014f\u00a8\2\u02f6\u02f7\5\u0163\u00b2\2\u02f7\u02f8")
buf.write("\5\u0165\u00b3\2\u02f8\u02f9\5\u0163\u00b2\2\u02f9\u008e")
buf.write("\3\2\2\2\u02fa\u02fb\5\u0147\u00a4\2\u02fb\u02fc\5\u016d")
buf.write("\u00b7\2\u02fc\u02fd\5\u015d\u00af\2\u02fd\u02fe\5\u0155")
buf.write("\u00ab\2\u02fe\u02ff\5\u013f\u00a0\2\u02ff\u0300\5\u014f")
buf.write("\u00a8\2\u0300\u0301\5\u0159\u00ad\2\u0301\u0090\3\2\2")
buf.write("\2\u0302\u0303\5\u0149\u00a5\2\u0303\u0304\5\u013f\u00a0")
buf.write("\2\u0304\u0305\5\u014f\u00a8\2\u0305\u0306\5\u0155\u00ab")
buf.write("\2\u0306\u0092\3\2\2\2\u0307\u0308\5\u0149\u00a5\2\u0308")
buf.write("\u0309\5\u015b\u00ae\2\u0309\u030a\5\u0161\u00b1\2\u030a")
buf.write("\u0094\3\2\2\2\u030b\u030c\5\u0149\u00a5\2\u030c\u030d")
buf.write("\5\u015b\u00ae\2\u030d\u030e\5\u0161\u00b1\2\u030e\u030f")
buf.write("\5\u0147\u00a4\2\u030f\u0310\5\u014f\u00a8\2\u0310\u0311")
buf.write("\5\u014b\u00a6\2\u0311\u0312\5\u0159\u00ad\2\u0312\u0096")
buf.write("\3\2\2\2\u0313\u0314\5\u0149\u00a5\2\u0314\u0315\5\u0161")
buf.write("\u00b1\2\u0315\u0316\5\u015b\u00ae\2\u0316\u0317\5\u0157")
buf.write("\u00ac\2\u0317\u0098\3\2\2\2\u0318\u0319\5\u0149\u00a5")
buf.write("\2\u0319\u031a\5\u0167\u00b4\2\u031a\u031b\5\u0155\u00ab")
buf.write("\2\u031b\u031c\5\u0155\u00ab\2\u031c\u009a\3\2\2\2\u031d")
buf.write("\u031e\5\u014b\u00a6\2\u031e\u031f\5\u0155\u00ab\2\u031f")
buf.write("\u0320\5\u015b\u00ae\2\u0320\u0321\5\u0141\u00a1\2\u0321")
buf.write("\u009c\3\2\2\2\u0322\u0323\5\u014b\u00a6\2\u0323\u0324")
buf.write("\5\u0161\u00b1\2\u0324\u0325\5\u015b\u00ae\2\u0325\u0326")
buf.write("\5\u0167\u00b4\2\u0326\u0327\5\u015d\u00af\2\u0327\u009e")
buf.write("\3\2\2\2\u0328\u0329\5\u014d\u00a7\2\u0329\u032a\5\u013f")
buf.write("\u00a0\2\u032a\u032b\5\u0169\u00b5\2\u032b\u032c\5\u014f")
buf.write("\u00a8\2\u032c\u032d\5\u0159\u00ad\2\u032d\u032e\5\u014b")
buf.write("\u00a6\2\u032e\u00a0\3\2\2\2\u032f\u0330\5\u014f\u00a8")
buf.write("\2\u0330\u0331\5\u0149\u00a5\2\u0331\u00a2\3\2\2\2\u0332")
buf.write("\u0333\5\u014f\u00a8\2\u0333\u0334\5\u014b\u00a6\2\u0334")
buf.write("\u0335\5\u0159\u00ad\2\u0335\u0336\5\u015b\u00ae\2\u0336")
buf.write("\u0337\5\u0161\u00b1\2\u0337\u0338\5\u0147\u00a4\2\u0338")
buf.write("\u00a4\3\2\2\2\u0339\u033a\5\u014f\u00a8\2\u033a\u033b")
buf.write("\5\u0157\u00ac\2\u033b\u033c\5\u0157\u00ac\2\u033c\u033d")
buf.write("\5\u0147\u00a4\2\u033d\u033e\5\u0145\u00a3\2\u033e\u033f")
buf.write("\5\u014f\u00a8\2\u033f\u0340\5\u013f\u00a0\2\u0340\u0341")
buf.write("\5\u0165\u00b3\2\u0341\u0342\5\u0147\u00a4\2\u0342\u00a6")
buf.write("\3\2\2\2\u0343\u0344\5\u014f\u00a8\2\u0344\u0345\5\u0159")
buf.write("\u00ad\2\u0345\u00a8\3\2\2\2\u0346\u0347\5\u014f\u00a8")
buf.write("\2\u0347\u0348\5\u0159\u00ad\2\u0348\u0349\5\u0145\u00a3")
buf.write("\2\u0349\u034a\5\u0147\u00a4\2\u034a\u034b\5\u016d\u00b7")
buf.write("\2\u034b\u00aa\3\2\2\2\u034c\u034d\5\u014f\u00a8\2\u034d")
buf.write("\u034e\5\u0159\u00ad\2\u034e\u034f\5\u0145\u00a3\2\u034f")
buf.write("\u0350\5\u0147\u00a4\2\u0350\u0351\5\u016d\u00b7\2\u0351")
buf.write("\u0352\5\u0147\u00a4\2\u0352\u0353\5\u0145\u00a3\2\u0353")
buf.write("\u00ac\3\2\2\2\u0354\u0355\5\u014f\u00a8\2\u0355\u0356")
buf.write("\5\u0159\u00ad\2\u0356\u0357\5\u014f\u00a8\2\u0357\u0358")
buf.write("\5\u0165\u00b3\2\u0358\u0359\5\u014f\u00a8\2\u0359\u035a")
buf.write("\5\u013f\u00a0\2\u035a\u035b\5\u0155\u00ab\2\u035b\u035c")
buf.write("\5\u0155\u00ab\2\u035c\u035d\5\u016f\u00b8\2\u035d\u00ae")
buf.write("\3\2\2\2\u035e\u035f\5\u014f\u00a8\2\u035f\u0360\5\u0159")
buf.write("\u00ad\2\u0360\u0361\5\u0159\u00ad\2\u0361\u0362\5\u0147")
buf.write("\u00a4\2\u0362\u0363\5\u0161\u00b1\2\u0363\u00b0\3\2\2")
buf.write("\2\u0364\u0365\5\u014f\u00a8\2\u0365\u0366\5\u0159\u00ad")
buf.write("\2\u0366\u0367\5\u0163\u00b2\2\u0367\u0368\5\u0147\u00a4")
buf.write("\2\u0368\u0369\5\u0161\u00b1\2\u0369\u036a\5\u0165\u00b3")
buf.write("\2\u036a\u00b2\3\2\2\2\u036b\u036c\5\u014f\u00a8\2\u036c")
buf.write("\u036d\5\u0159\u00ad\2\u036d\u036e\5\u0163\u00b2\2\u036e")
buf.write("\u036f\5\u0165\u00b3\2\u036f\u0370\5\u0147\u00a4\2\u0370")
buf.write("\u0371\5\u013f\u00a0\2\u0371\u0372\5\u0145\u00a3\2\u0372")
buf.write("\u00b4\3\2\2\2\u0373\u0374\5\u014f\u00a8\2\u0374\u0375")
buf.write("\5\u0159\u00ad\2\u0375\u0376\5\u0165\u00b3\2\u0376\u0377")
buf.write("\5\u0147\u00a4\2\u0377\u0378\5\u0161\u00b1\2\u0378\u0379")
buf.write("\5\u0163\u00b2\2\u0379\u037a\5\u0147\u00a4\2\u037a\u037b")
buf.write("\5\u0143\u00a2\2\u037b\u037c\5\u0165\u00b3\2\u037c\u00b6")
buf.write("\3\2\2\2\u037d\u037e\5\u014f\u00a8\2\u037e\u037f\5\u0159")
buf.write("\u00ad\2\u037f\u0380\5\u0165\u00b3\2\u0380\u0381\5\u015b")
buf.write("\u00ae\2\u0381\u00b8\3\2\2\2\u0382\u0383\5\u014f\u00a8")
buf.write("\2\u0383\u0384\5\u0163\u00b2\2\u0384\u00ba\3\2\2\2\u0385")
buf.write("\u0386\5\u014f\u00a8\2\u0386\u0387\5\u0163\u00b2\2\u0387")
buf.write("\u0388\5\u0159\u00ad\2\u0388\u0389\5\u0167\u00b4\2\u0389")
buf.write("\u038a\5\u0155\u00ab\2\u038a\u038b\5\u0155\u00ab\2\u038b")
buf.write("\u00bc\3\2\2\2\u038c\u038d\5\u0151\u00a9\2\u038d\u038e")
buf.write("\5\u015b\u00ae\2\u038e\u038f\5\u014f\u00a8\2\u038f\u0390")
buf.write("\5\u0159\u00ad\2\u0390\u00be\3\2\2\2\u0391\u0392\5\u0153")
buf.write("\u00aa\2\u0392\u0393\5\u0147\u00a4\2\u0393\u0394\5\u016f")
buf.write("\u00b8\2\u0394\u00c0\3\2\2\2\u0395\u0396\5\u0155\u00ab")
buf.write("\2\u0396\u0397\5\u0147\u00a4\2\u0397\u0398\5\u0149\u00a5")
buf.write("\2\u0398\u0399\5\u0165\u00b3\2\u0399\u00c2\3\2\2\2\u039a")
buf.write("\u039b\5\u0155\u00ab\2\u039b\u039c\5\u014f\u00a8\2\u039c")
buf.write("\u039d\5\u0153\u00aa\2\u039d\u039e\5\u0147\u00a4\2\u039e")
buf.write("\u00c4\3\2\2\2\u039f\u03a0\5\u0155\u00ab\2\u03a0\u03a1")
buf.write("\5\u014f\u00a8\2\u03a1\u03a2\5\u0157\u00ac\2\u03a2\u03a3")
buf.write("\5\u014f\u00a8\2\u03a3\u03a4\5\u0165\u00b3\2\u03a4\u00c6")
buf.write("\3\2\2\2\u03a5\u03a6\5\u0157\u00ac\2\u03a6\u03a7\5\u013f")
buf.write("\u00a0\2\u03a7\u03a8\5\u0165\u00b3\2\u03a8\u03a9\5\u0143")
buf.write("\u00a2\2\u03a9\u03aa\5\u014d\u00a7\2\u03aa\u00c8\3\2\2")
buf.write("\2\u03ab\u03ac\5\u0159\u00ad\2\u03ac\u03ad\5\u013f\u00a0")
buf.write("\2\u03ad\u03ae\5\u0165\u00b3\2\u03ae\u03af\5\u0167\u00b4")
buf.write("\2\u03af\u03b0\5\u0161\u00b1\2\u03b0\u03b1\5\u013f\u00a0")
buf.write("\2\u03b1\u03b2\5\u0155\u00ab\2\u03b2\u00ca\3\2\2\2\u03b3")
buf.write("\u03b4\5\u0159\u00ad\2\u03b4\u03b5\5\u015b\u00ae\2\u03b5")
buf.write("\u00cc\3\2\2\2\u03b6\u03b7\5\u0159\u00ad\2\u03b7\u03b8")
buf.write("\5\u015b\u00ae\2\u03b8\u03b9\5\u0165\u00b3\2\u03b9\u00ce")
buf.write("\3\2\2\2\u03ba\u03bb\5\u0159\u00ad\2\u03bb\u03bc\5\u015b")
buf.write("\u00ae\2\u03bc\u03bd\5\u0165\u00b3\2\u03bd\u03be\5\u0159")
buf.write("\u00ad\2\u03be\u03bf\5\u0167\u00b4\2\u03bf\u03c0\5\u0155")
buf.write("\u00ab\2\u03c0\u03c1\5\u0155\u00ab\2\u03c1\u00d0\3\2\2")
buf.write("\2\u03c2\u03c3\5\u0159\u00ad\2\u03c3\u03c4\5\u0167\u00b4")
buf.write("\2\u03c4\u03c5\5\u0155\u00ab\2\u03c5\u03c6\5\u0155\u00ab")
buf.write("\2\u03c6\u00d2\3\2\2\2\u03c7\u03c8\5\u015b\u00ae\2\u03c8")
buf.write("\u03c9\5\u0149\u00a5\2\u03c9\u00d4\3\2\2\2\u03ca\u03cb")
buf.write("\5\u015b\u00ae\2\u03cb\u03cc\5\u0149\u00a5\2\u03cc\u03cd")
buf.write("\5\u0149\u00a5\2\u03cd\u03ce\5\u0163\u00b2\2\u03ce\u03cf")
buf.write("\5\u0147\u00a4\2\u03cf\u03d0\5\u0165\u00b3\2\u03d0\u00d6")
buf.write("\3\2\2\2\u03d1\u03d2\5\u015b\u00ae\2\u03d2\u03d3\5\u0159")
buf.write("\u00ad\2\u03d3\u00d8\3\2\2\2\u03d4\u03d5\5\u015b\u00ae")
buf.write("\2\u03d5\u03d6\5\u0161\u00b1\2\u03d6\u00da\3\2\2\2\u03d7")
buf.write("\u03d8\5\u015b\u00ae\2\u03d8\u03d9\5\u0161\u00b1\2\u03d9")
buf.write("\u03da\5\u0145\u00a3\2\u03da\u03db\5\u0147\u00a4\2\u03db")
buf.write("\u03dc\5\u0161\u00b1\2\u03dc\u00dc\3\2\2\2\u03dd\u03de")
buf.write("\5\u015b\u00ae\2\u03de\u03df\5\u0167\u00b4\2\u03df\u03e0")
buf.write("\5\u0165\u00b3\2\u03e0\u03e1\5\u0147\u00a4\2\u03e1\u03e2")
buf.write("\5\u0161\u00b1\2\u03e2\u00de\3\2\2\2\u03e3\u03e4\5\u015d")
buf.write("\u00af\2\u03e4\u03e5\5\u0155\u00ab\2\u03e5\u03e6\5\u013f")
buf.write("\u00a0\2\u03e6\u03e7\5\u0159\u00ad\2\u03e7\u00e0\3\2\2")
buf.write("\2\u03e8\u03e9\5\u015d\u00af\2\u03e9\u03ea\5\u0161\u00b1")
buf.write("\2\u03ea\u03eb\5\u013f\u00a0\2\u03eb\u03ec\5\u014b\u00a6")
buf.write("\2\u03ec\u03ed\5\u0157\u00ac\2\u03ed\u03ee\5\u013f\u00a0")
buf.write("\2\u03ee\u00e2\3\2\2\2\u03ef\u03f0\5\u015d\u00af\2\u03f0")
buf.write("\u03f1\5\u0161\u00b1\2\u03f1\u03f2\5\u014f\u00a8\2\u03f2")
buf.write("\u03f3\5\u0157\u00ac\2\u03f3\u03f4\5\u013f\u00a0\2\u03f4")
buf.write("\u03f5\5\u0161\u00b1\2\u03f5\u03f6\5\u016f\u00b8\2\u03f6")
buf.write("\u00e4\3\2\2\2\u03f7\u03f8\5\u015f\u00b0\2\u03f8\u03f9")
buf.write("\5\u0167\u00b4\2\u03f9\u03fa\5\u0147\u00a4\2\u03fa\u03fb")
buf.write("\5\u0161\u00b1\2\u03fb\u03fc\5\u016f\u00b8\2\u03fc\u00e6")
buf.write("\3\2\2\2\u03fd\u03fe\5\u0161\u00b1\2\u03fe\u03ff\5\u013f")
buf.write("\u00a0\2\u03ff\u0400\5\u014f\u00a8\2\u0400\u0401\5\u0163")
buf.write("\u00b2\2\u0401\u0402\5\u0147\u00a4\2\u0402\u00e8\3\2\2")
buf.write("\2\u0403\u0404\5\u0161\u00b1\2\u0404\u0405\5\u0147\u00a4")
buf.write("\2\u0405\u0406\5\u0143\u00a2\2\u0406\u0407\5\u0167\u00b4")
buf.write("\2\u0407\u0408\5\u0161\u00b1\2\u0408\u0409\5\u0163\u00b2")
buf.write("\2\u0409\u040a\5\u014f\u00a8\2\u040a\u040b\5\u0169\u00b5")
buf.write("\2\u040b\u040c\5\u0147\u00a4\2\u040c\u00ea\3\2\2\2\u040d")
buf.write("\u040e\5\u0161\u00b1\2\u040e\u040f\5\u0147\u00a4\2\u040f")
buf.write("\u0410\5\u0149\u00a5\2\u0410\u0411\5\u0147\u00a4\2\u0411")
buf.write("\u0412\5\u0161\u00b1\2\u0412\u0413\5\u0147\u00a4\2\u0413")
buf.write("\u0414\5\u0159\u00ad\2\u0414\u0415\5\u0143\u00a2\2\u0415")
buf.write("\u0416\5\u0147\u00a4\2\u0416\u0417\5\u0163\u00b2\2\u0417")
buf.write("\u00ec\3\2\2\2\u0418\u0419\5\u0161\u00b1\2\u0419\u041a")
buf.write("\5\u0147\u00a4\2\u041a\u041b\5\u014b\u00a6\2\u041b\u041c")
buf.write("\5\u0147\u00a4\2\u041c\u041d\5\u016d\u00b7\2\u041d\u041e")
buf.write("\5\u015d\u00af\2\u041e\u00ee\3\2\2\2\u041f\u0420\5\u0161")
buf.write("\u00b1\2\u0420\u0421\5\u0147\u00a4\2\u0421\u0422\5\u014f")
buf.write("\u00a8\2\u0422\u0423\5\u0159\u00ad\2\u0423\u0424\5\u0145")
buf.write("\u00a3\2\u0424\u0425\5\u0147\u00a4\2\u0425\u0426\5\u016d")
buf.write("\u00b7\2\u0426\u00f0\3\2\2\2\u0427\u0428\5\u0161\u00b1")
buf.write("\2\u0428\u0429\5\u0147\u00a4\2\u0429\u042a\5\u0155\u00ab")
buf.write("\2\u042a\u042b\5\u0147\u00a4\2\u042b\u042c\5\u013f\u00a0")
buf.write("\2\u042c\u042d\5\u0163\u00b2\2\u042d\u042e\5\u0147\u00a4")
buf.write("\2\u042e\u00f2\3\2\2\2\u042f\u0430\5\u0161\u00b1\2\u0430")
buf.write("\u0431\5\u0147\u00a4\2\u0431\u0432\5\u0159\u00ad\2\u0432")
buf.write("\u0433\5\u013f\u00a0\2\u0433\u0434\5\u0157\u00ac\2\u0434")
buf.write("\u0435\5\u0147\u00a4\2\u0435\u00f4\3\2\2\2\u0436\u0437")
buf.write("\5\u0161\u00b1\2\u0437\u0438\5\u0147\u00a4\2\u0438\u0439")
buf.write("\5\u015d\u00af\2\u0439\u043a\5\u0155\u00ab\2\u043a\u043b")
buf.write("\5\u013f\u00a0\2\u043b\u043c\5\u0143\u00a2\2\u043c\u043d")
buf.write("\5\u0147\u00a4\2\u043d\u00f6\3\2\2\2\u043e\u043f\5\u0161")
buf.write("\u00b1\2\u043f\u0440\5\u0147\u00a4\2\u0440\u0441\5\u0163")
buf.write("\u00b2\2\u0441\u0442\5\u0165\u00b3\2\u0442\u0443\5\u0161")
buf.write("\u00b1\2\u0443\u0444\5\u014f\u00a8\2\u0444\u0445\5\u0143")
buf.write("\u00a2\2\u0445\u0446\5\u0165\u00b3\2\u0446\u00f8\3\2\2")
buf.write("\2\u0447\u0448\5\u0161\u00b1\2\u0448\u0449\5\u014f\u00a8")
buf.write("\2\u0449\u044a\5\u014b\u00a6\2\u044a\u044b\5\u014d\u00a7")
buf.write("\2\u044b\u044c\5\u0165\u00b3\2\u044c\u00fa\3\2\2\2\u044d")
buf.write("\u044e\5\u0161\u00b1\2\u044e\u044f\5\u015b\u00ae\2\u044f")
buf.write("\u0450\5\u0155\u00ab\2\u0450\u0451\5\u0155\u00ab\2\u0451")
buf.write("\u0452\5\u0141\u00a1\2\u0452\u0453\5\u013f\u00a0\2\u0453")
buf.write("\u0454\5\u0143\u00a2\2\u0454\u0455\5\u0153\u00aa\2\u0455")
buf.write("\u00fc\3\2\2\2\u0456\u0457\5\u0161\u00b1\2\u0457\u0458")
buf.write("\5\u015b\u00ae\2\u0458\u0459\5\u016b\u00b6\2\u0459\u00fe")
buf.write("\3\2\2\2\u045a\u045b\5\u0163\u00b2\2\u045b\u045c\5\u013f")
buf.write("\u00a0\2\u045c\u045d\5\u0169\u00b5\2\u045d\u045e\5\u0147")
buf.write("\u00a4\2\u045e\u045f\5\u015d\u00af\2\u045f\u0460\5\u015b")
buf.write("\u00ae\2\u0460\u0461\5\u014f\u00a8\2\u0461\u0462\5\u0159")
buf.write("\u00ad\2\u0462\u0463\5\u0165\u00b3\2\u0463\u0100\3\2\2")
buf.write("\2\u0464\u0465\5\u0163\u00b2\2\u0465\u0466\5\u0147\u00a4")
buf.write("\2\u0466\u0467\5\u0155\u00ab\2\u0467\u0468\5\u0147\u00a4")
buf.write("\2\u0468\u0469\5\u0143\u00a2\2\u0469\u046a\5\u0165\u00b3")
buf.write("\2\u046a\u0102\3\2\2\2\u046b\u046c\5\u0163\u00b2\2\u046c")
buf.write("\u046d\5\u0147\u00a4\2\u046d\u046e\5\u0165\u00b3\2\u046e")
buf.write("\u0104\3\2\2\2\u046f\u0470\5\u0165\u00b3\2\u0470\u0471")
buf.write("\5\u013f\u00a0\2\u0471\u0472\5\u0141\u00a1\2\u0472\u0473")
buf.write("\5\u0155\u00ab\2\u0473\u0474\5\u0147\u00a4\2\u0474\u0106")
buf.write("\3\2\2\2\u0475\u0476\5\u0165\u00b3\2\u0476\u0477\5\u0147")
buf.write("\u00a4\2\u0477\u0478\5\u0157\u00ac\2\u0478\u0479\5\u015d")
buf.write("\u00af\2\u0479\u0108\3\2\2\2\u047a\u047b\5\u0165\u00b3")
buf.write("\2\u047b\u047c\5\u0147\u00a4\2\u047c\u047d\5\u0157\u00ac")
buf.write("\2\u047d\u047e\5\u015d\u00af\2\u047e\u047f\5\u015b\u00ae")
buf.write("\2\u047f\u0480\5\u0161\u00b1\2\u0480\u0481\5\u013f\u00a0")
buf.write("\2\u0481\u0482\5\u0161\u00b1\2\u0482\u0483\5\u016f\u00b8")
buf.write("\2\u0483\u010a\3\2\2\2\u0484\u0485\5\u0165\u00b3\2\u0485")
buf.write("\u0486\5\u014d\u00a7\2\u0486\u0487\5\u0147\u00a4\2\u0487")
buf.write("\u0488\5\u0159\u00ad\2\u0488\u010c\3\2\2\2\u0489\u048a")
buf.write("\5\u0165\u00b3\2\u048a\u048b\5\u015b\u00ae\2\u048b\u010e")
buf.write("\3\2\2\2\u048c\u048d\5\u0165\u00b3\2\u048d\u048e\5\u0161")
buf.write("\u00b1\2\u048e\u048f\5\u013f\u00a0\2\u048f\u0490\5\u0159")
buf.write("\u00ad\2\u0490\u0491\5\u0163\u00b2\2\u0491\u0492\5\u013f")
buf.write("\u00a0\2\u0492\u0493\5\u0143\u00a2\2\u0493\u0494\5\u0165")
buf.write("\u00b3\2\u0494\u0495\5\u014f\u00a8\2\u0495\u0496\5\u015b")
buf.write("\u00ae\2\u0496\u0497\5\u0159\u00ad\2\u0497\u0110\3\2\2")
buf.write("\2\u0498\u0499\5\u0165\u00b3\2\u0499\u049a\5\u0161\u00b1")
buf.write("\2\u049a\u049b\5\u014f\u00a8\2\u049b\u049c\5\u014b\u00a6")
buf.write("\2\u049c\u049d\5\u014b\u00a6\2\u049d\u049e\5\u0147\u00a4")
buf.write("\2\u049e\u049f\5\u0161\u00b1\2\u049f\u0112\3\2\2\2\u04a0")
buf.write("\u04a1\5\u0167\u00b4\2\u04a1\u04a2\5\u0159\u00ad\2\u04a2")
buf.write("\u04a3\5\u014f\u00a8\2\u04a3\u04a4\5\u015b\u00ae\2\u04a4")
buf.write("\u04a5\5\u0159\u00ad\2\u04a5\u0114\3\2\2\2\u04a6\u04a7")
buf.write("\5\u0167\u00b4\2\u04a7\u04a8\5\u0159\u00ad\2\u04a8\u04a9")
buf.write("\5\u014f\u00a8\2\u04a9\u04aa\5\u015f\u00b0\2\u04aa\u04ab")
buf.write("\5\u0167\u00b4\2\u04ab\u04ac\5\u0147\u00a4\2\u04ac\u0116")
buf.write("\3\2\2\2\u04ad\u04ae\5\u0167\u00b4\2\u04ae\u04af\5\u015d")
buf.write("\u00af\2\u04af\u04b0\5\u0145\u00a3\2\u04b0\u04b1\5\u013f")
buf.write("\u00a0\2\u04b1\u04b2\5\u0165\u00b3\2\u04b2\u04b3\5\u0147")
buf.write("\u00a4\2\u04b3\u0118\3\2\2\2\u04b4\u04b5\5\u0167\u00b4")
buf.write("\2\u04b5\u04b6\5\u0163\u00b2\2\u04b6\u04b7\5\u014f\u00a8")
buf.write("\2\u04b7\u04b8\5\u0159\u00ad\2\u04b8\u04b9\5\u014b\u00a6")
buf.write("\2\u04b9\u011a\3\2\2\2\u04ba\u04bb\5\u0169\u00b5\2\u04bb")
buf.write("\u04bc\5\u013f\u00a0\2\u04bc\u04bd\5\u0143\u00a2\2\u04bd")
buf.write("\u04be\5\u0167\u00b4\2\u04be\u04bf\5\u0167\u00b4\2\u04bf")
buf.write("\u04c0\5\u0157\u00ac\2\u04c0\u011c\3\2\2\2\u04c1\u04c2")
buf.write("\5\u0169\u00b5\2\u04c2\u04c3\5\u013f\u00a0\2\u04c3\u04c4")
buf.write("\5\u0155\u00ab\2\u04c4\u04c5\5\u0167\u00b4\2\u04c5\u04c6")
buf.write("\5\u0147\u00a4\2\u04c6\u04c7\5\u0163\u00b2\2\u04c7\u011e")
buf.write("\3\2\2\2\u04c8\u04c9\5\u0169\u00b5\2\u04c9\u04ca\5\u014f")
buf.write("\u00a8\2\u04ca\u04cb\5\u0147\u00a4\2\u04cb\u04cc\5\u016b")
buf.write("\u00b6\2\u04cc\u0120\3\2\2\2\u04cd\u04ce\5\u0169\u00b5")
buf.write("\2\u04ce\u04cf\5\u014f\u00a8\2\u04cf\u04d0\5\u0161\u00b1")
buf.write("\2\u04d0\u04d1\5\u0165\u00b3\2\u04d1\u04d2\5\u0167\u00b4")
buf.write("\2\u04d2\u04d3\5\u013f\u00a0\2\u04d3\u04d4\5\u0155\u00ab")
buf.write("\2\u04d4\u0122\3\2\2\2\u04d5\u04d6\5\u016b\u00b6\2\u04d6")
buf.write("\u04d7\5\u014d\u00a7\2\u04d7\u04d8\5\u0147\u00a4\2\u04d8")
buf.write("\u04d9\5\u0159\u00ad\2\u04d9\u0124\3\2\2\2\u04da\u04db")
buf.write("\5\u016b\u00b6\2\u04db\u04dc\5\u014d\u00a7\2\u04dc\u04dd")
buf.write("\5\u0147\u00a4\2\u04dd\u04de\5\u0161\u00b1\2\u04de\u04df")
buf.write("\5\u0147\u00a4\2\u04df\u0126\3\2\2\2\u04e0\u04e1\5\u016b")
buf.write("\u00b6\2\u04e1\u04e2\5\u014f\u00a8\2\u04e2\u04e3\5\u0165")
buf.write("\u00b3\2\u04e3\u04e4\5\u014d\u00a7\2\u04e4\u0128\3\2\2")
buf.write("\2\u04e5\u04e6\5\u016b\u00b6\2\u04e6\u04e7\5\u014f\u00a8")
buf.write("\2\u04e7\u04e8\5\u0165\u00b3\2\u04e8\u04e9\5\u014d\u00a7")
buf.write("\2\u04e9\u04ea\5\u015b\u00ae\2\u04ea\u04eb\5\u0167\u00b4")
buf.write("\2\u04eb\u04ec\5\u0165\u00b3\2\u04ec\u012a\3\2\2\2\u04ed")
buf.write("\u04f3\7$\2\2\u04ee\u04f2\n\2\2\2\u04ef\u04f0\7$\2\2\u04f0")
buf.write("\u04f2\7$\2\2\u04f1\u04ee\3\2\2\2\u04f1\u04ef\3\2\2\2")
buf.write("\u04f2\u04f5\3\2\2\2\u04f3\u04f1\3\2\2\2\u04f3\u04f4\3")
buf.write("\2\2\2\u04f4\u04f6\3\2\2\2\u04f5\u04f3\3\2\2\2\u04f6\u0511")
buf.write("\7$\2\2\u04f7\u04fd\7b\2\2\u04f8\u04fc\n\3\2\2\u04f9\u04fa")
buf.write("\7b\2\2\u04fa\u04fc\7b\2\2\u04fb\u04f8\3\2\2\2\u04fb\u04f9")
buf.write("\3\2\2\2\u04fc\u04ff\3\2\2\2\u04fd\u04fb\3\2\2\2\u04fd")
buf.write("\u04fe\3\2\2\2\u04fe\u0500\3\2\2\2\u04ff\u04fd\3\2\2\2")
buf.write("\u0500\u0511\7b\2\2\u0501\u0505\7]\2\2\u0502\u0504\n\4")
buf.write("\2\2\u0503\u0502\3\2\2\2\u0504\u0507\3\2\2\2\u0505\u0503")
buf.write("\3\2\2\2\u0505\u0506\3\2\2\2\u0506\u0508\3\2\2\2\u0507")
buf.write("\u0505\3\2\2\2\u0508\u0511\7_\2\2\u0509\u050d\t\5\2\2")
buf.write("\u050a\u050c\t\6\2\2\u050b\u050a\3\2\2\2\u050c\u050f\3")
buf.write("\2\2\2\u050d\u050b\3\2\2\2\u050d\u050e\3\2\2\2\u050e\u0511")
buf.write("\3\2\2\2\u050f\u050d\3\2\2\2\u0510\u04ed\3\2\2\2\u0510")
buf.write("\u04f7\3\2\2\2\u0510\u0501\3\2\2\2\u0510\u0509\3\2\2\2")
buf.write("\u0511\u012c\3\2\2\2\u0512\u0514\5\u013d\u009f\2\u0513")
buf.write("\u0512\3\2\2\2\u0514\u0515\3\2\2\2\u0515\u0513\3\2\2\2")
buf.write("\u0515\u0516\3\2\2\2\u0516\u051e\3\2\2\2\u0517\u051b\7")
buf.write("\60\2\2\u0518\u051a\5\u013d\u009f\2\u0519\u0518\3\2\2")
buf.write("\2\u051a\u051d\3\2\2\2\u051b\u0519\3\2\2\2\u051b\u051c")
buf.write("\3\2\2\2\u051c\u051f\3\2\2\2\u051d\u051b\3\2\2\2\u051e")
buf.write("\u0517\3\2\2\2\u051e\u051f\3\2\2\2\u051f\u0529\3\2\2\2")
buf.write("\u0520\u0522\5\u0147\u00a4\2\u0521\u0523\t\7\2\2\u0522")
buf.write("\u0521\3\2\2\2\u0522\u0523\3\2\2\2\u0523\u0525\3\2\2\2")
buf.write("\u0524\u0526\5\u013d\u009f\2\u0525\u0524\3\2\2\2\u0526")
buf.write("\u0527\3\2\2\2\u0527\u0525\3\2\2\2\u0527\u0528\3\2\2\2")
buf.write("\u0528\u052a\3\2\2\2\u0529\u0520\3\2\2\2\u0529\u052a\3")
buf.write("\2\2\2\u052a\u053d\3\2\2\2\u052b\u052d\7\60\2\2\u052c")
buf.write("\u052e\5\u013d\u009f\2\u052d\u052c\3\2\2\2\u052e\u052f")
buf.write("\3\2\2\2\u052f\u052d\3\2\2\2\u052f\u0530\3\2\2\2\u0530")
buf.write("\u053a\3\2\2\2\u0531\u0533\5\u0147\u00a4\2\u0532\u0534")
buf.write("\t\7\2\2\u0533\u0532\3\2\2\2\u0533\u0534\3\2\2\2\u0534")
buf.write("\u0536\3\2\2\2\u0535\u0537\5\u013d\u009f\2\u0536\u0535")
buf.write("\3\2\2\2\u0537\u0538\3\2\2\2\u0538\u0536\3\2\2\2\u0538")
buf.write("\u0539\3\2\2\2\u0539\u053b\3\2\2\2\u053a\u0531\3\2\2\2")
buf.write("\u053a\u053b\3\2\2\2\u053b\u053d\3\2\2\2\u053c\u0513\3")
buf.write("\2\2\2\u053c\u052b\3\2\2\2\u053d\u012e\3\2\2\2\u053e\u0542")
buf.write("\7A\2\2\u053f\u0541\5\u013d\u009f\2\u0540\u053f\3\2\2")
buf.write("\2\u0541\u0544\3\2\2\2\u0542\u0540\3\2\2\2\u0542\u0543")
buf.write("\3\2\2\2\u0543\u0548\3\2\2\2\u0544\u0542\3\2\2\2\u0545")
buf.write("\u0546\t\b\2\2\u0546\u0548\5\u012b\u0096\2\u0547\u053e")
buf.write("\3\2\2\2\u0547\u0545\3\2\2\2\u0548\u0130\3\2\2\2\u0549")
buf.write("\u054f\7)\2\2\u054a\u054e\n\t\2\2\u054b\u054c\7)\2\2\u054c")
buf.write("\u054e\7)\2\2\u054d\u054a\3\2\2\2\u054d\u054b\3\2\2\2")
buf.write("\u054e\u0551\3\2\2\2\u054f\u054d\3\2\2\2\u054f\u0550\3")
buf.write("\2\2\2\u0550\u0552\3\2\2\2\u0551\u054f\3\2\2\2\u0552\u0553")
buf.write("\7)\2\2\u0553\u0132\3\2\2\2\u0554\u0555\5\u016d\u00b7")
buf.write("\2\u0555\u0556\5\u0131\u0099\2\u0556\u0134\3\2\2\2\u0557")
buf.write("\u0558\7/\2\2\u0558\u0559\7/\2\2\u0559\u055d\3\2\2\2\u055a")
buf.write("\u055c\n\n\2\2\u055b\u055a\3\2\2\2\u055c\u055f\3\2\2\2")
buf.write("\u055d\u055b\3\2\2\2\u055d\u055e\3\2\2\2\u055e\u0560\3")
buf.write("\2\2\2\u055f\u055d\3\2\2\2\u0560\u0561\b\u009b\2\2\u0561")
buf.write("\u0136\3\2\2\2\u0562\u0563\7\61\2\2\u0563\u0564\7,\2\2")
buf.write("\u0564\u0568\3\2\2\2\u0565\u0567\13\2\2\2\u0566\u0565")
buf.write("\3\2\2\2\u0567\u056a\3\2\2\2\u0568\u0569\3\2\2\2\u0568")
buf.write("\u0566\3\2\2\2\u0569\u056e\3\2\2\2\u056a\u0568\3\2\2\2")
buf.write("\u056b\u056c\7,\2\2\u056c\u056f\7\61\2\2\u056d\u056f\7")
buf.write("\2\2\3\u056e\u056b\3\2\2\2\u056e\u056d\3\2\2\2\u056f\u0570")
buf.write("\3\2\2\2\u0570\u0571\b\u009c\2\2\u0571\u0138\3\2\2\2\u0572")
buf.write("\u0573\t\13\2\2\u0573\u0574\3\2\2\2\u0574\u0575\b\u009d")
buf.write("\2\2\u0575\u013a\3\2\2\2\u0576\u0577\13\2\2\2\u0577\u013c")
buf.write("\3\2\2\2\u0578\u0579\t\f\2\2\u0579\u013e\3\2\2\2\u057a")
buf.write("\u057b\t\r\2\2\u057b\u0140\3\2\2\2\u057c\u057d\t\16\2")
buf.write("\2\u057d\u0142\3\2\2\2\u057e\u057f\t\17\2\2\u057f\u0144")
buf.write("\3\2\2\2\u0580\u0581\t\20\2\2\u0581\u0146\3\2\2\2\u0582")
buf.write("\u0583\t\21\2\2\u0583\u0148\3\2\2\2\u0584\u0585\t\22\2")
buf.write("\2\u0585\u014a\3\2\2\2\u0586\u0587\t\23\2\2\u0587\u014c")
buf.write("\3\2\2\2\u0588\u0589\t\24\2\2\u0589\u014e\3\2\2\2\u058a")
buf.write("\u058b\t\25\2\2\u058b\u0150\3\2\2\2\u058c\u058d\t\26\2")
buf.write("\2\u058d\u0152\3\2\2\2\u058e\u058f\t\27\2\2\u058f\u0154")
buf.write("\3\2\2\2\u0590\u0591\t\30\2\2\u0591\u0156\3\2\2\2\u0592")
buf.write("\u0593\t\31\2\2\u0593\u0158\3\2\2\2\u0594\u0595\t\32\2")
buf.write("\2\u0595\u015a\3\2\2\2\u0596\u0597\t\33\2\2\u0597\u015c")
buf.write("\3\2\2\2\u0598\u0599\t\34\2\2\u0599\u015e\3\2\2\2\u059a")
buf.write("\u059b\t\35\2\2\u059b\u0160\3\2\2\2\u059c\u059d\t\36\2")
buf.write("\2\u059d\u0162\3\2\2\2\u059e\u059f\t\37\2\2\u059f\u0164")
buf.write("\3\2\2\2\u05a0\u05a1\t \2\2\u05a1\u0166\3\2\2\2\u05a2")
buf.write("\u05a3\t!\2\2\u05a3\u0168\3\2\2\2\u05a4\u05a5\t\"\2\2")
buf.write("\u05a5\u016a\3\2\2\2\u05a6\u05a7\t#\2\2\u05a7\u016c\3")
buf.write("\2\2\2\u05a8\u05a9\t$\2\2\u05a9\u016e\3\2\2\2\u05aa\u05ab")
buf.write("\t%\2\2\u05ab\u0170\3\2\2\2\u05ac\u05ad\t&\2\2\u05ad\u0172")
buf.write("\3\2\2\2\34\2\u04f1\u04f3\u04fb\u04fd\u0505\u050d\u0510")
buf.write("\u0515\u051b\u051e\u0522\u0527\u0529\u052f\u0533\u0538")
buf.write("\u053a\u053c\u0542\u0547\u054d\u054f\u055d\u0568\u056e")
buf.write("\3\2\3\2")
return buf.getvalue()
class JsonSQLLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
SCOL = 1
DOT = 2
OPEN_PAR = 3
CLOSE_PAR = 4
COMMA = 5
ASSIGN = 6
STAR = 7
PLUS = 8
MINUS = 9
TILDE = 10
PIPE2 = 11
DIV = 12
MOD = 13
LT2 = 14
GT2 = 15
AMP = 16
PIPE = 17
LT = 18
LT_EQ = 19
GT = 20
GT_EQ = 21
EQ = 22
NOT_EQ1 = 23
NOT_EQ2 = 24
K_ABORT = 25
K_ACTION = 26
K_ADD = 27
K_AFTER = 28
K_ALL = 29
K_ALTER = 30
K_ANALYZE = 31
K_AND = 32
K_AS = 33
K_ASC = 34
K_ATTACH = 35
K_AUTOINCREMENT = 36
K_BEFORE = 37
K_BEGIN = 38
K_BETWEEN = 39
K_BY = 40
K_CASCADE = 41
K_CASE = 42
K_CAST = 43
K_CHECK = 44
K_COLLATE = 45
K_COLUMN = 46
K_COMMIT = 47
K_CONFLICT = 48
K_CONSTRAINT = 49
K_CREATE = 50
K_CROSS = 51
K_CURRENT_DATE = 52
K_CURRENT_TIME = 53
K_CURRENT_TIMESTAMP = 54
K_DATABASE = 55
K_DEFAULT = 56
K_DEFERRABLE = 57
K_DEFERRED = 58
K_DELETE = 59
K_DESC = 60
K_DETACH = 61
K_DISTINCT = 62
K_DROP = 63
K_EACH = 64
K_ELSE = 65
K_END = 66
K_ESCAPE = 67
K_EXCEPT = 68
K_EXCLUSIVE = 69
K_EXISTS = 70
K_EXPLAIN = 71
K_FAIL = 72
K_FOR = 73
K_FOREIGN = 74
K_FROM = 75
K_FULL = 76
K_GLOB = 77
K_GROUP = 78
K_HAVING = 79
K_IF = 80
K_IGNORE = 81
K_IMMEDIATE = 82
K_IN = 83
K_INDEX = 84
K_INDEXED = 85
K_INITIALLY = 86
K_INNER = 87
K_INSERT = 88
K_INSTEAD = 89
K_INTERSECT = 90
K_INTO = 91
K_IS = 92
K_ISNULL = 93
K_JOIN = 94
K_KEY = 95
K_LEFT = 96
K_LIKE = 97
K_LIMIT = 98
K_MATCH = 99
K_NATURAL = 100
K_NO = 101
K_NOT = 102
K_NOTNULL = 103
K_NULL = 104
K_OF = 105
K_OFFSET = 106
K_ON = 107
K_OR = 108
K_ORDER = 109
K_OUTER = 110
K_PLAN = 111
K_PRAGMA = 112
K_PRIMARY = 113
K_QUERY = 114
K_RAISE = 115
K_RECURSIVE = 116
K_REFERENCES = 117
K_REGEXP = 118
K_REINDEX = 119
K_RELEASE = 120
K_RENAME = 121
K_REPLACE = 122
K_RESTRICT = 123
K_RIGHT = 124
K_ROLLBACK = 125
K_ROW = 126
K_SAVEPOINT = 127
K_SELECT = 128
K_SET = 129
K_TABLE = 130
K_TEMP = 131
K_TEMPORARY = 132
K_THEN = 133
K_TO = 134
K_TRANSACTION = 135
K_TRIGGER = 136
K_UNION = 137
K_UNIQUE = 138
K_UPDATE = 139
K_USING = 140
K_VACUUM = 141
K_VALUES = 142
K_VIEW = 143
K_VIRTUAL = 144
K_WHEN = 145
K_WHERE = 146
K_WITH = 147
K_WITHOUT = 148
IDENTIFIER = 149
NUMERIC_LITERAL = 150
BIND_PARAMETER = 151
STRING_LITERAL = 152
BLOB_LITERAL = 153
SINGLE_LINE_COMMENT = 154
MULTILINE_COMMENT = 155
SPACES = 156
UNEXPECTED_CHAR = 157
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"';'", "'.'", "'('", "')'", "','", "'='", "'*'", "'+'", "'-'",
"'~'", "'||'", "'/'", "'%'", "'<<'", "'>>'", "'&'", "'|'", "'<'",
"'<='", "'>'", "'>='", "'=='", "'!='", "'<>'" ]
symbolicNames = [ "<INVALID>",
"SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN", "STAR",
"PLUS", "MINUS", "TILDE", "PIPE2", "DIV", "MOD", "LT2", "GT2",
"AMP", "PIPE", "LT", "LT_EQ", "GT", "GT_EQ", "EQ", "NOT_EQ1",
"NOT_EQ2", "K_ABORT", "K_ACTION", "K_ADD", "K_AFTER", "K_ALL",
"K_ALTER", "K_ANALYZE", "K_AND", "K_AS", "K_ASC", "K_ATTACH",
"K_AUTOINCREMENT", "K_BEFORE", "K_BEGIN", "K_BETWEEN", "K_BY",
"K_CASCADE", "K_CASE", "K_CAST", "K_CHECK", "K_COLLATE", "K_COLUMN",
"K_COMMIT", "K_CONFLICT", "K_CONSTRAINT", "K_CREATE", "K_CROSS",
"K_CURRENT_DATE", "K_CURRENT_TIME", "K_CURRENT_TIMESTAMP", "K_DATABASE",
"K_DEFAULT", "K_DEFERRABLE", "K_DEFERRED", "K_DELETE", "K_DESC",
"K_DETACH", "K_DISTINCT", "K_DROP", "K_EACH", "K_ELSE", "K_END",
"K_ESCAPE", "K_EXCEPT", "K_EXCLUSIVE", "K_EXISTS", "K_EXPLAIN",
"K_FAIL", "K_FOR", "K_FOREIGN", "K_FROM", "K_FULL", "K_GLOB",
"K_GROUP", "K_HAVING", "K_IF", "K_IGNORE", "K_IMMEDIATE", "K_IN",
"K_INDEX", "K_INDEXED", "K_INITIALLY", "K_INNER", "K_INSERT",
"K_INSTEAD", "K_INTERSECT", "K_INTO", "K_IS", "K_ISNULL", "K_JOIN",
"K_KEY", "K_LEFT", "K_LIKE", "K_LIMIT", "K_MATCH", "K_NATURAL",
"K_NO", "K_NOT", "K_NOTNULL", "K_NULL", "K_OF", "K_OFFSET",
"K_ON", "K_OR", "K_ORDER", "K_OUTER", "K_PLAN", "K_PRAGMA",
"K_PRIMARY", "K_QUERY", "K_RAISE", "K_RECURSIVE", "K_REFERENCES",
"K_REGEXP", "K_REINDEX", "K_RELEASE", "K_RENAME", "K_REPLACE",
"K_RESTRICT", "K_RIGHT", "K_ROLLBACK", "K_ROW", "K_SAVEPOINT",
"K_SELECT", "K_SET", "K_TABLE", "K_TEMP", "K_TEMPORARY", "K_THEN",
"K_TO", "K_TRANSACTION", "K_TRIGGER", "K_UNION", "K_UNIQUE",
"K_UPDATE", "K_USING", "K_VACUUM", "K_VALUES", "K_VIEW", "K_VIRTUAL",
"K_WHEN", "K_WHERE", "K_WITH", "K_WITHOUT", "IDENTIFIER", "NUMERIC_LITERAL",
"BIND_PARAMETER", "STRING_LITERAL", "BLOB_LITERAL", "SINGLE_LINE_COMMENT",
"MULTILINE_COMMENT", "SPACES", "UNEXPECTED_CHAR" ]
ruleNames = [ "SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN",
"STAR", "PLUS", "MINUS", "TILDE", "PIPE2", "DIV", "MOD",
"LT2", "GT2", "AMP", "PIPE", "LT", "LT_EQ", "GT", "GT_EQ",
"EQ", "NOT_EQ1", "NOT_EQ2", "K_ABORT", "K_ACTION", "K_ADD",
"K_AFTER", "K_ALL", "K_ALTER", "K_ANALYZE", "K_AND", "K_AS",
"K_ASC", "K_ATTACH", "K_AUTOINCREMENT", "K_BEFORE", "K_BEGIN",
"K_BETWEEN", "K_BY", "K_CASCADE", "K_CASE", "K_CAST",
"K_CHECK", "K_COLLATE", "K_COLUMN", "K_COMMIT", "K_CONFLICT",
"K_CONSTRAINT", "K_CREATE", "K_CROSS", "K_CURRENT_DATE",
"K_CURRENT_TIME", "K_CURRENT_TIMESTAMP", "K_DATABASE",
"K_DEFAULT", "K_DEFERRABLE", "K_DEFERRED", "K_DELETE",
"K_DESC", "K_DETACH", "K_DISTINCT", "K_DROP", "K_EACH",
"K_ELSE", "K_END", "K_ESCAPE", "K_EXCEPT", "K_EXCLUSIVE",
"K_EXISTS", "K_EXPLAIN", "K_FAIL", "K_FOR", "K_FOREIGN",
"K_FROM", "K_FULL", "K_GLOB", "K_GROUP", "K_HAVING", "K_IF",
"K_IGNORE", "K_IMMEDIATE", "K_IN", "K_INDEX", "K_INDEXED",
"K_INITIALLY", "K_INNER", "K_INSERT", "K_INSTEAD", "K_INTERSECT",
"K_INTO", "K_IS", "K_ISNULL", "K_JOIN", "K_KEY", "K_LEFT",
"K_LIKE", "K_LIMIT", "K_MATCH", "K_NATURAL", "K_NO", "K_NOT",
"K_NOTNULL", "K_NULL", "K_OF", "K_OFFSET", "K_ON", "K_OR",
"K_ORDER", "K_OUTER", "K_PLAN", "K_PRAGMA", "K_PRIMARY",
"K_QUERY", "K_RAISE", "K_RECURSIVE", "K_REFERENCES", "K_REGEXP",
"K_REINDEX", "K_RELEASE", "K_RENAME", "K_REPLACE", "K_RESTRICT",
"K_RIGHT", "K_ROLLBACK", "K_ROW", "K_SAVEPOINT", "K_SELECT",
"K_SET", "K_TABLE", "K_TEMP", "K_TEMPORARY", "K_THEN",
"K_TO", "K_TRANSACTION", "K_TRIGGER", "K_UNION", "K_UNIQUE",
"K_UPDATE", "K_USING", "K_VACUUM", "K_VALUES", "K_VIEW",
"K_VIRTUAL", "K_WHEN", "K_WHERE", "K_WITH", "K_WITHOUT",
"IDENTIFIER", "NUMERIC_LITERAL", "BIND_PARAMETER", "STRING_LITERAL",
"BLOB_LITERAL", "SINGLE_LINE_COMMENT", "MULTILINE_COMMENT",
"SPACES", "UNEXPECTED_CHAR", "DIGIT", "A", "B", "C", "D",
"E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O",
"P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" ]
grammarFileName = "JsonSQL.g4"
def __init__(self, input=None):
super().__init__(input)
self.checkVersion("4.5.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
from __future__ import unicode_literals
from datetime import datetime
import functools
from django.db.models import Q
from django.conf import settings
from modeltree.tree import trees, MODELTREE_DEFAULT_ALIAS
from preserialize.serialize import serialize
from restlib2.http import codes
from restlib2.params import Parametizer, StrParam
from avocado.events import usage
from avocado.models import DataQuery, DataView, DataContext
from avocado.query import pipeline
from serrano.forms import QueryForm
from serrano.links import reverse_tmpl
from serrano.resources import templates
from serrano.resources.base import ThrottledResource
from serrano.utils import send_mail
DELETE_QUERY_EMAIL_TITLE = "'{0}' has been deleted"
DELETE_QUERY_EMAIL_BODY = """The query named '{0}' has been deleted. You are
being notified because this query was shared with you. This query is no
longer available."""
def query_posthook(instance, data, request):
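    "Flags query ownership for the requester and hides shared_users from non-owners."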
if getattr(instance, 'user', None) and instance.user.is_authenticated():
data['is_owner'] = instance.user == request.user
else:
data['is_owner'] = instance.session_key == request.session.session_key
if not data['is_owner']:
del data['shared_users']
return data
class QueryParametizer(Parametizer):
tree = StrParam(MODELTREE_DEFAULT_ALIAS, choices=trees)
processor = StrParam('default', choices=pipeline.query_processors)
class QueryBase(ThrottledResource):
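    "Base resource providing per-user/session lookup and serialization of DataQuery instances."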
cache_max_age = 0
private_cache = True
model = DataQuery
template = templates.Query
parametizer = QueryParametizer
def prepare(self, request, instance, template=None):
if template is None:
template = self.template
posthook = functools.partial(query_posthook, request=request)
return serialize(instance, posthook=posthook, **template)
def get_link_templates(self, request):
uri = request.build_absolute_uri
return {
'self': reverse_tmpl(
uri, 'serrano:queries:single', {'pk': (int, 'id')}),
'forks': reverse_tmpl(
uri, 'serrano:queries:forks', {'pk': (int, 'id')}),
'stats': reverse_tmpl(
uri, 'serrano:queries:stats', {'pk': (int, 'id')}),
'results': reverse_tmpl(
uri, 'serrano:queries:results', {'pk': (int, 'id')}),
}
def get_queryset(self, request, **kwargs):
"Constructs a QuerySet for this user or session."
if getattr(request, 'user', None) and request.user.is_authenticated():
kwargs['user'] = request.user
elif request.session.session_key:
kwargs['session_key'] = request.session.session_key
else:
            # The only case where kwargs is empty is for non-authenticated,
            # cookieless agents (e.g. bots and most non-browser clients),
            # since no session exists yet for the agent.
return self.model.objects.none()
return self.model.objects.filter(**kwargs)
def get_request_filters(self, request):
filters = {}
if getattr(request, 'user', None) and request.user.is_authenticated():
filters['user'] = request.user
elif request.session.session_key:
filters['session_key'] = request.session.session_key
return filters
def get_object(self, request, pk=None, session=None, **kwargs):
if not pk and not session:
            raise ValueError('A pk or session must be used for the lookup')
if not hasattr(request, 'instance'):
queryset = self.get_queryset(request, **kwargs)
instance = None
try:
if pk:
instance = queryset.get(pk=pk)
else:
instance = queryset.get(session=True)
except self.model.DoesNotExist:
if session:
filters = self.get_request_filters(request)
try:
context = DataContext.objects.filter(**filters)\
.get(session=True)
view = DataView.objects.filter(**filters)\
.get(session=True)
instance = DataQuery(context_json=context.json,
view_json=view.json)
except (DataContext.DoesNotExist, DataView.DoesNotExist):
pass
request.instance = instance
return request.instance
class QueriesResource(QueryBase):
    "Resource for accessing the queries shared with or owned by a user"
template = templates.Query
def prepare(self, request, instance, template=None):
if template is None:
template = self.template
posthook = functools.partial(query_posthook, request=request)
return serialize(instance, posthook=posthook, **template)
def get_queryset(self, request, **kwargs):
if getattr(request, 'user', None) and request.user.is_authenticated():
f = Q(user=request.user) | Q(shared_users__pk=request.user.pk)
elif request.session.session_key:
f = Q(session_key=request.session.session_key)
else:
return super(QueriesResource, self).get_queryset(request, **kwargs)
return self.model.objects.filter(f, **kwargs) \
.order_by('-accessed').distinct()
def get(self, request):
queryset = self.get_queryset(request)
return self.prepare(request, queryset)
def post(self, request):
form = QueryForm(request, request.data)
if form.is_valid():
instance = form.save()
usage.log('create', instance=instance, request=request)
request.session.modified = True
response = self.render(request, self.prepare(request, instance),
status=codes.created)
else:
data = {
'message': 'Error creating query',
'errors': dict(form.errors),
}
response = self.render(request, data,
status=codes.unprocessable_entity)
return response
class PublicQueriesResource(QueryBase):
"Resource for accessing public queries"
template = templates.BriefQuery
def prepare(self, request, instance, template=None):
if template is None:
template = self.template
posthook = functools.partial(query_posthook, request=request)
return serialize(instance, posthook=posthook, **template)
def get_queryset(self, request, **kwargs):
kwargs['public'] = True
return self.model.objects.filter(**kwargs).order_by('-accessed') \
.distinct()
def get(self, request):
queryset = self.get_queryset(request)
return self.prepare(request, queryset)
class QueryResource(QueryBase):
"Resource for accessing a single query"
def is_not_found(self, request, response, **kwargs):
return self.get_object(request, **kwargs) is None
def get(self, request, **kwargs):
instance = self.get_object(request, **kwargs)
usage.log('read', instance=instance, request=request)
self.model.objects.filter(pk=instance.pk).update(
accessed=datetime.now())
return self.prepare(request, instance)
def put(self, request, **kwargs):
instance = self.get_object(request, **kwargs)
form = QueryForm(request, request.data, instance=instance)
if form.is_valid():
instance = form.save()
usage.log('update', instance=instance, request=request)
request.session.modified = True
response = self.render(request, self.prepare(request, instance))
else:
data = {
'message': 'Cannot update query',
'errors': dict(form.errors),
}
response = self.render(request, data,
status=codes.unprocessable_entity)
return response
def delete(self, request, **kwargs):
instance = self.get_object(request, **kwargs)
if instance.session:
data = {
'message': 'Cannot delete session query',
}
return self.render(request, data, status=codes.bad_request)
send_mail(instance.shared_users.values_list('email', flat=True),
DELETE_QUERY_EMAIL_TITLE.format(instance.name),
DELETE_QUERY_EMAIL_BODY.format(instance.name))
instance.delete()
usage.log('delete', instance=instance, request=request)
request.session.modified = True
def prune_context(cxt):
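    "Recursively reduces a context tree to the concept, field, operator and value of each condition."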
if 'children' in cxt:
        cxt['children'] = [prune_context(c) for c in cxt['children']]
else:
cxt = {
'concept': cxt.get('concept'),
'field': cxt.get('field'),
'operator': cxt.get('operator'),
'value': cxt.get('value'),
}
return cxt
class QuerySqlResource(QueryBase):
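    "Resource exposing the SQL generated for a query (staff, superusers or DEBUG only)."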
def is_unauthorized(self, request, *args, **kwargs):
if super(QuerySqlResource, self)\
.is_unauthorized(request, *args, **kwargs):
return True
return not any((
request.user.is_superuser,
request.user.is_staff,
settings.DEBUG,
))
def is_not_found(self, request, response, **kwargs):
return self.get_object(request, **kwargs) is None
def get(self, request, **kwargs):
params = self.get_params(request)
instance = self.get_object(request, **kwargs)
QueryProcessor = pipeline.query_processors[params['processor']]
processor = QueryProcessor(tree=params['tree'],
context=instance.context,
view=instance.view)
queryset = processor.get_queryset(request=request)
sql, params = queryset.query.get_compiler(queryset.db).as_sql()
return {
'description': {
'context': prune_context(instance.context_json),
'view': instance.view_json,
},
'representation': {
'sql': sql,
'params': params,
},
}