#!/usr/bin/python
# (c) 2018 Piotr Olczak <[email protected]>
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_info
author: Piotr Olczak (@dprts) <[email protected]>
extends_documentation_fragment:
- netapp.na_ontap
short_description: NetApp information gatherer
description:
- This module allows you to gather various information about ONTAP configuration
version_added: "2.9"
requirements:
- netapp_lib
options:
state:
type: str
description:
- Returns "info"
default: "info"
choices: ['info']
gather_subset:
type: list
description:
- When supplied, this argument will restrict the information collected
to a given subset. Possible values for this argument include
"aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info",
"net_ifgrp_info",
"net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info",
"nvme_namespace_info", "nvme_subsystem_info", "ontap_version",
"qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info",
"security_login_account_info", "storage_failover_info", "volume_info",
"vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info"
Can specify a list of values to include a larger subset. Values can also be used
        with an initial C(!) to specify that a specific subset should
not be collected.
- nvme is supported with ONTAP 9.4 onwards.
- use "help" to get a list of supported information for your system.
default: "all"
'''
EXAMPLES = '''
- name: Get NetApp info (Password Authentication)
na_ontap_info:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
register: ontap_info
- debug:
msg: "{{ ontap_info.ontap_info }}"
- name: Limit Info Gathering to Aggregate Information
na_ontap_info:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset: "aggregate_info"
register: ontap_info
- name: Limit Info Gathering to Volume and Lun Information
na_ontap_info:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset:
- volume_info
- lun_info
register: ontap_info
- name: Gather all info except for volume and lun information
na_ontap_info:
state: info
hostname: "na-vsim"
username: "admin"
password: "admins_password"
gather_subset:
- "!volume_info"
- "!lun_info"
register: ontap_info
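# The gather_subset documentation above also mentions a "help" value, which makes
# the module return the list of subsets supported by the remote system instead of
# collecting them. A task using it (same placeholder credentials as above) could
# look like this:
- name: List the info subsets supported by the remote system
  na_ontap_info:
    state: info
    hostname: "na-vsim"
    username: "admin"
    password: "admins_password"
    gather_subset: "help"
  register: ontap_info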
'''
RETURN = '''
ontap_info:
description: Returns various information about NetApp cluster configuration
returned: always
type: dict
sample: '{
"ontap_info": {
"aggregate_info": {...},
"cluster_node_info": {...},
"net_dns_info": {...},
"net_ifgrp_info": {...},
"net_interface_info": {...},
"net_port_info": {...},
"security_key_manager_key_info": {...},
"security_login_account_info": {...},
"volume_info": {...},
"lun_info": {...},
"storage_failover_info": {...},
"vserver_login_banner_info": {...},
"vserver_motd_info": {...},
"vserver_info": {...},
"vserver_nfs_info": {...},
"ontap_version": {...},
"igroup_info": {...},
"qos_policy_info": {...},
"qos_adaptive_policy_info": {...}
}'
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError:
HAS_XMLTODICT = False
try:
import json
HAS_JSON = True
except ImportError:
HAS_JSON = False
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPGatherInfo(object):
'''Class with gather info methods'''
def __init__(self, module):
self.module = module
self.netapp_info = dict()
# thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
# for starting this
# min_version identifies the ontapi version which supports this ZAPI
# use 0 if it is supported since 9.1
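        # e.g. '130' corresponds to ONTAP 9.3 (qos_adaptive_policy_info below) and
        # '140' to ONTAP 9.4 (the nvme_* subsets below); get_subset() compares this
        # against the ontapi minor version returned by self.ontapi()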
self.info_subsets = {
'net_dns_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-dns-get-iter',
'attribute': 'net-dns-info',
'field': 'vserver-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_interface_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-interface-get-iter',
'attribute': 'net-interface-info',
'field': 'interface-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_port_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'net-port-get-iter',
'attribute': 'net-port-info',
'field': ('node', 'port'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'cluster_node_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'cluster-node-get-iter',
'attribute': 'cluster-node-info',
'field': 'node-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'security_login_account_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'security-login-get-iter',
'attribute': 'security-login-account-info',
'field': ('vserver', 'user-name', 'application', 'authentication-method'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'aggregate_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'aggr-get-iter',
'attribute': 'aggr-attributes',
'field': 'aggregate-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'volume_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'volume-get-iter',
'attribute': 'volume-attributes',
'field': ('name', 'owning-vserver-name'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'lun_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'lun-get-iter',
'attribute': 'lun-info',
'field': ('vserver', 'path'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'storage_failover_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'cf-get-iter',
'attribute': 'storage-failover-info',
'field': 'node',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_motd_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-motd-get-iter',
'attribute': 'vserver-motd-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_login_banner_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-login-banner-get-iter',
'attribute': 'vserver-login-banner-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'security_key_manager_key_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'security-key-manager-key-get-iter',
'attribute': 'security-key-manager-key-info',
'field': ('node', 'key-id'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'vserver-get-iter',
'attribute': 'vserver-info',
'field': 'vserver-name',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'vserver_nfs_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nfs-service-get-iter',
'attribute': 'nfs-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'net_ifgrp_info': {
'method': self.get_ifgrp_info,
'kwargs': {},
'min_version': '0',
},
'ontap_version': {
'method': self.ontapi,
'kwargs': {},
'min_version': '0',
},
'system_node_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'system-node-get-iter',
'attribute': 'node-details-info',
'field': 'node',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'igroup_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'igroup-get-iter',
'attribute': 'initiator-group-info',
'field': ('vserver', 'initiator-group-name'),
'query': {'max-records': '1024'},
},
'min_version': '0',
},
'qos_policy_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'qos-policy-group-get-iter',
'attribute': 'qos-policy-group-info',
'field': 'policy-group',
'query': {'max-records': '1024'},
},
'min_version': '0',
},
# supported in ONTAP 9.3 and onwards
'qos_adaptive_policy_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'qos-adaptive-policy-group-get-iter',
'attribute': 'qos-adaptive-policy-group-info',
'field': 'policy-group',
'query': {'max-records': '1024'},
},
'min_version': '130',
},
# supported in ONTAP 9.4 and onwards
'nvme_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-get-iter',
'attribute': 'nvme-target-service-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_interface_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-interface-get-iter',
'attribute': 'nvme-interface-info',
'field': 'vserver',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_subsystem_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-subsystem-get-iter',
'attribute': 'nvme-subsystem-info',
'field': 'subsystem',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
'nvme_namespace_info': {
'method': self.get_generic_get_iter,
'kwargs': {
'call': 'nvme-namespace-get-iter',
'attribute': 'nvme-namespace-info',
'field': 'path',
'query': {'max-records': '1024'},
},
'min_version': '140',
},
}
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def ontapi(self):
'''Method to get ontapi version'''
api = 'system-get-ontapi-version'
api_call = netapp_utils.zapi.NaElement(api)
try:
results = self.server.invoke_successfully(api_call, enable_tunneling=False)
ontapi_version = results.get_child_content('minor-version')
return ontapi_version if ontapi_version is not None else '0'
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error calling API %s: %s" %
(api, to_native(error)), exception=traceback.format_exc())
def call_api(self, call, query=None):
'''Main method to run an API call'''
api_call = netapp_utils.zapi.NaElement(call)
result = None
if query:
for key, val in query.items():
# Can val be nested?
api_call.add_new_child(key, val)
try:
result = self.server.invoke_successfully(api_call, enable_tunneling=False)
return result
except netapp_utils.zapi.NaApiError as error:
if call in ['security-key-manager-key-get-iter']:
return result
else:
self.module.fail_json(msg="Error calling API %s: %s"
% (call, to_native(error)), exception=traceback.format_exc())
def get_ifgrp_info(self):
'''Method to get network port ifgroups info'''
try:
net_port_info = self.netapp_info['net_port_info']
except KeyError:
net_port_info_calls = self.info_subsets['net_port_info']
net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
interfaces = net_port_info.keys()
ifgrps = []
for ifn in interfaces:
if net_port_info[ifn]['port_type'] == 'if_group':
ifgrps.append(ifn)
net_ifgrp_info = dict()
for ifgrp in ifgrps:
query = dict()
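            # net_port_info keys are built as 'node:port' (see the ('node', 'port')
            # field tuple in info_subsets), so splitting on ':' recovers the node
            # name and the ifgrp port name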
query['node'], query['ifgrp-name'] = ifgrp.split(':')
tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
attribute='net-ifgrp-info', query=query)
net_ifgrp_info = net_ifgrp_info.copy()
net_ifgrp_info.update(tmp)
return net_ifgrp_info
def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
'''Method to run a generic get-iter call'''
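        # Returns a dict keyed by the requested field (tuple fields are joined
        # with ':') when 'field' is given, a plain list of records when it is
        # None, and None when the call yields no attributes.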
generic_call = self.call_api(call, query)
if call == 'net-port-ifgrp-get':
children = 'attributes'
else:
children = 'attributes-list'
if generic_call is None:
return None
if field is None:
out = []
else:
out = {}
attributes_list = generic_call.get_child_by_name(children)
if attributes_list is None:
return None
for child in attributes_list.get_children():
dic = xmltodict.parse(child.to_string(), xml_attribs=False)
if attribute is not None:
dic = dic[attribute]
if isinstance(field, str):
unique_key = _finditem(dic, field)
out = out.copy()
out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
elif isinstance(field, tuple):
unique_key = ':'.join([_finditem(dic, el) for el in field])
out = out.copy()
out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
else:
out.append(convert_keys(json.loads(json.dumps(dic))))
return out
def get_all(self, gather_subset):
'''Method to get all subsets'''
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_info", cserver)
self.netapp_info['ontap_version'] = self.ontapi()
run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
if 'help' in gather_subset:
self.netapp_info['help'] = sorted(run_subset)
else:
for subset in run_subset:
call = self.info_subsets[subset]
self.netapp_info[subset] = call['method'](**call['kwargs'])
return self.netapp_info
def get_subset(self, gather_subset, version):
        '''Method to determine which subsets to gather'''
runable_subsets = set()
exclude_subsets = set()
usable_subsets = [key for key in self.info_subsets.keys() if version >= self.info_subsets[key]['min_version']]
if 'help' in gather_subset:
return usable_subsets
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(usable_subsets)
return runable_subsets
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
return set()
exclude = True
else:
exclude = False
if subset not in usable_subsets:
if subset not in self.info_subsets.keys():
self.module.fail_json(msg='Bad subset: %s' % subset)
self.module.fail_json(msg='Remote system at version %s does not support %s' %
(version, subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(usable_subsets)
runable_subsets.difference_update(exclude_subsets)
return runable_subsets
# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
def __finditem(obj, key):
if key in obj:
return obj[key]
for dummy, val in obj.items():
if isinstance(val, dict):
item = __finditem(val, key)
if item is not None:
return item
return None
def _finditem(obj, key):
value = __finditem(obj, key)
if value is not None:
return value
raise KeyError(key)
def convert_keys(d_param):
'''Method to convert hyphen to underscore'''
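    # e.g. {'net-dns-info': {'dns-state': 'enabled'}}
    #   -> {'net_dns_info': {'dns_state': 'enabled'}}
    # Only dict keys are converted; non-dict values (including lists) are
    # returned unchanged.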
out = {}
if isinstance(d_param, dict):
for key, val in d_param.items():
val = convert_keys(val)
out[key.replace('-', '_')] = val
else:
return d_param
return out
def main():
'''Execute action'''
argument_spec = netapp_utils.na_ontap_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='info', choices=['info']),
gather_subset=dict(default=['all'], type='list'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_XMLTODICT:
module.fail_json(msg="xmltodict missing")
if not HAS_JSON:
module.fail_json(msg="json missing")
state = module.params['state']
gather_subset = module.params['gather_subset']
if gather_subset is None:
gather_subset = ['all']
gf_obj = NetAppONTAPGatherInfo(module)
gf_all = gf_obj.get_all(gather_subset)
result = {'state': state, 'changed': False}
module.exit_json(ontap_info=gf_all, **result)
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
#
# tree.py
#
# (c) D.C.-G. 2014
#
# Tree widget for albow
#
from albow.widget import Widget
from albow.menu import Menu
from albow.fields import IntField, FloatField, TextFieldWrapped
from albow.controls import CheckBox, AttrRef, Label, Button
from albow.dialogs import ask, alert, input_text_buttons
from albow.translate import _
from extended_widgets import ChoiceButton
from theme import ThemeProperty
from layout import Column, Row
from dialogs import Dialog
from palette_view import PaletteView
from scrollpanel import ScrollRow
from utils import blit_in_rect
from pygame import image, Surface, Rect, SRCALPHA, draw, event
import copy
#-----------------------------------------------------------------------------
item_types_map = {dict: ("Compound", None, {}),
int: ("Integer", IntField, 0),
float: ("Floating point", FloatField, 0.0),
unicode: ("Text", TextFieldWrapped, ""),
bool: ("Boolean", CheckBox, True),
}
def setup_map_types_item(mp=None):
if not mp:
mp = item_types_map
map_types_item = {}
for k, v in mp.items():
if v[0] in map_types_item.keys():
_v = map_types_item.pop(v[0])
map_types_item[u"%s (%s)"%(_(v[0]), _v[0].__name__)] = _v
map_types_item[u"%s (%s)"%(_(v[0]), k.__name__)] = (k, v[1], v[2])
else:
map_types_item[v[0]] = (k, v[1], v[2])
return map_types_item
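# With the default item_types_map above this yields label -> (type, widget, default)
# entries such as "Integer" -> (int, IntField, 0) and "Text" -> (unicode,
# TextFieldWrapped, ""); duplicate labels are disambiguated by appending the type
# name, e.g. "Integer (int)".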
map_types_item = setup_map_types_item()
#-----------------------------------------------------------------------------
# Tree item builder methods
def create_base_item(self, i_type, i_name, i_value):
return i_name, type(i_type)(i_value)
create_dict = create_int = create_float = create_unicode = create_bool = create_base_item
#-----------------------------------------------------------------------------
class SetupNewItemPanel(Dialog):
def __init__(self, type_string, types=map_types_item, ok_action=None):
self.type_string = type_string
self.ok_action = ok_action
title = Label("Choose default data")
self.t, widget, self.v = types[type_string]
self.n = u""
w_name = TextFieldWrapped(ref=AttrRef(self, 'n'))
self.w_value = self.get_widget(widget)
col = Column([Column([title,]), Label(_("Item Type: %s")%type_string, doNotTranslate=True), Row([Label("Name"), w_name], margin=0), Row([Label("Value"), self.w_value], margin=0), Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=self.dismiss)], margin=0)], margin=0, spacing=2)
Dialog.__init__(self, client=col)
def dismiss_ok(self):
self.dismiss((self.t, self.n, getattr(self.w_value, 'value', map_types_item.get(self.type_string, [None,] * 3)[2])))
def get_widget(self, widget):
if hasattr(widget, 'value'):
value = widget(value=self.v)
elif hasattr(widget, 'text'):
value = widget(text=self.v)
elif widget is None:
            value = Label("This item type is a container. Add children later.")
else:
msg = "*** Error in SelectItemTypePanel.__init__():\n Widget <%s> has no 'text' or 'value' member."%widget
print msg
value = Label(msg)
return value
#-----------------------------------------------------------------------------
class SelectItemTypePanel(Dialog):
def __init__(self, title, responses, default=None, ok_action=None):
self.response = responses[0]
self.ok_action = ok_action
title = Label(title)
self.w_type = ChoiceButton(responses)
col = Column([title, self.w_type, Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=ok_action or self.dismiss)], margin=0)], margin=0, spacing=2)
Dialog.__init__(self, client=col)
def dismiss_ok(self):
self.dismiss(self.w_type.selectedChoice)
#-----------------------------------------------------------------------------
def select_item_type(ok_action, types=map_types_item):
if len(types) > 1:
choices = types.keys()
choices.sort()
result = SelectItemTypePanel("Choose item type", responses=choices, default=None).present()
else:
result = types.keys()[0]
if type(result) in (str, unicode):
return SetupNewItemPanel(result, types, ok_action).present()
return None
#-----------------------------------------------------------------------------
class TreeRow(ScrollRow):
def click_item(self, n, e):
self.parent.click_item(n, e.local)
def mouse_down(self, e):
if e.button == 3:
_e = event.Event(e.type, {'alt': e.alt, 'meta': e.meta, 'ctrl': e.ctrl,
'shift': e.shift, 'button': 1, 'cmd': e.cmd,
'local': e.local, 'pos': e.pos,
'num_clicks': e.num_clicks})
ScrollRow.mouse_down(self, _e)
self.parent.show_menu(e.local)
else:
ScrollRow.mouse_down(self, e)
#-----------------------------------------------------------------------------
class Tree(Column):
"""..."""
rows = []
row_margin = 2
column_margin = 2
bullet_size = ThemeProperty('bullet_size')
bullet_color_active = ThemeProperty('bullet_color_active')
bullet_color_inactive = ThemeProperty('bullet_color_inactive')
def __init__(self, *args, **kwargs):
self.menu = [("Add", "add_item"),
("Delete", "delete_item"),
("New child", "add_child"),
("Rename", "rename_item"),
("", ""),
("Cut", "cut_item"),
("Copy", "copy_item"),
("Paste", "paste_item"),
("Paste as child", "paste_child"),
]
if not hasattr(self, 'map_types_item'):
global map_types_item
self.map_types_item = setup_map_types_item()
self.selected_item_index = None
        # cached_selected_item_index is set to False during startup so that a
        # predefined selected item is not unselected the first time it is closed.
self.cached_selected_item_index = False
self.selected_item = None
self.clicked_item = None
self.copyBuffer = kwargs.pop('copyBuffer', None)
self._parent = kwargs.pop('_parent', None)
self.styles = kwargs.pop('styles', {})
self.compound_types = [dict,] + kwargs.pop('compound_types', [])
self.item_types = self.compound_types + kwargs.pop('item_types', [a[0] for a in self.map_types_item.values()] or [int, float, unicode, bool])
for t in self.item_types:
if 'create_%s'%t.__name__ in globals().keys():
setattr(self, 'create_%s'%t.__name__, globals()['create_%s'%t.__name__])
self.show_fields = kwargs.pop('show_fields', False)
self.deployed = []
self.data = data = kwargs.pop("data", {})
self.draw_zebra = draw_zebra = kwargs.pop('draw_zebra', True)
# self.inner_width = kwargs.pop('inner_width', 'auto')
self.inner_width = kwargs.pop('inner_width', 500)
self.__num_rows = len(data.keys())
self.build_layout()
# row_height = self.font.size(' ')[1]
row_height = self.font.get_linesize()
self.treeRow = treeRow = TreeRow((self.inner_width, row_height), 10, draw_zebra=draw_zebra)
Column.__init__(self, [treeRow,], **kwargs)
def dispatch_key(self, name, evt):
if not hasattr(evt, 'key'):
return
if name == "key_down":
keyname = self.root.getKey(evt)
if keyname == "Up" and self.selected_item_index > 0:
if self.selected_item_index is None:
self.selected_item_index = -1
self.selected_item_index = max(self.selected_item_index - 1, 0)
keyname = 'Return'
elif keyname == "Down" and self.selected_item_index < len(self.rows) - 1:
if self.selected_item_index is None:
self.selected_item_index = -1
self.selected_item_index += 1
keyname = 'Return'
elif keyname == 'Page down':
if self.selected_item_index is None:
self.selected_item_index = -1
self.selected_item_index = min(len(self.rows) - 1, self.selected_item_index + self.treeRow.num_rows())
keyname = 'Return'
elif keyname == 'Page up':
if self.selected_item_index is None:
self.selected_item_index = -1
self.selected_item_index = max(0, self.selected_item_index - self.treeRow.num_rows())
keyname = 'Return'
if self.treeRow.cell_to_item_no(0, 0) is not None and (self.treeRow.cell_to_item_no(0, 0) + self.treeRow.num_rows() -1 > self.selected_item_index or self.treeRow.cell_to_item_no(0, 0) + self.treeRow.num_rows() -1 < self.selected_item_index):
self.treeRow.scroll_to_item(self.selected_item_index)
if keyname == 'Return' and self.selected_item_index != None:
self.select_item(self.selected_item_index)
if self.selected_item[7] in self.compound_types:
self.deploy(self.selected_item[6])
if self.selected_item is not None and hasattr(self, "update_side_panel"):
self.update_side_panel(self.selected_item)
def cut_item(self):
self.copyBuffer = ([] + self.selected_item, 1)
self.delete_item()
def copy_item(self):
self.copyBuffer = ([] + self.selected_item, 0)
def paste_item(self):
parent = self.get_item_parent(self.selected_item)
name = self.copyBuffer[0][3]
old_name = u"%s"%self.copyBuffer[0][3]
if self.copyBuffer[1] == 0:
name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
else:
old_name = ""
if name and type(name) in (str, unicode) and name != old_name:
new_item = copy.deepcopy(self.copyBuffer[0][9])
if hasattr(new_item, 'name'):
new_item.name = name
self.add_item_to(parent, (name, new_item))
def paste_child(self):
name = self.copyBuffer[0][3]
old_name = u"%s"%self.copyBuffer[0][3]
names = []
children = self.get_item_children(self.selected_item)
if children:
names = [a[3] for a in children]
if name in names:
name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
else:
old_name = ""
if name and type(name) in (str, unicode) and name != old_name:
new_item = copy.deepcopy(self.copyBuffer[0][9])
if hasattr(new_item, 'name'):
new_item.name = name
self.add_item_to(self.selected_item, (name, new_item))
@staticmethod
def add_item_to_dict(parent, name, item):
parent[name] = item
def add_item_to(self, parent, (name, item)):
if parent is None:
tp = 'dict'
parent = self.data
else:
tp = parent[7].__name__
parent = parent[9]
if not name:
i = 0
name = 'Item %03d'%i
while name in self.data.keys():
i += 1
name = 'Item %03d'%i
meth = getattr(self, 'add_item_to_%s'%tp, None)
if meth:
meth(parent, name, item)
self.build_layout()
else:
alert(_("No function implemented to add items to %s type.")%type(parent).__name__, doNotTranslate=True)
def add_item(self, types_item=None):
r = select_item_type(None, types_item or self.map_types_item)
if type(r) in (list, tuple):
t, n, v = r
meth = getattr(self, 'create_%s'%t.__name__, None)
if meth:
new_item = meth(self, t, n, v)
self.add_item_to(self.get_item_parent(self.selected_item), new_item)
def add_child(self, types_item=None):
r = select_item_type(None, types_item or self.map_types_item)
if type(r) in (list, tuple):
t, n, v = r
meth = getattr(self, 'create_%s'%t.__name__, None)
if meth:
new_item = meth(self, t, n, v)
self.add_item_to(self.selected_item, new_item)
def delete_item(self):
parent = self.get_item_parent(self.selected_item) or self.data
del parent[self.selected_item]
self.selected_item_index = None
self.selected_item = None
self.build_layout()
def rename_item(self):
result = input_text_buttons("Choose a name", 300, self.selected_item[3])
if type(result) in (str, unicode):
self.selected_item[3] = result
self.build_layout()
def get_item_parent(self, item):
if item:
pid = item[4]
for itm in self.rows:
if pid == itm[6]:
return itm
def get_item_children(self, item):
children = []
if item:
if item[6] in self.deployed:
cIds = item[5]
idx = self.rows.index(item)
for child in self.rows[idx:]:
if child[8] == item[8] + 1 and child[4] == item[6]:
children.append(child)
else:
k = item[3]
v = item[9]
lvl = item[8]
id = item[6]
aId = len(self.rows) + 1
meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
if meth is not None:
_v = meth(k, v)
else:
_v = v
ks = _v.keys()
ks.sort()
ks.reverse()
for a in ks:
b = _v[a]
itm = [lvl + 1, a, b, id, [], aId]
itm = [None, None, None, a, id, [], aId, type(b), lvl + 1, b]
children.insert(0, itm)
aId += 1
return children
def show_menu(self, pos):
if self.menu:
m = Menu("Menu", self.menu, handler=self)
i = m.present(self, pos)
if i > -1:
meth = getattr(self, self.menu[i][1], None)
if meth:
meth()
def cut_item_enabled(self):
return self.selected_item is not None
def copy_item_enabled(self):
return self.cut_item_enabled()
def paste_item_enabled(self):
return self.copyBuffer is not None
def paste_child_enabled(self):
if not self.selected_item:
return False
return self.paste_item_enabled() and self.selected_item[7] in self.compound_types
def add_item_enabled(self):
return True
def add_child_enabled(self):
if not self.selected_item:
return False
return self.selected_item[7] in self.compound_types
def delete_item_enabled(self):
return self.selected_item is not None
def rename_item_enabled(self):
return self.selected_item is not None
def build_layout(self):
data = self.data
parent = 0
children = []
keys = data.keys()
keys.sort()
items = [[0, a, data[a], parent, children, keys.index(a) + 1] for a in keys]
rows = []
w = 50
aId = len(items) + 1
while items:
lvl, k, v, p, c, id = items.pop(0)
t = None
_c = False
fields = []
c = [] + c
            # If the 'v' object is a dict containing both 'value' and 'tooltipText'
            # keys, extract the tooltip text and replace 'v' with its 'value' entry.
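            # e.g. v = {'value': 42, 'tooltipText': 'The answer'} becomes v = 42
            # with tooltip text t = 'The answer'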
if type(v) == dict and len(v.keys()) and ('value' in v.keys() and 'tooltipText' in v.keys()):
t = v['tooltipText']
if type(t) not in (str, unicode):
t = repr(t)
v = v['value']
if type(v) in self.compound_types:
meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
if meth is not None:
_v = meth(k, v)
else:
_v = v
ks = _v.keys()
ks.sort()
ks.reverse()
for a in ks:
b = _v[a]
if id in self.deployed:
itm = [lvl + 1, a, b, id, [], aId]
items.insert(0, itm)
c.append(aId)
_c = True
aId += 1
else:
if type(v) in (list, tuple):
fields = v
elif type(v) not in self.compound_types or hasattr(self._parent, 'build_%s'%k.lower()):
fields = [v,]
head = Surface((self.bullet_size * (lvl + 1) + self.font.size(k)[0], self.bullet_size), SRCALPHA)
if _c:
meth = getattr(self, 'draw_%s_bullet'%{False: 'closed', True: 'opened'}[id in self.deployed])
else:
meth = getattr(self, 'draw_%s_bullet'%v.__class__.__name__, None)
if not meth:
meth = self.draw_deadend_bullet
bg, fg, shape, text = self.styles.get(type(v),
({True: self.bullet_color_active, False: self.bullet_color_inactive}[_c],
self.fg_color, 'square', ''),
)
try:
meth(head, bg, fg, shape, text, k, lvl)
except:
pass
rows.append([head, fields, [w] * len(fields), k, p, c, id, type(v), lvl, v, t])
self.rows = rows
return rows
def deploy(self, n):
id = self.rows[n][6]
if id in self.deployed:
while id in self.deployed:
self.deployed.remove(id)
else:
self.deployed.append(id)
self.build_layout()
l = (self.selected_item[3], self.selected_item[4])
if type(self.cached_selected_item_index) != bool:
if self.cached_selected_item_index and self.cached_selected_item_index < self.num_rows():
r = self.rows[self.cached_selected_item_index]
r = (r[3], r[4])
else:
r = (-1, -1)
else:
r = l
self.cached_selected_item_index = self.selected_item_index
if l == r:
self.selected_item_index = self.cached_selected_item_index
else:
self.cached_selected_item_index = self.selected_item_index
self.selected_item_index = None
def click_item(self, n, pos):
"""..."""
self.clicked_item = row = self.rows[n]
r = self.get_bullet_rect(row[0], row[8])
x = pos[0]
if self.margin + r.left - self.treeRow.hscroll <= x <= self.margin + self.treeRow.margin + r.right - self.treeRow.hscroll:
self.deploy(n)
else:
self.select_item(n)
def select_item(self, n):
self.selected_item_index = n
self.selected_item = self.rows[n]
def get_bullet_rect(self, surf, lvl):
r = Rect(0, 0, self.bullet_size, self.bullet_size)
r.left = self.bullet_size * lvl
r.inflate_ip(-4, -4)
return r
def draw_item_text(self, surf, r, text):
buf = self.font.render(unicode(text), True, self.fg_color)
blit_in_rect(surf, buf, Rect(r.right, r.top, surf.get_width() - r.right, r.height), 'c')
def draw_deadend_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
r = self.get_bullet_rect(surf, lvl)
draw.polygon(surf, bg, [r.midtop, r.midright, r.midbottom, r.midleft])
self.draw_item_text(surf, r, item_text)
def draw_closed_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
r = self.get_bullet_rect(surf, lvl)
draw.polygon(surf, bg, [r.topleft, r.midright, r.bottomleft])
self.draw_item_text(surf, r, item_text)
def draw_opened_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
r = self.get_bullet_rect(surf, lvl)
draw.polygon(surf, bg, [r.topleft, r.midbottom, r.topright])
self.draw_item_text(surf, r, item_text)
def draw_tree_cell(self, surf, i, data, cell_rect, column):
"""..."""
if type(data) in (str, unicode):
self.draw_text_cell(surf, i, data, cell_rect, 'l', self.font)
else:
self.draw_image_cell(surf, i, data, cell_rect, column)
@staticmethod
def draw_image_cell(surf, i, data, cell_rect, column):
"""..."""
blit_in_rect(surf, data, cell_rect, 'l')
def draw_text_cell(self, surf, i, data, cell_rect, align, font):
buf = font.render(unicode(data), True, self.fg_color)
blit_in_rect(surf, buf, cell_rect, align)
def num_rows(self):
return len(self.rows)
def row_data(self, row):
return self.rows[row]
def column_info(self, row_data):
m = self.column_margin
d = 2 * m
x = 0
for i in range(0,2):
if i < 1:
width = self.width
data = row_data[i]
yield i, x + m, width - d, None, data
x += width
if self.show_fields:
for i in range(len(row_data[2])):
width = 50 * (i + 1)
data = row_data[2][i]
                if type(data) not in (str, unicode):
data = repr(data)
yield i, x + m, width - d, None, data
x += width
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from neutron.common import constants as n_const
import neutron.db.api as ndb
from neutron.plugins.ml2.drivers.mech_arista import db
from neutron.plugins.ml2.drivers.mech_arista import exceptions as arista_exc
from neutron.plugins.ml2.drivers.mech_arista import mechanism_arista as arista
from neutron.tests import base
def setup_arista_wrapper_config(value=''):
cfg.CONF.keystone_authtoken = fake_keystone_info_class()
cfg.CONF.set_override('eapi_host', value, "ml2_arista")
cfg.CONF.set_override('eapi_username', value, "ml2_arista")
def setup_valid_config():
# Config is not valid if value is not set
setup_arista_wrapper_config('value')
class AristaProvisionedVlansStorageTestCase(base.BaseTestCase):
    """Test storing and retrieving functionality of Arista mechanism driver.
    Tests all methods of this class by invoking them both separately and
    as a group.
"""
def setUp(self):
super(AristaProvisionedVlansStorageTestCase, self).setUp()
ndb.configure_db()
self.addCleanup(ndb.clear_db)
def test_tenant_is_remembered(self):
tenant_id = 'test'
db.remember_tenant(tenant_id)
net_provisioned = db.is_tenant_provisioned(tenant_id)
self.assertTrue(net_provisioned, 'Tenant must be provisioned')
def test_tenant_is_removed(self):
tenant_id = 'test'
db.remember_tenant(tenant_id)
db.forget_tenant(tenant_id)
net_provisioned = db.is_tenant_provisioned(tenant_id)
self.assertFalse(net_provisioned, 'The Tenant should be deleted')
def test_network_is_remembered(self):
tenant_id = 'test'
network_id = '123'
segmentation_id = 456
db.remember_network(tenant_id, network_id, segmentation_id)
net_provisioned = db.is_network_provisioned(tenant_id,
network_id)
self.assertTrue(net_provisioned, 'Network must be provisioned')
def test_network_is_removed(self):
tenant_id = 'test'
network_id = '123'
db.remember_network(tenant_id, network_id, '123')
db.forget_network(tenant_id, network_id)
net_provisioned = db.is_network_provisioned(tenant_id, network_id)
self.assertFalse(net_provisioned, 'The network should be deleted')
def test_vm_is_remembered(self):
vm_id = 'VM-1'
tenant_id = 'test'
network_id = '123'
port_id = 456
host_id = 'ubuntu1'
db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id)
vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id,
network_id, tenant_id)
self.assertTrue(vm_provisioned, 'VM must be provisioned')
def test_vm_is_removed(self):
vm_id = 'VM-1'
tenant_id = 'test'
network_id = '123'
port_id = 456
host_id = 'ubuntu1'
db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id)
db.forget_vm(vm_id, host_id, port_id, network_id, tenant_id)
vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id,
network_id, tenant_id)
self.assertFalse(vm_provisioned, 'The vm should be deleted')
def test_remembers_multiple_networks(self):
tenant_id = 'test'
expected_num_nets = 100
nets = ['id%s' % n for n in range(expected_num_nets)]
for net_id in nets:
db.remember_network(tenant_id, net_id, 123)
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
def test_removes_all_networks(self):
tenant_id = 'test'
num_nets = 100
old_nets = db.num_nets_provisioned(tenant_id)
nets = ['id_%s' % n for n in range(num_nets)]
for net_id in nets:
db.remember_network(tenant_id, net_id, 123)
for net_id in nets:
db.forget_network(tenant_id, net_id)
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
expected = old_nets
self.assertEqual(expected, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected, num_nets_provisioned))
def test_remembers_multiple_tenants(self):
expected_num_tenants = 100
tenants = ['id%s' % n for n in range(expected_num_tenants)]
for tenant_id in tenants:
db.remember_tenant(tenant_id)
num_tenants_provisioned = db.num_provisioned_tenants()
self.assertEqual(expected_num_tenants, num_tenants_provisioned,
'There should be %d tenants, not %d' %
(expected_num_tenants, num_tenants_provisioned))
def test_removes_multiple_tenants(self):
num_tenants = 100
tenants = ['id%s' % n for n in range(num_tenants)]
for tenant_id in tenants:
db.remember_tenant(tenant_id)
for tenant_id in tenants:
db.forget_tenant(tenant_id)
num_tenants_provisioned = db.num_provisioned_tenants()
expected = 0
self.assertEqual(expected, num_tenants_provisioned,
'There should be %d tenants, not %d' %
(expected, num_tenants_provisioned))
def test_num_vm_is_valid(self):
tenant_id = 'test'
network_id = '123'
port_id = 456
host_id = 'ubuntu1'
vm_to_remember = ['vm1', 'vm2', 'vm3']
vm_to_forget = ['vm2', 'vm1']
for vm in vm_to_remember:
db.remember_vm(vm, host_id, port_id, network_id, tenant_id)
for vm in vm_to_forget:
db.forget_vm(vm, host_id, port_id, network_id, tenant_id)
num_vms = len(db.get_vms(tenant_id))
expected = len(vm_to_remember) - len(vm_to_forget)
self.assertEqual(expected, num_vms,
'There should be %d records, '
'got %d records' % (expected, num_vms))
# clean up afterwards
db.forget_vm('vm3', host_id, port_id, network_id, tenant_id)
def test_get_network_list_returns_eos_compatible_data(self):
tenant = u'test-1'
segm_type = 'vlan'
network_id = u'123'
network2_id = u'1234'
vlan_id = 123
vlan2_id = 1234
expected_eos_net_list = {network_id: {u'networkId': network_id,
u'segmentationTypeId': vlan_id,
u'segmentationType': segm_type},
network2_id: {u'networkId': network2_id,
u'segmentationTypeId': vlan2_id,
u'segmentationType': segm_type}}
db.remember_network(tenant, network_id, vlan_id)
db.remember_network(tenant, network2_id, vlan2_id)
net_list = db.get_networks(tenant)
        self.assertEqual(net_list, expected_eos_net_list,
                         '%s != %s' % (net_list, expected_eos_net_list))
class PositiveRPCWrapperValidConfigTestCase(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista Driver and EOS
"""
def setUp(self):
super(PositiveRPCWrapperValidConfigTestCase, self).setUp()
setup_valid_config()
self.drv = arista.AristaRPCWrapper()
self.region = 'RegionOne'
self.drv._server = mock.MagicMock()
def _get_exit_mode_cmds(self, modes):
return ['exit'] * len(modes)
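        # e.g. _get_exit_mode_cmds(['tenant', 'region']) -> ['exit', 'exit'];
        # one 'exit' per configuration mode that was entered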
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_plug_host_into_network(self):
tenant_id = 'ten-1'
vm_id = 'vm-1'
port_id = 123
network_id = 'net-id'
host = 'host'
port_name = '123-port'
self.drv.plug_host_into_network(vm_id, host, port_id,
network_id, tenant_id, port_name)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'vm id vm-1 hostid host',
'port id 123 name "123-port" network-id net-id',
'exit', 'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_plug_dhcp_port_into_network(self):
tenant_id = 'ten-1'
vm_id = 'vm-1'
port_id = 123
network_id = 'net-id'
host = 'host'
port_name = '123-port'
self.drv.plug_dhcp_port_into_network(vm_id, host, port_id,
network_id, tenant_id, port_name)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'network id net-id',
'dhcp id vm-1 hostid host port-id 123 name "123-port"',
'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_unplug_host_from_network(self):
tenant_id = 'ten-1'
vm_id = 'vm-1'
port_id = 123
network_id = 'net-id'
host = 'host'
self.drv.unplug_host_from_network(vm_id, host, port_id,
network_id, tenant_id)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'vm id vm-1 hostid host',
'no port id 123',
'exit', 'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_unplug_dhcp_port_from_network(self):
tenant_id = 'ten-1'
vm_id = 'vm-1'
port_id = 123
network_id = 'net-id'
host = 'host'
self.drv.unplug_dhcp_port_from_network(vm_id, host, port_id,
network_id, tenant_id)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'network id net-id',
'no dhcp id vm-1 port-id 123',
'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_create_network(self):
tenant_id = 'ten-1'
network = {
'network_id': 'net-id',
'network_name': 'net-name',
'segmentation_id': 123}
self.drv.create_network(tenant_id, network)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'network id net-id name "net-name"',
'segment 1 type vlan id 123',
'exit', 'exit', 'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_create_network_bulk(self):
tenant_id = 'ten-2'
num_networks = 10
networks = [{
'network_id': 'net-id-%d' % net_id,
'network_name': 'net-name-%d' % net_id,
'segmentation_id': net_id} for net_id in range(1, num_networks)
]
self.drv.create_network_bulk(tenant_id, networks)
cmds = ['enable',
'configure',
'cvx',
'service openstack',
'region RegionOne',
'tenant ten-2']
for net_id in range(1, num_networks):
cmds.append('network id net-id-%d name "net-name-%d"' %
(net_id, net_id))
cmds.append('segment 1 type vlan id %d' % net_id)
cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
'cvx', 'configure', 'enable']))
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_delete_network(self):
tenant_id = 'ten-1'
network_id = 'net-id'
self.drv.delete_network(tenant_id, network_id)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'no network id net-id',
'exit', 'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_delete_network_bulk(self):
tenant_id = 'ten-2'
num_networks = 10
networks = [{
'network_id': 'net-id-%d' % net_id,
'network_name': 'net-name-%d' % net_id,
'segmentation_id': net_id} for net_id in range(1, num_networks)
]
networks = ['net-id-%d' % net_id for net_id in range(1, num_networks)]
self.drv.delete_network_bulk(tenant_id, networks)
cmds = ['enable',
'configure',
'cvx',
'service openstack',
'region RegionOne',
'tenant ten-2']
for net_id in range(1, num_networks):
cmds.append('no network id net-id-%d' % net_id)
cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
'cvx', 'configure']))
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_delete_vm(self):
tenant_id = 'ten-1'
vm_id = 'vm-id'
self.drv.delete_vm(tenant_id, vm_id)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'no vm id vm-id',
'exit', 'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_delete_vm_bulk(self):
tenant_id = 'ten-2'
num_vms = 10
vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms)]
self.drv.delete_vm_bulk(tenant_id, vm_ids)
cmds = ['enable',
'configure',
'cvx',
'service openstack',
'region RegionOne',
'tenant ten-2']
for vm_id in range(1, num_vms):
cmds.append('no vm id vm-id-%d' % vm_id)
cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
'cvx', 'configure']))
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_create_vm_port_bulk(self):
tenant_id = 'ten-3'
num_vms = 10
num_ports_per_vm = 2
vms = dict(
('vm-id-%d' % vm_id, {
'vmId': 'vm-id-%d' % vm_id,
'host': 'host_%d' % vm_id,
}
) for vm_id in range(1, num_vms)
)
devices = [n_const.DEVICE_OWNER_DHCP, 'compute']
vm_port_list = []
net_count = 1
for vm_id in range(1, num_vms):
for port_id in range(1, num_ports_per_vm):
port = {
'id': 'port-id-%d-%d' % (vm_id, port_id),
'device_id': 'vm-id-%d' % vm_id,
'device_owner': devices[(vm_id + port_id) % 2],
'network_id': 'network-id-%d' % net_count,
'name': 'port-%d-%d' % (vm_id, port_id)
}
vm_port_list.append(port)
net_count += 1
self.drv.create_vm_port_bulk(tenant_id, vm_port_list, vms)
cmds = ['enable',
'configure',
'cvx',
'service openstack',
'region RegionOne',
'tenant ten-3']
net_count = 1
for vm_count in range(1, num_vms):
host = 'host_%s' % vm_count
for port_count in range(1, num_ports_per_vm):
vm_id = 'vm-id-%d' % vm_count
device_owner = devices[(vm_count + port_count) % 2]
port_name = '"port-%d-%d"' % (vm_count, port_count)
network_id = 'network-id-%d' % net_count
port_id = 'port-id-%d-%d' % (vm_count, port_count)
if device_owner == 'network:dhcp':
cmds.append('network id %s' % network_id)
cmds.append('dhcp id %s hostid %s port-id %s name %s' % (
vm_id, host, port_id, port_name))
elif device_owner == 'compute':
cmds.append('vm id %s hostid %s' % (vm_id, host))
cmds.append('port id %s name %s network-id %s' % (
port_id, port_name, network_id))
net_count += 1
cmds.extend(self._get_exit_mode_cmds(['tenant', 'region',
'openstack', 'cvx']))
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_delete_tenant(self):
tenant_id = 'ten-1'
self.drv.delete_tenant(tenant_id)
cmds = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne', 'no tenant ten-1',
'exit', 'exit', 'exit', 'exit']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_delete_tenant_bulk(self):
num_tenants = 10
tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants)]
self.drv.delete_tenant_bulk(tenant_list)
cmds = ['enable',
'configure',
'cvx',
'service openstack',
'region RegionOne']
for ten_id in range(1, num_tenants):
cmds.append('no tenant ten-%d' % ten_id)
cmds.extend(self._get_exit_mode_cmds(['region', 'openstack',
'cvx', 'configure']))
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
def test_get_network_info_returns_none_when_no_such_net(self):
expected = []
self.drv.get_tenants = mock.MagicMock()
self.drv.get_tenants.return_value = []
net_info = self.drv.get_tenants()
self.drv.get_tenants.assert_called_once_with()
        self.assertEqual(net_info, expected, ('Network info must be "None" '
                                              'for unknown network'))
def test_get_network_info_returns_info_for_available_net(self):
valid_network_id = '12345'
valid_net_info = {'network_id': valid_network_id,
'some_info': 'net info'}
known_nets = valid_net_info
self.drv.get_tenants = mock.MagicMock()
self.drv.get_tenants.return_value = known_nets
net_info = self.drv.get_tenants()
self.assertEqual(net_info, valid_net_info,
('Must return network info for a valid net'))
def test_check_cli_commands(self):
self.drv.check_cli_commands()
cmds = ['show openstack config region RegionOne timestamp']
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
class AristaRPCWrapperInvalidConfigTestCase(base.BaseTestCase):
"""Negative test cases to test the Arista Driver configuration."""
def setUp(self):
super(AristaRPCWrapperInvalidConfigTestCase, self).setUp()
self.setup_invalid_config() # Invalid config, required options not set
def setup_invalid_config(self):
setup_arista_wrapper_config('')
def test_raises_exception_on_wrong_configuration(self):
self.assertRaises(arista_exc.AristaConfigError,
arista.AristaRPCWrapper)
class NegativeRPCWrapperTestCase(base.BaseTestCase):
"""Negative test cases to test the RPC between Arista Driver and EOS."""
def setUp(self):
super(NegativeRPCWrapperTestCase, self).setUp()
setup_valid_config()
def test_exception_is_raised_on_json_server_error(self):
drv = arista.AristaRPCWrapper()
drv._server = mock.MagicMock()
drv._server.runCmds.side_effect = Exception('server error')
self.assertRaises(arista_exc.AristaRpcError, drv.get_tenants)
class RealNetStorageAristaDriverTestCase(base.BaseTestCase):
"""Main test cases for Arista Mechanism driver.
Tests all mechanism driver APIs supported by Arista Driver. It invokes
all the APIs as they would be invoked in real world scenarios and
verifies the functionality.
"""
def setUp(self):
super(RealNetStorageAristaDriverTestCase, self).setUp()
self.fake_rpc = mock.MagicMock()
ndb.configure_db()
self.drv = arista.AristaDriver(self.fake_rpc)
def tearDown(self):
super(RealNetStorageAristaDriverTestCase, self).tearDown()
self.drv.stop_synchronization_thread()
def test_create_and_delete_network(self):
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id)
self.drv.create_network_precommit(network_context)
net_provisioned = db.is_network_provisioned(tenant_id, network_id)
self.assertTrue(net_provisioned, 'The network should be created')
expected_num_nets = 1
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
#Now test the delete network
self.drv.delete_network_precommit(network_context)
net_provisioned = db.is_network_provisioned(tenant_id, network_id)
        self.assertFalse(net_provisioned, 'The network should be deleted')
expected_num_nets = 0
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
def test_create_and_delete_multiple_networks(self):
tenant_id = 'ten-1'
expected_num_nets = 100
segmentation_id = 1001
nets = ['id%s' % n for n in range(expected_num_nets)]
for net_id in nets:
network_context = self._get_network_context(tenant_id,
net_id,
segmentation_id)
self.drv.create_network_precommit(network_context)
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
#now test the delete networks
for net_id in nets:
network_context = self._get_network_context(tenant_id,
net_id,
segmentation_id)
self.drv.delete_network_precommit(network_context)
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
expected_num_nets = 0
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
def test_create_and_delete_ports(self):
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
vms = ['vm1', 'vm2', 'vm3']
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id)
self.drv.create_network_precommit(network_context)
for vm_id in vms:
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
self.drv.create_port_precommit(port_context)
vm_list = db.get_vms(tenant_id)
provisioned_vms = len(vm_list)
expected_vms = len(vms)
self.assertEqual(expected_vms, provisioned_vms,
'There should be %d '
'hosts, not %d' % (expected_vms, provisioned_vms))
# Now test the delete ports
for vm_id in vms:
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
self.drv.delete_port_precommit(port_context)
vm_list = db.get_vms(tenant_id)
provisioned_vms = len(vm_list)
expected_vms = 0
self.assertEqual(expected_vms, provisioned_vms,
'There should be %d '
'VMs, not %d' % (expected_vms, provisioned_vms))
def _get_network_context(self, tenant_id, net_id, seg_id):
network = {'id': net_id,
'tenant_id': tenant_id}
network_segments = [{'segmentation_id': seg_id}]
return FakeNetworkContext(network, network_segments, network)
def _get_port_context(self, tenant_id, net_id, vm_id, network):
port = {'device_id': vm_id,
'device_owner': 'compute',
'binding:host_id': 'ubuntu1',
'tenant_id': tenant_id,
'id': 101,
'network_id': net_id
}
return FakePortContext(port, port, network)
class fake_keystone_info_class(object):
    """To generate fake Keystone authentication token information
Arista Driver expects Keystone auth info. This fake information
is for testing only
"""
auth_protocol = 'abc'
auth_host = 'host'
auth_port = 5000
admin_user = 'neutron'
admin_password = 'fun'
class FakeNetworkContext(object):
"""To generate network context for testing purposes only."""
def __init__(self, network, segments=None, original_network=None):
self._network = network
self._original_network = original_network
self._segments = segments
@property
def current(self):
return self._network
@property
def original(self):
return self._original_network
@property
def network_segments(self):
return self._segments
class FakePortContext(object):
"""To generate port context for testing purposes only."""
def __init__(self, port, original_port, network):
self._port = port
self._original_port = original_port
self._network_context = network
@property
def current(self):
return self._port
@property
def original(self):
return self._original_port
@property
def network(self):
return self._network_context
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description: add work hours
'''
#import sys
from core.people.person import Profile, Session, ProfileTimesheet
from core.utils.sys.report import report_bug
#from core.config import settings
from config.settings import logger
import datetime
#import zmq
class Reaction:
    """Begin registering work hours"""
response = ''
request = ''
xmpp = ''
def __str__(self):
        return 'Begin registering work hours'
@classmethod
def __init__(self, *args, **kwargs):
""" original request string """
#get request object
self.req_obj = kwargs.get('req_obj')
#request word sequence
self.request = self.req_obj.get('request', '')
#request received from (julius, jabber any other resources)
self.req_from = self.req_obj.get('from', '')
#get command history
self.cmd_stack = kwargs.pop('cmd_stack', '')
self.response = ''
#self.xmpp = EchoBot(
#settings.MY_ACCOUNTS['gmail']['email'],
#settings.MY_ACCOUNTS['gmail']['password'],
#)
#self.xmpp.register_plugin('xep_0030') # Service Discovery
#self.xmpp.register_plugin('xep_0045') # Multi-User Chat
#self.xmpp.register_plugin('xep_0199') # XMPP Ping
#self.xmpp.register_plugin('xep_0004') # Data Forms
#self.xmpp.register_plugin('xep_0060') # PubSub
#if self.xmpp.connect():
#self.xmpp.process(threaded=False)
#else:
#logger.info("Unable to connect")
#if self.xmpp.connect():
#self.xmpp.process(threaded=False)
#else:
#logger.error("Unable to connect")
@classmethod
def run(self):
"""default method"""
logger.debug(self.req_obj)
uuid = self.req_obj.get('uuid', '')
sender = self.req_obj.get('sender', '')
        # extract sender email
if sender:
email = sender.split('/')[0]
sess = Session()
self.response = {
            'text': "I couldn't find your profile by %s, an error occurred" % uuid,
            'jmsg': "I couldn't find your profile by %s, an error occurred" % uuid,
'type': 'response'}
#########################################
# check and get profile #
#########################################
if uuid:
try:
profile = sess.query(Profile).filter(Profile.uuid == uuid).one()
except Exception as e:
logger.exception(e)
return self.response
if email:
try:
profile = sess.query(Profile).filter(Profile.email == email).one()
except Exception as e:
logger.exception(e)
return self.response
#request_to_user = 'Hi %s, how many hours are you going to work today?' % profile.first_name
request_to_user = 'how many hours are you going to work'
todo = {'request': request_to_user,
'from': 'jabber',
'type': 'response',
'continue': 1,
'text': request_to_user,
'jmsg': request_to_user,
'sender': str(profile.email)}
#########################################
# If executed by crontab #
#########################################
if self.req_from == 'cron':
#args = self.req_obj.get('cmd_args', '')
logger.info('Sending cron notification to user.')
#logger.info('Trying to connect jabber socket and send a message.')
#context = zmq.Context()
#sock = context.socket(zmq.REQ)
#sock.connect('ipc:///tmp/smarty-jabber')
#sock.send_json({'request': request_to_user,
#'from': 'jabber',
#'type': 'response',
#'continue': 1,
#'sender': str(profile.email)})
#res_obj = sock.recv_json()
#logger.info('======================= response obj ========================')
#logger.debug(res_obj)
#self.xmpp.Message(profile.email, request_to_user)
#self.response = res_obj
if self.req_from == 'jabber':
self.response = todo
if self.req_from == 'julius':
from core.broadcast import say, bang
bang()
todo['type'] = 'response'
todo['say'] = request_to_user
self.response = say(self.request.replace('say', '').upper())
return self.response
@classmethod
def on_continue(self, msg):
"""docstring for on_continue"""
todo = {}
response = "Ok."
request = msg.get('request', None)
sender = msg.get('sender', '')
req_from = msg.get('from', '')
#error = msg.get('error', '')
#logger.info('error %s..........................' % error)
#logger.info('req_from %s..........................' % req_from)
#logger.info('request %s..........................' % request)
sess = Session()
        #extract sender email
email = sender.split('/')[0]
#find user profile by primary email
profile = sess.query(Profile).filter(Profile.email == email).one()
if self.is_correct_format(request):
insert = {}
insert['uuid'] = profile.uuid
insert['type'] = 'custom'
insert['created'] = datetime.datetime.now()
            logger.debug('request type %s: %s', type(request), request)
insert['spent'] = request
try:
ts = ProfileTimesheet(**insert)
sess.add(ts)
sess.commit()
except Exception as e:
sess.rollback()
logger.exception(e)
report_bug(e)
                response = ('I could not save it; the problem has already '
                            'been reported to the developer')
else:
response = " type something like: 8 hours "
todo['continue'] = 1
if req_from == 'jabber':
todo = {'text': response,
'jmsg': response,
'type': 'response',
'continue': 0}
self.response = todo
if req_from == 'julius':
from core.broadcast import say, bang
bang()
todo = {'say': response,
'text': response,
'type': 'response',
'continue': 0}
self.response = say(self.request.replace('say', '').upper())
return self.response
    def is_correct_format(self, request):
        """Later this will validate the format; for now any non-empty request passes."""
        return request
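# Illustrative usage sketch, not part of the original reaction: it shows how a
# request object from the jabber frontend could drive this class. The keys
# mirror the ones read in run() and on_continue() above; the concrete values
# (and the presence of a matching Profile row in the database) are assumptions.
if __name__ == '__main__':
    sample_req = {
        'request': 'register work hours',
        'from': 'jabber',
        'sender': '[email protected]/home',
        'uuid': '',
    }
    reaction = Reaction(req_obj=sample_req)
    # run() asks the question; on_continue() stores the user's reply.
    print(reaction.run())
    print(reaction.on_continue({'request': '8 hours',
                                'from': 'jabber',
                                'sender': '[email protected]/home'}))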
|
|
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes for unary and binary operations.
No short-circuiting is involved; boolean 'not' is a unary operation just like
'-' is, with no real difference.
"""
import math
from nuitka import PythonOperators
from .NodeBases import ExpressionChildrenHavingBase
class ExpressionOperationBase(ExpressionChildrenHavingBase):
inplace_suspect = False
def __init__(self, operator, simulator, values, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values = values,
source_ref = source_ref
)
self.operator = operator
self.simulator = simulator
def markAsInplaceSuspect(self):
self.inplace_suspect = True
def unmarkAsInplaceSuspect(self):
self.inplace_suspect = False
def isInplaceSuspect(self):
return self.inplace_suspect
def getDetail(self):
return self.operator
def getDetails(self):
return {
"operator" : self.operator
}
def getOperator(self):
return self.operator
def getSimulator(self):
return self.simulator
def isKnownToBeIterable(self, count):
# TODO: Could be true, if the arguments said so
return None
class ExpressionOperationBinary(ExpressionOperationBase):
kind = "EXPRESSION_OPERATION_BINARY"
named_children = ("left", "right")
def __init__(self, operator, left, right, source_ref):
        assert left.isExpression() and right.isExpression(), (left, right)
ExpressionOperationBase.__init__(
self,
operator = operator,
simulator = PythonOperators.binary_operator_functions[ operator ],
values = {
"left" : left,
"right" : right
},
source_ref = source_ref
)
def computeExpression(self, constraint_collection):
# This is using many returns based on many conditions,
# pylint: disable=R0911,R0912
operator = self.getOperator()
operands = self.getOperands()
left, right = operands
if left.willRaiseException(BaseException):
return (
left,
"new_raise",
"Left argument of binary operation raises exception"
)
if right.willRaiseException(BaseException):
from .NodeMakingHelpers import wrapExpressionWithNodeSideEffects
result = wrapExpressionWithNodeSideEffects(
new_node = right,
old_node = left
)
return (
result,
"new_raise",
"Right argument of binary operation raises exception"
)
if left.isCompileTimeConstant() and right.isCompileTimeConstant():
left_value = left.getCompileTimeConstant()
right_value = right.getCompileTimeConstant()
if operator == "Mult" and right.isNumberConstant():
iter_length = left.getIterationLength()
if iter_length is not None:
if iter_length * right_value > 256:
return self, None, None
if left.isNumberConstant():
if left.isIndexConstant() and right.isIndexConstant():
                        # Estimate with logarithms whether the result of the
                        # number calculation is computable with acceptable
                        # effort, otherwise we will have to do it at runtime.
if left_value != 0 and right_value != 0:
if math.log10(abs(left_value)) + math.log10(abs(right_value)) > 20:
return self, None, None
elif operator == "Mult" and left.isNumberConstant():
iter_length = right.getIterationLength()
if iter_length is not None:
if iter_length * left_value > 256:
return self, None, None
elif operator == "Add" and \
left.isKnownToBeIterable(None) and \
right.isKnownToBeIterable(None):
iter_length = left.getIterationLength() + \
right.getIterationLength()
if iter_length > 256:
return self, None, None
from .NodeMakingHelpers import getComputationResult
return getComputationResult(
node = self,
computation = lambda : self.getSimulator()(
left_value,
right_value
),
description = "Operator '%s' with constant arguments." % operator
)
else:
# The value of these nodes escaped and could change its contents.
constraint_collection.removeKnowledge(left)
constraint_collection.removeKnowledge(right)
# Any code could be run, note that.
constraint_collection.onControlFlowEscape(self)
return self, None, None
def getOperands(self):
return (self.getLeft(), self.getRight())
getLeft = ExpressionChildrenHavingBase.childGetter("left")
getRight = ExpressionChildrenHavingBase.childGetter("right")
class ExpressionOperationUnary(ExpressionOperationBase):
kind = "EXPRESSION_OPERATION_UNARY"
named_children = ("operand",)
def __init__(self, operator, operand, source_ref):
assert operand.isExpression(), operand
ExpressionOperationBase.__init__(
self,
operator = operator,
simulator = PythonOperators.unary_operator_functions[ operator ],
values = {
"operand" : operand
},
source_ref = source_ref
)
def computeExpression(self, constraint_collection):
operator = self.getOperator()
operand = self.getOperand()
if operand.isCompileTimeConstant():
operand_value = operand.getCompileTimeConstant()
from .NodeMakingHelpers import getComputationResult
return getComputationResult(
node = self,
computation = lambda : self.getSimulator()(
operand_value,
),
description = "Operator '%s' with constant argument." % operator
)
else:
# The value of that node escapes and could change its contents.
constraint_collection.removeKnowledge(operand)
# Any code could be run, note that.
constraint_collection.onControlFlowEscape(self)
return self, None, None
getOperand = ExpressionChildrenHavingBase.childGetter("operand")
def getOperands(self):
return (self.getOperand(),)
@staticmethod
def isExpressionOperationUnary():
return True
class ExpressionOperationNOT(ExpressionOperationUnary):
kind = "EXPRESSION_OPERATION_NOT"
def __init__(self, operand, source_ref):
ExpressionOperationUnary.__init__(
self,
operator = "Not",
operand = operand,
source_ref = source_ref
)
def getDetails(self):
return {}
def computeExpression(self, constraint_collection):
operand = self.getOperand()
if operand.willRaiseException(BaseException):
return (
operand,
"new_raise",
"Argument of 'not' operation raises exception"
)
return operand.computeExpressionOperationNot(
not_node = self,
constraint_collection = constraint_collection
)
def getTruthValue(self):
result = self.getOperand().getTruthValue()
        # The truth value of the operand needs to be inverted here, of course.
return None if result is None else not result
def mayHaveSideEffects(self):
operand = self.getOperand()
if operand.mayHaveSideEffects():
return True
return operand.mayHaveSideEffectsBool()
def mayHaveSideEffectsBool(self):
return self.getOperand().mayHaveSideEffectsBool()
def extractSideEffects(self):
operand = self.getOperand()
# TODO: Find the common ground of these, and make it an expression
# method.
if operand.isExpressionMakeSequence():
return operand.extractSideEffects()
if operand.isExpressionMakeDict():
return operand.extractSideEffects()
return (self,)
class ExpressionOperationBinaryInplace(ExpressionOperationBinary):
kind = "EXPRESSION_OPERATION_BINARY_INPLACE"
def __init__(self, operator, left, right, source_ref):
ExpressionOperationBinary.__init__(
self,
operator = operator,
left = left,
right = right,
source_ref = source_ref
)
@staticmethod
def isExpressionOperationBinary():
return True
def computeExpression(self, constraint_collection):
# In-place operation requires extra care to avoid corruption of
# values.
left = self.getLeft()
right = self.getRight()
if left.isCompileTimeConstant():
            # A mutable compile time constant here would mean we made a mistake.
assert not left.isMutable(), self
source_ref = self.getSourceReference()
result = ExpressionOperationBinary(
left = left,
right = right,
operator = self.getOperator()[1:],
source_ref = source_ref
)
constraint_collection.signalChange(
tags = "new_expression",
source_ref = source_ref,
message = """\
Lowered in-place binary operation of compile time constant to binary operation."""
)
return result.computeExpression(constraint_collection)
else:
# The value of these nodes escaped and could change its contents.
constraint_collection.removeKnowledge(left)
constraint_collection.removeKnowledge(right)
# Any code could be run, note that.
constraint_collection.onControlFlowEscape(self)
return self, None, None
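# Illustrative sketch, not part of Nuitka itself: the "simulator" attached to a
# node is just the Python level function implementing that operator, so compile
# time folding of constant operands amounts to calling the simulator directly.
# The dictionary below only mimics what PythonOperators.binary_operator_functions
# is assumed to provide for two operators; it is not used by the classes above.
if __name__ == "__main__":
    import operator
    example_simulators = {
        "Add"  : operator.add,
        "Mult" : operator.mul,
    }
    assert example_simulators["Add"](2, 3) == 5
    assert example_simulators["Mult"](4, 5) == 20
    # The folding in ExpressionOperationBinary.computeExpression is skipped when
    # a log10 based estimate says the result would become too large, e.g.:
    assert math.log10(abs(10 ** 15)) + math.log10(abs(10 ** 15)) > 20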
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.tools.quantization import quantize_graph
flags = tf.app.flags
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(graph_def, input_map={}, name="")
with tf.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = tf.GraphDef()
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=a,
dtype=tf.float32,
shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=b,
dtype=tf.float32,
shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", tf.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=tf.float32,
shape=[
image_batch_count, image_height, image_width, depth
])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=tf.float32,
shape=[
filter_size, filter_size, depth, filter_count
])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node("Conv2D", conv_name,
[input_constant_name,
filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", tf.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
  on failure, paying special attention to possible biases by looking at the
  mean and mean absolute differences.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
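  Example (illustrative):
    >>> are_tensors_near(np.array([1.0, 2.0]), np.array([1.0, 2.0]), 0.1)
    True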
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
print("Tensors are different sizes: " + str(len(flat_a)) + " vs " +
str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
print("Tensors have {0} different values ({1}%), with mean difference"
" {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100, mean_difference,
mean_abs_difference))
return False
def get_top_value(input_values):
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
    if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
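# Illustrative example (comment only): get_top_value(np.array([0.1, 0.9, 0.3]))
# returns (1, 0.9), i.e. the flattened index and value of the largest element.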
def test_graph(float_graph_def, input_map, output_names):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(float_graph_def, input_map,
[output_name + ":0"
for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit")
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(eightbit_graph_def, input_map,
[output_name + ":0"
for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded")
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(weights_rounded_graph_def, input_map,
[output_name + ":0"
for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(tf.test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=tf.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
    # These tests are added to test the degenerate case where
    # min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_reshape(self):
"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
def make_matmul(name, a, b):
n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
quantize_graph.set_attr_dtype(n, "T", tf.float32)
quantize_graph.set_attr_bool(n, "transpose_a", False)
quantize_graph.set_attr_bool(n, "transpose_b", False)
return n
# matmul_1 = input*weight_1
input_node = quantize_graph.create_constant_node(
"input", value=[0, 1, 2, 3], dtype=tf.float32, shape=[4, 1])
weight_1_node = quantize_graph.create_constant_node(
"weight_1", value=[.5, .6, .7, .8, .9], dtype=tf.float32, shape=[1, 5])
matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
# Reshape 4x5 to 10x2.
new_shape_node = quantize_graph.create_constant_node(
"new_shape_node", value=[10, 2], dtype=tf.int32, shape=[2])
reshape_node = quantize_graph.create_node(
"Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
quantize_graph.set_attr_dtype(reshape_node, "T", tf.float32)
# matmul_2_node = reshape*weight_2
weight_2_node = quantize_graph.create_constant_node(
"weight_2", value=[1.5, 2.5], dtype=tf.float32, shape=[2, 1])
matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
g = tf.GraphDef()
g.node.extend([input_node, weight_1_node, matmul_1_node,
new_shape_node, reshape_node, weight_2_node,
matmul_2_node])
# Test the graph
test_graph(g, {}, ["matmul_2"])
    # Verify which quantize/dequantize ops remain after the eightbit rewrite.
eightbit_rewriter = quantize_graph.GraphRewriter(g, "eightbit")
eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
tf.logging.info("S:\n%s", str(eightbit_graph_def))
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_quantize_array(self):
    # Test invalid parameters (empty array, or 0 buckets).
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([]), 2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = tf.GraphDef()
shape_constant = quantize_graph.create_constant_node(shape_constant_name,
value=0,
dtype=tf.int32,
shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12],
dtype=tf.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=[13, 14, 15, 16, 17,
18, 19, 20, 21, 22,
23, 24],
dtype=tf.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node("Concat", concat_name,
[shape_constant_name,
a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", tf.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(split_constant_name,
value=1,
dtype=tf.int32,
shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node("Split", split_name,
[split_constant_name,
input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", tf.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(concat_constant_name,
value=1,
dtype=tf.int32,
shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node("Concat", concat_name,
[concat_constant_name,
split_name + ":0",
split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", tf.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", tf.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", tf.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = tf.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=1,
dtype=tf.float32,
shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node("Identity", a_identity_name,
[a_constant_name,
"^" + a_check_name,
"^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=1,
dtype=tf.float32,
shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node("Identity", b_identity_name,
[b_constant_name,
"^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name,
b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", tf.float32)
graph_def.node.extend([add_node])
expected_output = tf.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=1,
dtype=tf.float32,
shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node("Identity", a_identity_name,
[a_constant_name,
"^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=1,
dtype=tf.float32,
shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name,
b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", tf.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 4, 2, 5, 3,
6, -1, -4, -2,
-5, -3, -6],
dtype=tf.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(mean_constant_name,
value=[10, 20],
dtype=tf.float32,
shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name, value=[0.25, 0.5], dtype=tf.float32, shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(beta_constant_name,
value=[0.1, 0.6],
dtype=tf.float32,
shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(gamma_constant_name,
value=[0, 0],
dtype=tf.float32,
shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name,
[input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name])
quantize_graph.set_attr_dtype(batch_norm_node, "T", tf.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", tf.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", tf.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", tf.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = tf.GraphDef()
input_constant = quantize_graph.create_constant_node(input_constant_name,
value=[1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12],
dtype=tf.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(offset_constant_name,
value=[1, 2, 3, 4, 5,
6],
dtype=tf.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node("BiasAdd", bias_add_name,
[input_constant_name,
offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", tf.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = tf.GraphDef()
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(a_constant_min_name,
value=2,
dtype=tf.float32,
shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(a_constant_max_name,
value=2,
dtype=tf.float32,
shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node("Dequantize",
a_dequantize_name,
[a_constant_name,
a_constant_min_name,
a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", tf.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node("QuantizeV2",
a_quantize_name,
[a_dequantize_name,
a_dequantize_name + ":1",
a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", tf.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(b_constant_min_name,
value=3,
dtype=tf.float32,
shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(b_constant_max_name,
value=3,
dtype=tf.float32,
shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node("Dequantize",
b_dequantize_name,
[b_constant_name,
b_constant_min_name,
b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", tf.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node("QuantizeV2",
b_quantize_name,
[b_dequantize_name,
b_dequantize_name + ":1",
b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", tf.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name,
[a_quantize_name,
b_quantize_name,
a_quantize_name + ":1",
a_quantize_name + ":2",
b_quantize_name + ":1",
b_quantize_name + ":2"])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", tf.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", tf.int32)
graph_def.node.extend([mat_mul_node])
expected_output = tf.GraphDef()
a_constant = quantize_graph.create_constant_node(a_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(a_constant_min_name,
value=2,
dtype=tf.float32,
shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(a_constant_max_name,
value=2,
dtype=tf.float32,
shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(b_constant_name,
value=(0,),
dtype=tf.quint8,
shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(b_constant_min_name,
value=3,
dtype=tf.float32,
shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(b_constant_max_name,
value=3,
dtype=tf.float32,
shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name,
[a_constant_name,
b_constant_name,
a_constant_min_name,
a_constant_max_name,
b_constant_min_name,
b_constant_max_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", tf.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", tf.int32)
expected_output.node.extend([mat_mul_node])
rewriter = quantize_graph.GraphRewriter(graph_def, [mat_mul_name])
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
if __name__ == "__main__":
tf.test.main()
|
|
"""
Test for Nest sensors platform for the Smart Device Management API.
These tests fake out the subscriber/devicemanager, and are not using a real
pubsub subscriber.
"""
from google_nest_sdm.device import Device
from google_nest_sdm.event import EventMessage
from homeassistant.components.sensor import ATTR_STATE_CLASS, STATE_CLASS_MEASUREMENT
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.helpers import device_registry as dr, entity_registry as er
from .common import async_setup_sdm_platform
PLATFORM = "sensor"
THERMOSTAT_TYPE = "sdm.devices.types.THERMOSTAT"
async def async_setup_sensor(hass, devices=None, structures=None):
    """Set up the platform and prerequisites."""
    return await async_setup_sdm_platform(
        hass, PLATFORM, devices or {}, structures or {}
    )
async def test_thermostat_device(hass):
"""Test a thermostat with temperature and humidity sensors."""
devices = {
"some-device-id": Device.MakeDevice(
{
"name": "some-device-id",
"type": THERMOSTAT_TYPE,
"traits": {
"sdm.devices.traits.Info": {
"customName": "My Sensor",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
"sdm.devices.traits.Humidity": {
"ambientHumidityPercent": 35.0,
},
},
},
auth=None,
)
}
await async_setup_sensor(hass, devices)
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "25.1"
assert temperature.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert temperature.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
assert temperature.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_MEASUREMENT
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is not None
assert humidity.state == "35"
assert humidity.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert humidity.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_HUMIDITY
assert humidity.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_MEASUREMENT
registry = er.async_get(hass)
entry = registry.async_get("sensor.my_sensor_temperature")
assert entry.unique_id == "some-device-id-temperature"
assert entry.original_name == "My Sensor Temperature"
assert entry.domain == "sensor"
entry = registry.async_get("sensor.my_sensor_humidity")
assert entry.unique_id == "some-device-id-humidity"
assert entry.original_name == "My Sensor Humidity"
assert entry.domain == "sensor"
device_registry = dr.async_get(hass)
device = device_registry.async_get(entry.device_id)
assert device.name == "My Sensor"
assert device.model == "Thermostat"
assert device.identifiers == {("nest", "some-device-id")}
async def test_no_devices(hass):
"""Test no devices returned by the api."""
await async_setup_sensor(hass)
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is None
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is None
async def test_device_no_sensor_traits(hass):
"""Test a device with applicable sensor traits."""
devices = {
"some-device-id": Device.MakeDevice(
{
"name": "some-device-id",
"type": THERMOSTAT_TYPE,
"traits": {},
},
auth=None,
)
}
await async_setup_sensor(hass, devices)
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is None
humidity = hass.states.get("sensor.my_sensor_humidity")
assert humidity is None
async def test_device_name_from_structure(hass):
"""Test a device without a custom name, inferring name from structure."""
devices = {
"some-device-id": Device.MakeDevice(
{
"name": "some-device-id",
"type": THERMOSTAT_TYPE,
"traits": {
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.2,
},
},
"parentRelations": [
{"parent": "some-structure-id", "displayName": "Some Room"}
],
},
auth=None,
)
}
await async_setup_sensor(hass, devices)
temperature = hass.states.get("sensor.some_room_temperature")
assert temperature is not None
assert temperature.state == "25.2"
async def test_event_updates_sensor(hass):
"""Test a pubsub message received by subscriber to update temperature."""
devices = {
"some-device-id": Device.MakeDevice(
{
"name": "some-device-id",
"type": THERMOSTAT_TYPE,
"traits": {
"sdm.devices.traits.Info": {
"customName": "My Sensor",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
},
},
auth=None,
)
}
subscriber = await async_setup_sensor(hass, devices)
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "25.1"
# Simulate a pubsub message received by the subscriber with a trait update
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": "some-device-id",
"traits": {
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 26.2,
},
},
},
},
auth=None,
)
await subscriber.async_receive_event(event)
await hass.async_block_till_done() # Process dispatch/update signal
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "26.2"
async def test_device_with_unknown_type(hass):
"""Test a device without a custom name, inferring name from structure."""
devices = {
"some-device-id": Device.MakeDevice(
{
"name": "some-device-id",
"type": "some-unknown-type",
"traits": {
"sdm.devices.traits.Info": {
"customName": "My Sensor",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.1,
},
},
},
auth=None,
)
}
await async_setup_sensor(hass, devices)
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature is not None
assert temperature.state == "25.1"
registry = er.async_get(hass)
entry = registry.async_get("sensor.my_sensor_temperature")
assert entry.unique_id == "some-device-id-temperature"
assert entry.original_name == "My Sensor Temperature"
assert entry.domain == "sensor"
device_registry = dr.async_get(hass)
device = device_registry.async_get(entry.device_id)
assert device.name == "My Sensor"
assert device.model is None
assert device.identifiers == {("nest", "some-device-id")}
async def test_temperature_rounding(hass):
"""Test the rounding of overly precise temperatures."""
devices = {
"some-device-id": Device.MakeDevice(
{
"name": "some-device-id",
"type": THERMOSTAT_TYPE,
"traits": {
"sdm.devices.traits.Info": {
"customName": "My Sensor",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 25.15678,
},
},
},
auth=None,
)
}
await async_setup_sensor(hass, devices)
temperature = hass.states.get("sensor.my_sensor_temperature")
assert temperature.state == "25.2"
|
|
from datetime import (
datetime,
timedelta,
)
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_datetime64_any_dtype
from pandas import (
DatetimeIndex,
DatetimeTZDtype,
Index,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
isna,
offsets,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
@pytest.mark.parametrize(
"nat,idx",
[
(Timestamp("NaT"), DatetimeArray),
(Timedelta("NaT"), TimedeltaArray),
(Period("NaT", freq="M"), PeriodArray),
],
)
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"])
for field in DatetimeArray._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
if field in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
ser = Series(idx)
for field in DatetimeArray._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
if field in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeArray._bool_ops:
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat"])
def test_identity(klass, value):
assert klass(value) is NaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan])
def test_equality(klass, value, request):
if klass is Period and value == "":
request.node.add_marker(
pytest.mark.xfail(reason="Period cannot parse empty string")
)
assert klass(value).value == iNaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta])
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
def test_round_nat(klass, method, freq):
# see gh-14940
ts = klass("nat")
round_method = getattr(ts, method)
assert round_method(freq) is ts
@pytest.mark.parametrize(
"method",
[
"astimezone",
"combine",
"ctime",
"dst",
"fromordinal",
"fromtimestamp",
"fromisocalendar",
"isocalendar",
"strftime",
"strptime",
"time",
"timestamp",
"timetuple",
"timetz",
"toordinal",
"tzname",
"utcfromtimestamp",
"utcnow",
"utcoffset",
"utctimetuple",
"timestamp",
],
)
def test_nat_methods_raise(method):
# see gh-9513, gh-17329
msg = f"NaTType does not support {method}"
with pytest.raises(ValueError, match=msg):
getattr(NaT, method)()
@pytest.mark.parametrize("method", ["weekday", "isoweekday"])
def test_nat_methods_nan(method):
# see gh-9513, gh-17329
assert np.isnan(getattr(NaT, method)())
@pytest.mark.parametrize(
"method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"]
)
def test_nat_methods_nat(method):
# see gh-8254, gh-9513, gh-17329
assert getattr(NaT, method)() is NaT
@pytest.mark.parametrize(
"get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)]
)
def test_nat_iso_format(get_nat):
# see gh-12300
assert get_nat("NaT").isoformat() == "NaT"
assert get_nat("NaT").isoformat(timespec="nanoseconds") == "NaT"
@pytest.mark.parametrize(
"klass,expected",
[
(Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]),
(
Timedelta,
[
"components",
"delta",
"is_populated",
"resolution_string",
"to_pytimedelta",
"to_timedelta64",
"view",
],
),
],
)
def test_missing_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# Here, we check which public methods NaT does not have. We
# ignore any missing private methods.
nat_names = dir(NaT)
klass_names = dir(klass)
missing = [x for x in klass_names if x not in nat_names and not x.startswith("_")]
missing.sort()
assert missing == expected
def _get_overlap_public_nat_methods(klass, as_tuple=False):
"""
Get overlapping public methods between NaT and another class.
Parameters
----------
klass : type
The class to compare with NaT
as_tuple : bool, default False
Whether to return a list of tuples of the form (klass, method).
Returns
-------
overlap : list
"""
nat_names = dir(NaT)
klass_names = dir(klass)
overlap = [
x
for x in nat_names
if x in klass_names and not x.startswith("_") and callable(getattr(klass, x))
]
# Timestamp takes precedence over Timedelta in terms of overlap.
if klass is Timedelta:
ts_names = dir(Timestamp)
overlap = [x for x in overlap if x not in ts_names]
if as_tuple:
overlap = [(klass, method) for method in overlap]
overlap.sort()
return overlap
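def test_get_overlap_public_nat_methods_example():
    # Illustrative companion check, added for clarity rather than coverage: per
    # the parametrization below, Timedelta's only overlapping public method not
    # already provided by Timestamp is "total_seconds"; as_tuple=True returns
    # (klass, method) pairs instead of plain names.
    assert _get_overlap_public_nat_methods(Timedelta) == ["total_seconds"]
    assert _get_overlap_public_nat_methods(Timedelta, as_tuple=True) == [
        (Timedelta, "total_seconds")
    ]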
@pytest.mark.parametrize(
"klass,expected",
[
(
Timestamp,
[
"astimezone",
"ceil",
"combine",
"ctime",
"date",
"day_name",
"dst",
"floor",
"fromisocalendar",
"fromisoformat",
"fromordinal",
"fromtimestamp",
"isocalendar",
"isoformat",
"isoweekday",
"month_name",
"now",
"replace",
"round",
"strftime",
"strptime",
"time",
"timestamp",
"timetuple",
"timetz",
"to_datetime64",
"to_numpy",
"to_pydatetime",
"today",
"toordinal",
"tz_convert",
"tz_localize",
"tzname",
"utcfromtimestamp",
"utcnow",
"utcoffset",
"utctimetuple",
"weekday",
],
),
(Timedelta, ["total_seconds"]),
],
)
def test_overlap_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
    # In cases where Timestamp, Timedelta, and NaT methods overlap, the overlap
    # is attributed to Timestamp and NaT, not Timedelta.
assert _get_overlap_public_nat_methods(klass) == expected
@pytest.mark.parametrize(
"compare",
(
_get_overlap_public_nat_methods(Timestamp, True)
+ _get_overlap_public_nat_methods(Timedelta, True)
),
)
def test_nat_doc_strings(compare):
# see gh-17327
#
# The docstrings for overlapping methods should match.
klass, method = compare
klass_doc = getattr(klass, method).__doc__
# Ignore differences with Timestamp.isoformat() as they're intentional
if klass == Timestamp and method == "isoformat":
return
if method == "to_numpy":
# GH#44460 can return either dt64 or td64 depending on dtype,
# different docstring is intentional
return
nat_doc = getattr(NaT, method).__doc__
assert klass_doc == nat_doc
_ops = {
"left_plus_right": lambda a, b: a + b,
"right_plus_left": lambda a, b: b + a,
"left_minus_right": lambda a, b: a - b,
"right_minus_left": lambda a, b: b - a,
"left_times_right": lambda a, b: a * b,
"right_times_left": lambda a, b: b * a,
"left_div_right": lambda a, b: a / b,
"right_div_left": lambda a, b: b / a,
}
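# For orientation (illustrative comment): each entry maps an operation name to
# a two-argument callable, e.g. _ops["left_minus_right"](NaT, Timedelta("1 day"))
# evaluates NaT - Timedelta("1 day"), which is NaT.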
@pytest.mark.parametrize("op_name", list(_ops.keys()))
@pytest.mark.parametrize(
"value,val_type",
[
(2, "scalar"),
(1.5, "floating"),
(np.nan, "floating"),
("foo", "str"),
(timedelta(3600), "timedelta"),
(Timedelta("5s"), "timedelta"),
(datetime(2014, 1, 1), "timestamp"),
(Timestamp("2014-01-01"), "timestamp"),
(Timestamp("2014-01-01", tz="UTC"), "timestamp"),
(Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"),
(pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"),
],
)
def test_nat_arithmetic_scalar(op_name, value, val_type):
# see gh-6873
invalid_ops = {
"scalar": {"right_div_left"},
"floating": {
"right_div_left",
"left_minus_right",
"right_minus_left",
"left_plus_right",
"right_plus_left",
},
"str": set(_ops.keys()),
"timedelta": {"left_times_right", "right_times_left"},
"timestamp": {
"left_times_right",
"right_times_left",
"left_div_right",
"right_div_left",
},
}
op = _ops[op_name]
if op_name in invalid_ops.get(val_type, set()):
if (
val_type == "timedelta"
and "times" in op_name
and isinstance(value, Timedelta)
):
typs = "(Timedelta|NaTType)"
msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'"
elif val_type == "str":
# un-specific check here because the message comes from str
# and varies by method
msg = "|".join(
[
"can only concatenate str",
"unsupported operand type",
"can't multiply sequence",
"Can't convert 'NaTType'",
"must be str, not NaTType",
]
)
else:
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
op(NaT, value)
else:
if val_type == "timedelta" and "div" in op_name:
expected = np.nan
else:
expected = NaT
assert op(NaT, value) is expected
@pytest.mark.parametrize(
"val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)]
)
def test_nat_rfloordiv_timedelta(val, expected):
    # see gh-18846
#
# See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
td = Timedelta(hours=3, minutes=4)
assert td // val is expected
@pytest.mark.parametrize(
"op_name",
["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
)
@pytest.mark.parametrize(
"value",
[
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"),
DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]),
DatetimeArray._from_sequence(
["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific")
),
TimedeltaIndex(["1 day", "2 day"], name="x"),
],
)
def test_nat_arithmetic_index(op_name, value):
# see gh-11718
exp_name = "x"
exp_data = [NaT] * 2
if is_datetime64_any_dtype(value.dtype) and "plus" in op_name:
expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)
else:
expected = TimedeltaIndex(exp_data, name=exp_name)
if not isinstance(value, Index):
expected = expected.array
op = _ops[op_name]
result = op(NaT, value)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"op_name",
["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
)
@pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence])
def test_nat_arithmetic_td64_vector(op_name, box):
# see gh-19124
vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
box_nat = box([NaT, NaT], dtype="timedelta64[ns]")
tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
@pytest.mark.parametrize(
"dtype,op,out_dtype",
[
("datetime64[ns]", operator.add, "datetime64[ns]"),
("datetime64[ns]", roperator.radd, "datetime64[ns]"),
("datetime64[ns]", operator.sub, "timedelta64[ns]"),
("datetime64[ns]", roperator.rsub, "timedelta64[ns]"),
("timedelta64[ns]", operator.add, "datetime64[ns]"),
("timedelta64[ns]", roperator.radd, "datetime64[ns]"),
("timedelta64[ns]", operator.sub, "datetime64[ns]"),
("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"),
],
)
def test_nat_arithmetic_ndarray(dtype, op, out_dtype):
other = np.arange(10).astype(dtype)
result = op(NaT, other)
expected = np.empty(other.shape, dtype=out_dtype)
expected.fill("NaT")
tm.assert_numpy_array_equal(result, expected)
def test_nat_pinned_docstrings():
# see gh-17327
assert NaT.ctime.__doc__ == datetime.ctime.__doc__
def test_to_numpy_alias():
# GH 24653: alias .to_numpy() for scalars
expected = NaT.to_datetime64()
result = NaT.to_numpy()
assert isna(expected) and isna(result)
# GH#44460
result = NaT.to_numpy("M8[s]")
assert isinstance(result, np.datetime64)
assert result.dtype == "M8[s]"
result = NaT.to_numpy("m8[ns]")
assert isinstance(result, np.timedelta64)
assert result.dtype == "m8[ns]"
result = NaT.to_numpy("m8[s]")
assert isinstance(result, np.timedelta64)
assert result.dtype == "m8[s]"
with pytest.raises(ValueError, match="NaT.to_numpy dtype must be a "):
NaT.to_numpy(np.int64)
@pytest.mark.parametrize(
"other",
[
Timedelta(0),
Timedelta(0).to_pytimedelta(),
pytest.param(
Timedelta(0).to_timedelta64(),
marks=pytest.mark.xfail(
reason="td64 doesn't return NotImplemented, see numpy#17017"
),
),
Timestamp(0),
Timestamp(0).to_pydatetime(),
pytest.param(
Timestamp(0).to_datetime64(),
marks=pytest.mark.xfail(
reason="dt64 doesn't return NotImplemented, see numpy#17017"
),
),
Timestamp(0).tz_localize("UTC"),
NaT,
],
)
def test_nat_comparisons(compare_operators_no_eq_ne, other):
# GH 26039
opname = compare_operators_no_eq_ne
assert getattr(NaT, opname)(other) is False
op = getattr(operator, opname.strip("_"))
assert op(NaT, other) is False
assert op(other, NaT) is False
@pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")])
def test_nat_comparisons_numpy(other):
# Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons
# pass, this test can be removed
assert not NaT == other
assert NaT != other
assert not NaT < other
assert not NaT > other
assert not NaT <= other
assert not NaT >= other
@pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")])
@pytest.mark.parametrize(
"symbol_and_op",
[("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)],
)
def test_nat_comparisons_invalid(other_and_type, symbol_and_op):
# GH#35585
other, other_type = other_and_type
symbol, op = symbol_and_op
assert not NaT == other
assert not other == NaT
assert NaT != other
assert other != NaT
msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'"
with pytest.raises(TypeError, match=msg):
op(NaT, other)
msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'"
with pytest.raises(TypeError, match=msg):
op(other, NaT)
@pytest.mark.parametrize(
"other",
[
np.array(["foo"] * 2, dtype=object),
np.array([2, 3], dtype="int64"),
np.array([2.0, 3.5], dtype="float64"),
],
ids=["str", "int", "float"],
)
def test_nat_comparisons_invalid_ndarray(other):
# GH#40722
expected = np.array([False, False])
result = NaT == other
tm.assert_numpy_array_equal(result, expected)
result = other == NaT
tm.assert_numpy_array_equal(result, expected)
expected = np.array([True, True])
result = NaT != other
tm.assert_numpy_array_equal(result, expected)
result = other != NaT
tm.assert_numpy_array_equal(result, expected)
for symbol, op in [
("<=", operator.le),
("<", operator.lt),
(">=", operator.ge),
(">", operator.gt),
]:
msg = f"'{symbol}' not supported between"
with pytest.raises(TypeError, match=msg):
op(NaT, other)
if other.dtype == np.dtype("object"):
# uses the reverse operator, so symbol changes
msg = None
with pytest.raises(TypeError, match=msg):
op(other, NaT)
def test_compare_date(fixed_now_ts):
# GH#39151 comparing NaT with date object is deprecated
# See also: tests.scalar.timestamps.test_comparisons::test_compare_date
dt = fixed_now_ts.to_pydatetime().date()
for left, right in [(NaT, dt), (dt, NaT)]:
assert not left == right
assert left != right
with tm.assert_produces_warning(FutureWarning):
assert not left < right
with tm.assert_produces_warning(FutureWarning):
assert not left <= right
with tm.assert_produces_warning(FutureWarning):
assert not left > right
with tm.assert_produces_warning(FutureWarning):
assert not left >= right
# Once the deprecation is enforced, the following assertions
# can be enabled:
# assert not left == right
# assert left != right
#
# with pytest.raises(TypeError):
# left < right
# with pytest.raises(TypeError):
# left <= right
# with pytest.raises(TypeError):
# left > right
# with pytest.raises(TypeError):
# left >= right
@pytest.mark.parametrize(
"obj",
[
offsets.YearEnd(2),
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.MonthEnd(2),
offsets.MonthEnd(12),
offsets.Day(2),
offsets.Day(5),
offsets.Hour(24),
offsets.Hour(3),
offsets.Minute(),
np.timedelta64(3, "h"),
np.timedelta64(4, "h"),
np.timedelta64(3200, "s"),
np.timedelta64(3600, "s"),
np.timedelta64(3600 * 24, "s"),
np.timedelta64(2, "D"),
np.timedelta64(365, "D"),
timedelta(-2),
timedelta(365),
timedelta(minutes=120),
timedelta(days=4, minutes=180),
timedelta(hours=23),
timedelta(hours=23, minutes=30),
timedelta(hours=48),
],
)
def test_nat_addsub_tdlike_scalar(obj):
assert NaT + obj is NaT
assert obj + NaT is NaT
assert NaT - obj is NaT
def test_pickle():
# GH#4606
p = tm.round_trip_pickle(NaT)
assert p is NaT
def test_freq_deprecated():
with tm.assert_produces_warning(FutureWarning, match="deprecated"):
NaT.freq
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import asyncio
import struct
import sys
from test_framework import messages
from test_framework.mininode import P2PDataStore, NetworkThread
from test_framework.test_framework import SyscoinTestFramework
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
command = b'badmsg'
def __init__(self, *, str_data):
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
def serialize(self):
return messages.ser_string(self.str_data)
def __repr__(self):
return "{}(data={})".format(self.command, self.str_data)
class InvalidMessagesTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
"""
. Test msg header
0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
that it isn't an effective DoS against the node.
1. Send an oversized (4MB+) message and check that we're disconnected.
2. Send a few messages with an incorrect data size in the header, ensure the
messages are ignored.
"""
self.test_magic_bytes()
self.test_checksum()
self.test_size()
self.test_command()
node = self.nodes[0]
self.node = node
node.add_p2p_connection(P2PDataStore())
conn2 = node.add_p2p_connection(P2PDataStore())
        msg_limit = 32 * 1024 * 1024  # 32MiB, per MAX_PROTOCOL_MESSAGE_LENGTH
        valid_data_limit = msg_limit - 5  # Account for the 5-byte compact-size length prefix
#
# 0.
#
# Send as large a message as is valid, ensure we aren't disconnected but
# also can't exhaust resources.
#
msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
assert len(msg_at_size.serialize()) == msg_limit
self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
# Upstream uses 80 iterations here, but its messages are 8x smaller.
# So with 10 iterations, we get the same amount of junk data sent
# to the node. If we use 80 here, Python uses an insane amount of
# memory by itself.
for _ in range(10):
node.p2p.send_message(msg_at_size)
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
for _ in range(20):
conn2.sync_with_ping(timeout=2)
# Peer 1, despite serving up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
node.p2p.sync_with_ping(timeout=400)
assert node.p2p.is_connected
#
# 1.
#
# Send an oversized message, ensure we're disconnected.
#
# Under macOS this test is skipped due to an unexpected error code
# returned from the closing socket which python/asyncio does not
# yet know how to handle.
#
if sys.platform != 'darwin':
msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
assert len(msg_over_size.serialize()) == (msg_limit + 1)
# An unknown message type (or *any* message type) over
# MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
node.p2p.send_message(msg_over_size)
node.p2p.wait_for_disconnect(timeout=4)
node.disconnect_p2ps()
conn = node.add_p2p_connection(P2PDataStore())
conn.wait_for_verack()
else:
self.log.info("Skipping test p2p_invalid_messages/1 (oversized message) under macOS")
#
# 2.
#
# Send messages with an incorrect data size in the header.
#
actual_size = 100
msg = msg_unrecognized(str_data="b" * actual_size)
# TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
for wrong_size in (2, 77, 78, 79):
self.log.info("Sending a message with incorrect size of {}".format(wrong_size))
# Unmodified message should submit okay.
node.p2p.send_and_ping(msg)
# A message lying about its data size results in a disconnect when the incorrect
# data size is less than the actual size.
#
# TODO: why does behavior change at 78 bytes?
#
node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size))
# For some reason unknown to me, we sometimes have to push additional data to the
# peer in order for it to realize a disconnect.
try:
node.p2p.send_message(messages.msg_ping(nonce=123123))
except IOError:
pass
node.p2p.wait_for_disconnect(timeout=10)
node.disconnect_p2ps()
node.add_p2p_connection(P2PDataStore())
# Node is still up.
conn = node.add_p2p_connection(P2PDataStore())
conn.sync_with_ping()
def test_magic_bytes(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
async def swap_magic_bytes():
conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes
conn.magic_bytes = b'\x00\x11\x22\x32'
# Call .result() to block until the atomic swap is complete, otherwise
# we might run into races later on
asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result()
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']):
conn.send_message(messages.msg_ping(nonce=0xff))
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_checksum(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
4 + # magic
12 + # command
                4     # len
)
# modify checksum
msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_size(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
4 + # magic
12 # command
)
# modify len to MAX_SIZE + 1
msg = msg[:cut_len] + struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_command(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']):
msg = msg_unrecognized(str_data="d")
msg.command = b'\xff' * 12
msg = conn.build_message(msg)
# Modify command
msg = msg[:7] + b'\x00' + msg[7 + 1:]
self.nodes[0].p2p.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
def _tweak_msg_data_size(self, message, wrong_size):
"""
Return a raw message based on another message but with an incorrect data size in
the message header.
"""
raw_msg = self.node.p2p.build_message(message)
bad_size_bytes = struct.pack("<I", wrong_size)
num_header_bytes_before_size = 4 + 12
# Replace the correct data size in the message with an incorrect one.
raw_msg_with_wrong_size = (
raw_msg[:num_header_bytes_before_size] +
bad_size_bytes +
raw_msg[(num_header_bytes_before_size + len(bad_size_bytes)):]
)
assert len(raw_msg) == len(raw_msg_with_wrong_size)
return raw_msg_with_wrong_size
if __name__ == '__main__':
InvalidMessagesTest().main()
|
|
# 6.00 Problem Set 8
#
# Intelligent Course Advisor
#
# Name: Felipo Soranz
# Time:
# 18:18 started
# 18:24 problem 1
# 19:00 problem 2
# 19:17 problem 3
import time
SUBJECT_FILENAME = "subjects.txt"
VALUE, WORK = 0, 1
#
# Problem 1: Building A Subject Dictionary
#
def loadSubjects(filename):
"""
Returns a dictionary mapping subject name to (value, work), where the name
is a string and the value and work are integers. The subject information is
read from the file named by the string filename. Each line of the file
contains a string of the form "name,value,work".
returns: dictionary mapping subject name to (value, work)
"""
    subjects = {}
    with open(filename) as inputFile:
        for line in inputFile:
            course, value, work = line.strip().split(',')
            subjects[course] = (int(value), int(work))
    return subjects
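# Example (sketch): a subjects.txt line such as "6.00,10,1" would be parsed as
#   loadSubjects(SUBJECT_FILENAME)["6.00"] == (10, 1)
# ("6.00,10,1" is an illustrative line, not necessarily present in the real file.)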
def printSubjects(subjects):
"""
Prints a string containing name, value, and work of each subject in
the dictionary of subjects and total value and work of all subjects
"""
totalVal, totalWork = 0,0
if len(subjects) == 0:
return 'Empty SubjectList'
res = 'Course\tValue\tWork\n======\t====\t=====\n'
subNames = list(subjects.keys())
subNames.sort()
for s in subNames:
val = subjects[s][VALUE]
work = subjects[s][WORK]
res = res + s + '\t' + str(val) + '\t' + str(work) + '\n'
totalVal += val
totalWork += work
res = res + '\nTotal Value:\t' + str(totalVal) +'\n'
res = res + 'Total Work:\t' + str(totalWork) + '\n'
print(res)
def cmpValue(subInfo1, subInfo2):
"""
Returns True if value in (value, work) tuple subInfo1 is GREATER than
value in (value, work) tuple in subInfo2
"""
val1 = subInfo1[VALUE]
val2 = subInfo2[VALUE]
return val1 > val2
def cmpWork(subInfo1, subInfo2):
"""
Returns True if work in (value, work) tuple subInfo1 is LESS than than work
in (value, work) tuple in subInfo2
"""
work1 = subInfo1[WORK]
work2 = subInfo2[WORK]
return work1 < work2
def cmpRatio(subInfo1, subInfo2):
"""
Returns True if value/work in (value, work) tuple subInfo1 is
GREATER than value/work in (value, work) tuple in subInfo2
"""
val1 = subInfo1[VALUE]
val2 = subInfo2[VALUE]
work1 = subInfo1[WORK]
work2 = subInfo2[WORK]
return float(val1) / work1 > float(val2) / work2
#
# Problem 2: Subject Selection By Greedy Optimization
#
def greedyAdvisor(subjects, maxWork, comparator):
"""
Returns a dictionary mapping subject name to (value, work) which includes
subjects selected by the algorithm, such that the total work of subjects in
the dictionary is not greater than maxWork. The subjects are chosen using
a greedy algorithm. The subjects dictionary should not be mutated.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
comparator: function taking two tuples and returning a bool
returns: dictionary mapping subject name to (value, work)
"""
selected = {}
changed = True
while changed:
changed = False
best = None
for key in subjects.keys():
#print("key =", key)
#print("best =", best)
if key in selected:
continue
            elif subjects[key][WORK] <= maxWork and (best is None or comparator(subjects[key], subjects[best])):
best = key
changed = True
#print("found better: ", best, subjects[best])
if changed:
maxWork -= subjects[best][WORK]
selected[best] = subjects[best]
return selected
# Tests
##smallCatalog = {'6.00': (16, 8), '1.00': (7, 7), '6.01': (5, 3), '15.01': (9, 6)}
##print("cmpValue")
##printSubjects(greedyAdvisor(smallCatalog, 15, cmpValue))
##print("cmpWork")
##printSubjects(greedyAdvisor(smallCatalog, 15, cmpWork))
##print("cmpRatio")
##printSubjects(greedyAdvisor(smallCatalog, 15, cmpRatio))
##
##subjects = loadSubjects(SUBJECT_FILENAME)
##print("cmpValue")
##printSubjects(greedyAdvisor(subjects, 15, cmpValue))
##print("cmpWork")
##printSubjects(greedyAdvisor(subjects, 15, cmpWork))
##print("cmpRatio")
##printSubjects(greedyAdvisor(subjects, 15, cmpRatio))
def bruteForceAdvisor(subjects, maxWork):
"""
Returns a dictionary mapping subject name to (value, work), which
represents the globally optimal selection of subjects using a brute force
algorithm.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
returns: dictionary mapping subject name to (value, work)
"""
nameList = list(subjects.keys())
tupleList = list(subjects.values())
bestSubset, bestSubsetValue = \
bruteForceAdvisorHelper(tupleList, maxWork, 0, None, None, [], 0, 0)
outputSubjects = {}
for i in bestSubset:
outputSubjects[nameList[i]] = tupleList[i]
return outputSubjects
def bruteForceAdvisorHelper(subjects, maxWork, i, bestSubset, bestSubsetValue,
subset, subsetValue, subsetWork):
global num_calls
num_calls += 1
# Hit the end of the list.
if i >= len(subjects):
        if bestSubset is None or subsetValue > bestSubsetValue:
# Found a new best.
return subset[:], subsetValue
else:
# Keep the current best.
return bestSubset, bestSubsetValue
else:
s = subjects[i]
# Try including subjects[i] in the current working subset.
if subsetWork + s[WORK] <= maxWork:
subset.append(i)
bestSubset, bestSubsetValue = bruteForceAdvisorHelper(subjects,
maxWork, i+1, bestSubset, bestSubsetValue, subset,
subsetValue + s[VALUE], subsetWork + s[WORK])
subset.pop()
bestSubset, bestSubsetValue = bruteForceAdvisorHelper(subjects,
maxWork, i+1, bestSubset, bestSubsetValue, subset,
subsetValue, subsetWork)
return bestSubset, bestSubsetValue
#
# Problem 3: Subject Selection By Brute Force
#
def bruteForceTime():
"""
Runs tests on bruteForceAdvisor and measures the time required to compute
an answer.
"""
subjects = loadSubjects(SUBJECT_FILENAME)
for work in range(1, 10):
start = time.time()
bruteForceAdvisor(subjects, work)
elapsed = time.time() - start
print("Elapsed time for work =", work, " was =", elapsed, "seconds")
# Problem 3 Observations
# ======================
#
# Observation: judging from the timings below, each additional unit of maxWork
# roughly triples the running time, i.e. the brute-force search grows roughly
# exponentially with maxWork and becomes impractical somewhere around maxWork = 10.
#bruteForceTime()
##Elapsed time for work = 1 was = 0.016000032424926758 seconds
##Elapsed time for work = 2 was = 0.03099989891052246 seconds
##Elapsed time for work = 3 was = 0.12400007247924805 seconds
##Elapsed time for work = 4 was = 0.42100000381469727 seconds
##Elapsed time for work = 5 was = 1.2639999389648438 seconds
##Elapsed time for work = 6 was = 3.5879998207092285 seconds
##Elapsed time for work = 7 was = 12.869999885559082 seconds
##Elapsed time for work = 8 was = 34.37399983406067 seconds
##Elapsed time for work = 9 was = 92.40900015830994 seconds
#
# Problem 4: Subject Selection By Dynamic Programming
#
def dpAdvisor(subjects, maxWork):
"""
Returns a dictionary mapping subject name to (value, work) that contains a
set of subjects that provides the maximum value without exceeding maxWork.
subjects: dictionary mapping subject name to (value, work)
maxWork: int >= 0
returns: dictionary mapping subject name to (value, work)
"""
courses = []
works = []
values = []
for key in subjects.keys():
courses.append(key)
        works.append(subjects[key][WORK])
        values.append(subjects[key][VALUE])
memo = {}
winners = dpAdvisorHelper(works, values, len(values) - 1, maxWork, memo)
results = {}
for i in winners:
results[courses[i]] = (values[i], works[i])
return results
# Note: dpAdvisorHelper below memoizes on (i, available_work), i.e. a standard
# 0/1-knapsack formulation over the subjects.
def dpAdvisorHelper(works, values, i, available_work, memo):
global num_calls
num_calls += 1
try:
return memo[(i, available_work)]
except KeyError:
pass
if i == 0:
if works[i] <= available_work:
memo[(i, available_work)] = [i]
return [i]
else:
return []
without_i = dpAdvisorHelper(works, values, i - 1, available_work, memo)
if works[i] > available_work:
memo[(i, available_work)] = without_i
return without_i
else:
with_i = [i] + dpAdvisorHelper(works, values, i - 1, available_work - works[i], memo)
if branch_value(with_i, values) >= branch_value(without_i, values):
winners = with_i
else:
winners = without_i
memo[(i, available_work)] = winners
return winners
def branch_value(branch, value):
total = 0
for i in branch:
total += value[i]
return total
##subjects = {'a1': (16, 8), 'b1': (7, 7), 'c1': (5, 3), 'd1': (9, 6)}
##work = 20
##subjects = loadSubjects(SUBJECT_FILENAME)
##work = 5
##print("\n>>> dpAdvisor <<< \n")
##num_calls = 0
##printSubjects(dpAdvisor(subjects, work))
##print("number of calls =", num_calls)
##
##print("\n>>> bruteForceAdvisor <<< \n")
##num_calls = 0
##printSubjects(bruteForceAdvisor(subjects, work))
##print("number of calls =", num_calls)
num_calls = 0
#
# Problem 5: Performance Comparison
#
def dpTime():
"""
Runs tests on dpAdvisor and measures the time required to compute an
answer.
"""
global num_calls
subjects = loadSubjects(SUBJECT_FILENAME)
for work in range(5, 100, 10):
start = time.time()
num_calls = 0
result = dpAdvisor(subjects, work)
#printSubjects(result)
elapsed = time.time() - start
print("Elapsed time for work =", work, " was =", elapsed, "seconds")
# Problem 5 Observations
# ======================
#
# Observation: judging from the timings below, dpAdvisor's running time grows
# roughly linearly with maxWork and stays around one second even for maxWork = 95,
# whereas bruteForceAdvisor already needed ~92 seconds for maxWork = 9.
##dpTime()
####Elapsed time for work = 5 was = 0.019999980926513672 seconds
####Elapsed time for work = 15 was = 0.08999991416931152 seconds
####Elapsed time for work = 25 was = 0.15999984741210938 seconds
####Elapsed time for work = 35 was = 0.25999999046325684 seconds
####Elapsed time for work = 45 was = 0.3710000514984131 seconds
####Elapsed time for work = 55 was = 0.49899983406066895 seconds
####Elapsed time for work = 65 was = 0.35899996757507324 seconds
####Elapsed time for work = 75 was = 0.7799999713897705 seconds
####Elapsed time for work = 85 was = 0.9200000762939453 seconds
####Elapsed time for work = 95 was = 1.1349999904632568 seconds
|
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimator are pickable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous construction
    # of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
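def demo_dense_sparse_equivalence():
    """Hedged standalone sketch (not part of the test suite above): shows the
    dense/sparse equivalence property these tests assert, using only public
    scikit-learn API.  The random data and tree parameters are illustrative."""
    import numpy as np
    from scipy.sparse import csr_matrix
    from sklearn.tree import DecisionTreeClassifier
    rng = np.random.RandomState(0)
    X = rng.binomial(1, 0.3, size=(40, 5)).astype(np.float64)
    y = rng.randint(0, 2, size=40)
    dense = DecisionTreeClassifier(random_state=0, max_depth=3).fit(X, y)
    sparse = DecisionTreeClassifier(random_state=0, max_depth=3).fit(csr_matrix(X), y)
    # the fitted trees should agree, so predictions match between formats
    assert np.array_equal(dense.predict(X), sparse.predict(csr_matrix(X)))
    return dense, sparse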
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2014, Kersten Doering <[email protected]>, Christian Senger <[email protected]>
"""
import xappy
import sys
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from SynonymParser import SynonymParser
class Article():
user = "parser"
password = "parser"
host = "localhost"
port = "5432"
db = ""
con = "postgresql://"+user+":"+password+"@"+host+":"+port+"/"
    #Kersten: these attributes are initialized by the static function getConnection(database)
    base = None     # declarative_base()
    engine = None   # create_engine(con, pool_recycle=900, echo=False)
    session = None  # sessionmaker(bind=engine)()
__count = 0
__countMsg = ""
def __init__(
self,
pmid
):
self.__pmid = int(pmid)
self.__title = None
self.__abstract = None
self.__chemicals= []
self.__keywords = []
self.__mesh = []
self.__loadStub()
self.__loadChemicals()
self.__loadKeywords()
self.__loadMeSH()
Article.__count += 1
nbs = len(Article.__countMsg)
Article.__countMsg = "article %s created" % (str(Article.__count))
sys.stdout.write('\b' * nbs + Article.__countMsg)
@staticmethod
def getConnection(database):
Article.con = "postgresql://"+Article.user+":"+Article.password+"@"+Article.host+":"+Article.port+"/"+database
Article.base = declarative_base()
Article.engine = create_engine(Article.con, pool_recycle = 900, echo=False)
        Article.base.metadata.create_all(Article.engine)  # create_all() returns None; keep Article.base bound to the declarative base
Article.session = sessionmaker(bind=Article.engine)()
def getPMID(self):
return self.__pmid
def getTitle(self):
return self.__title
def getAbstract(self):
return self.__abstract
def getChemicals(self):
return self.__chemicals
def getKeywords(self):
return self.__keywords
def getMeSH(self):
return self.__mesh
def __loadStub(self):
pmid = str(self.__pmid)
#print "####",pmid,"####"#in this case it is always one pmid - it is not a "complete" join
stmt = """
SELECT
pmid,
article_title as title,
abstract_text as abstract
FROM
pubmed.tbl_medline_citation
LEFT OUTER JOIN
pubmed.tbl_abstract
ON pmid = fk_pmid
WHERE
pmid = '"""+pmid+"""'
;
"""
articles = Article.session.query(
"pmid",
"title",
"abstract"
).from_statement(stmt)
for article in articles:
self.__title = article.title
self.__abstract = article.abstract
            break
def __loadChemicals(self):
pmid = str(self.__pmid)
stmt = """
SELECT
name_of_substance AS substance
FROM
pubmed.tbl_chemical
WHERE
fk_pmid = '"""+pmid+"""'
ORDER BY
name_of_substance;
"""
substances = Article.session.query(
"substance"
).from_statement(stmt)
for substance in substances:
self.__chemicals.append(substance.substance)
def __loadKeywords(self):
pmid = str(self.__pmid)
stmt = """
SELECT
keyword
FROM
pubmed.tbl_keyword
WHERE
fk_pmid = '"""+pmid+"""'
ORDER BY
keyword;
"""
keywords = Article.session.query(
"keyword"
).from_statement(stmt)
for keyword in keywords:
self.__keywords.append(keyword.keyword)
def __loadMeSH(self):
pmid = str(self.__pmid)
stmt = """
SELECT
descriptor_name
FROM
pubmed.tbl_mesh_heading
WHERE
fk_pmid = '"""+pmid+"""'
ORDER BY
descriptor_name;
"""
mesh_terms = Article.session.query(
"descriptor_name"
).from_statement(stmt)
for descriptor_name in mesh_terms:
self.__mesh.append(descriptor_name.descriptor_name)
@staticmethod
def getArticlesByYear(b_year, e_year):
b_year = int(b_year)
e_year = int(e_year)
stmt = """
SELECT
pmc.fk_pmid
FROM
pubmed.tbl_journal pmc
WHERE
pub_date_year >= """+str(b_year)+"""
AND
pub_date_year <= """+str(e_year)+"""
;
"""
pmids = Article.session.query(
"fk_pmid"
).from_statement(stmt)
return [Article(pmid.fk_pmid) for pmid in pmids]
@staticmethod
def closeConnection():
Article.session.close()
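# Hedged usage sketch: assumes a local PostgreSQL instance reachable with the
# "parser"/"parser" credentials above and a database (name illustrative) that was
# filled by the PubMed parser; the year range is likewise illustrative.
if __name__ == "__main__":
    Article.getConnection("pubmed_db")
    articles = Article.getArticlesByYear(2005, 2006)
    for article in articles[:3]:
        print("%s\t%s" % (article.getPMID(), article.getTitle()))
    Article.closeConnection()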
|
|
#!/usr/bin/python
import itertools
class FPNode:
def __init__(self,id, parent):
self.frequency = 0
self.id = id
self.next = None
self.children = dict()
self.parent = parent
def __str__(self):
return "id:%s freq:%s" % (self.id, self.frequency)
def add(self,pattern, index,tree):
if len(pattern) == index+1 and self.id == pattern[index][0]:
self.frequency += pattern[index][1]
else:
if not self.children.has_key(pattern[index+1][0]):
n = FPNode(pattern[index+1][0],self)
self.children[pattern[index+1][0]] = n
tree.insert_header(n)
self.frequency += pattern[index][1]
self.children[pattern[index+1][0]].add(pattern,index+1,tree)
def str_val(self, mappings):
return self.get_str_val('',mappings)
def get_str_val(self,spaces,mappings):
accum = ''
if not self.id == 'root':
accum = '%s%s: %s' % (spaces, str(mappings[self.id].item), str(self.frequency))+'\n'
else:
accum = 'root\n'
for _,v in self.children.items():
accum += v.get_str_val(spaces+ ' ',mappings)
return accum
class HeaderTableItem:
def __init__(self,id,item,frequency):
self.id = id
self.item = item
self.node = None
self.frequency = frequency
def __str__(self):
s = 'item: %s id: %s freq: %s- ' % (self.item,self.id,self.frequency)
curr = self.node
while curr != None :
s += ' '+str(curr)
curr = curr.next
return s
def create_fp_tree(datasource,support):
datasource = [[(y,1) for y in x] for x in datasource]
return FPTree(datasource,support)
class FPTree:
def __init__(self,datasource, support, base_tree = None):
self.base_tree = base_tree == None and self or base_tree
self.support = support
self.root = FPNode('root',None)
self.lookup = {}
header = dict()
for transaction in datasource:
for item in transaction:
if not item[0] in header.keys():
header[item[0]] = 0
header[item[0]] += item[1]
self.header_table=[]
self.mapping_table = dict()
for x in sorted([(value,key) for (key,value) in header.items() if value >= self.support], reverse= True):
self.header_table.append(HeaderTableItem(len(self.header_table),x[1],x[0]))
self.mapping_table[x[1]] = len(self.header_table) - 1
for transaction in datasource:
trans = [(self.mapping_table[x[0]],x[1]) for x in transaction if self.mapping_table.has_key(x[0])]
trans.sort()
if len(trans) > 0:
if not self.root.children.has_key(trans[0][0]):
self.root.children[trans[0][0]] = FPNode(trans[0][0], self.root)
self.insert_header(self.root.children[trans[0][0]])
self.root.children[trans[0][0]].add(trans,0,self)
for i in range(len(self.header_table)):
self.lookup[self.header_table[i].item] = i
def __str__(self):
return self.root.str_val(self.header_table)
def insert_header(self, n):
curr = self.header_table[n.id].node
if curr == None:
self.header_table[n.id].node = n
else:
while curr.next != None :
curr = curr.next
curr.next = n
def conditional_tree_datasource(self,currlink):
patterns = []
while currlink != None:
support = currlink.frequency
currnode = currlink.parent
pattern = []
while currnode != self.root:
pattern.append((self.header_table[ currnode.id].item,support))
currnode = currnode.parent
if len(pattern) > 0:
patterns.append( pattern)
currlink = currlink.next
return patterns
def single_chain(self):
curr = self.root
while curr != None:
if len(curr.children) > 1:
return False
if len(curr.children) == 0:
return True
curr = curr.children.values()[0]
return True
def sort_pattern(self, pattern):
return sorted(pattern, key=lambda x: self.lookup[x])
def fp_growth(self):
for px in self.fp_growth_raw():
yield (self.sort_pattern([item[0] for item in px]), min([item[1] for item in px]))
def fp_growth_raw(self, pattern = []):
if self.single_chain():
optional = []
if len(self.root.children) > 0:
curr = self.root.children.values()[0]
while True:
optional.append((self.header_table[curr.id].item,curr.frequency))
if len(curr.children) > 0:
curr = curr.children.values()[0]
else:
break
for i in range(1,len(optional)+1):
for permpat in itertools.combinations(optional, i):
p = [x for x in permpat]
p.extend(pattern)
yield p
else:
for x in range(len(self.header_table)-1,-1,-1):
if self.support <= self.header_table[x].frequency:
pattern_base = [y for y in pattern]
pattern_base.append((self.header_table[x].item,self.header_table[x].frequency))
yield pattern_base
tree = FPTree(self.conditional_tree_datasource(self.header_table[x].node), self.support)
for px in tree.fp_growth_raw(pattern_base):
yield px
#class PrefixNode:
# def __init__(self,key):
# self.key = key
# self.value = -1
# self.children = {}
# def add(self,pattern_freq, index):
# if len(pattern_freq) == index+1 and self.key == pattern_freq[0][index]:
# self.value = pattern_freq[1]
# else:
# if not self.children.has_key(pattern_freq[0][index+1]):
# n = PrefixNode(pattern_freq[0][index+1])
# self.children[pattern_freq[0][index+1]] = n
# self.children[pattern_freq[0][index+1]].add(pattern,index+1)
#
# def get(self,pattern, index):
# if len(pattern) == index+1 and self.key == pattern[index]:
# return self.value
# else:
# if not self.children.has_key(pattern[index+1]):
# return None
# return self.children[pattern[index+1]].get(pattern,index+1)
#
#
#class PrefixTree:
# def __init__(self):
# self.root = PrefixNode('root')
#
# def add(self, pattern_freq):
# if not self.root.children.has_key(pattern_freq[0][0]):
# self.root.children[pattern_freq[0][0]] = PrefixNode(pattern_freq[0][0])
#
# self.root.children[pattern_freq[0][0]].add(pattern_freq,0)
#
# def get(self, pattern):
# if not self.root.children.has_key(pattern[0]):
# return None
# if len(pattern) == 1:
# return self.root.children[pattern[0]].value
# return self.root.children[pattern[0]].get(pattern,0)
#
#def create_prefix_tree(tree):
# p_tree = PrefixTree()
# for x in t.fp_growth():
# p_tree.add(x)
def class_association_rules(prefix_tree, pattern_freq,threshold):
for i in range(len(pattern_freq[0])):
rhs = pattern_freq[0][i]
lhs = [x for x in pattern_freq[0] if x != rhs]
confidence = prefix_tree.get(lhs) / pattern_freq[1]
yield (lhs,rhs,confidence)
arr = [['hannah','spencer'],['hannah','spencer','rika','fred','mom'],['hannah','rika','fred'],['hannah','spencer','gordon']]
t = create_fp_tree(arr,0) # create a tree from arr with minimum frequency 0
for p in t.fp_growth():
print p
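# Hedged usage sketch: mine the same transactions with a minimum support of 2,
# so items seen in fewer than 2 transactions ('mom', 'gordon') are pruned from
# the header table before pattern generation.
t2 = create_fp_tree(arr, 2)
for pattern, support in t2.fp_growth():
    print pattern, support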
|
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.applehelp
~~~~~~~~~~~~~~~~~~~~~~~~~
Build Apple help books.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import codecs
import pipes
from os import path
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import copy_static_entry
from sphinx.util.osutil import copyfile, ensuredir
from sphinx.util.console import bold
from sphinx.util.pycompat import htmlescape
from sphinx.util.matching import compile_matchers
from sphinx.errors import SphinxError
import plistlib
import subprocess
# Use plistlib.dump in 3.4 and above
try:
write_plist = plistlib.dump
except AttributeError:
write_plist = plistlib.writePlist
# False access page (used because helpd expects strict XHTML)
access_page_template = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"\
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex" />
<meta http-equiv="refresh" content="0;url=%(toc)s" />
</head>
<body>
</body>
</html>
'''
class AppleHelpIndexerFailed(SphinxError):
category = 'Help indexer failed'
class AppleHelpCodeSigningFailed(SphinxError):
category = 'Code signing failed'
class AppleHelpBuilder(StandaloneHTMLBuilder):
"""
Builder that outputs an Apple help book. Requires Mac OS X as it relies
on the ``hiutil`` command line tool.
"""
name = 'applehelp'
# don't copy the reST source
copysource = False
supported_image_types = ['image/png', 'image/gif', 'image/jpeg',
'image/tiff', 'image/jp2', 'image/svg+xml']
# don't add links
add_permalinks = False
# this is an embedded HTML format
embedded = True
# don't generate the search index or include the search page
search = False
def init(self):
super(AppleHelpBuilder, self).init()
# the output files for HTML help must be .html only
self.out_suffix = '.html'
if self.config.applehelp_bundle_id is None:
raise SphinxError('You must set applehelp_bundle_id before '
'building Apple Help output')
self.bundle_path = path.join(self.outdir,
self.config.applehelp_bundle_name +
'.help')
self.outdir = path.join(self.bundle_path,
'Contents',
'Resources',
self.config.applehelp_locale + '.lproj')
def handle_finish(self):
super(AppleHelpBuilder, self).handle_finish()
self.finish_tasks.add_task(self.copy_localized_files)
self.finish_tasks.add_task(self.build_helpbook)
def copy_localized_files(self):
source_dir = path.join(self.confdir,
self.config.applehelp_locale + '.lproj')
target_dir = self.outdir
if path.isdir(source_dir):
self.info(bold('copying localized files... '), nonl=True)
ctx = self.globalcontext.copy()
matchers = compile_matchers(self.config.exclude_patterns)
copy_static_entry(source_dir, target_dir, self, ctx,
exclude_matchers=matchers)
self.info('done')
def build_helpbook(self):
contents_dir = path.join(self.bundle_path, 'Contents')
resources_dir = path.join(contents_dir, 'Resources')
language_dir = path.join(resources_dir,
self.config.applehelp_locale + '.lproj')
for d in [contents_dir, resources_dir, language_dir]:
ensuredir(d)
# Construct the Info.plist file
toc = self.config.master_doc + self.out_suffix
info_plist = {
'CFBundleDevelopmentRegion': self.config.applehelp_dev_region,
'CFBundleIdentifier': self.config.applehelp_bundle_id,
'CFBundleInfoDictionaryVersion': '6.0',
'CFBundlePackageType': 'BNDL',
'CFBundleShortVersionString': self.config.release,
'CFBundleSignature': 'hbwr',
'CFBundleVersion': self.config.applehelp_bundle_version,
'HPDBookAccessPath': '_access.html',
'HPDBookIndexPath': 'search.helpindex',
'HPDBookTitle': self.config.applehelp_title,
'HPDBookType': '3',
'HPDBookUsesExternalViewer': False,
}
if self.config.applehelp_icon is not None:
info_plist['HPDBookIconPath'] \
= path.basename(self.config.applehelp_icon)
if self.config.applehelp_kb_url is not None:
info_plist['HPDBookKBProduct'] = self.config.applehelp_kb_product
info_plist['HPDBookKBURL'] = self.config.applehelp_kb_url
if self.config.applehelp_remote_url is not None:
info_plist['HPDBookRemoteURL'] = self.config.applehelp_remote_url
self.info(bold('writing Info.plist... '), nonl=True)
with open(path.join(contents_dir, 'Info.plist'), 'wb') as f:
write_plist(info_plist, f)
self.info('done')
# Copy the icon, if one is supplied
if self.config.applehelp_icon:
self.info(bold('copying icon... '), nonl=True)
try:
copyfile(path.join(self.srcdir, self.config.applehelp_icon),
path.join(resources_dir, info_plist['HPDBookIconPath']))
self.info('done')
except Exception as err:
self.warn('cannot copy icon file %r: %s' %
(path.join(self.srcdir, self.config.applehelp_icon),
err))
del info_plist['HPDBookIconPath']
# Build the access page
self.info(bold('building access page...'), nonl=True)
f = codecs.open(path.join(language_dir, '_access.html'), 'w')
try:
f.write(access_page_template % {
'toc': htmlescape(toc, quote=True),
'title': htmlescape(self.config.applehelp_title)
})
finally:
f.close()
self.info('done')
# Generate the help index
self.info(bold('generating help index... '), nonl=True)
args = [
self.config.applehelp_indexer_path,
'-Cf',
path.join(language_dir, 'search.helpindex'),
language_dir
]
if self.config.applehelp_index_anchors is not None:
args.append('-a')
if self.config.applehelp_min_term_length is not None:
args += ['-m', '%s' % self.config.applehelp_min_term_length]
if self.config.applehelp_stopwords is not None:
args += ['-s', self.config.applehelp_stopwords]
if self.config.applehelp_locale is not None:
args += ['-l', self.config.applehelp_locale]
if self.config.applehelp_disable_external_tools:
self.info('skipping')
self.warn('you will need to index this help book with:\n %s'
% (' '.join([pipes.quote(arg) for arg in args])))
else:
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = p.communicate()[0]
if p.returncode != 0:
raise AppleHelpIndexerFailed(output)
else:
self.info('done')
# If we've been asked to, sign the bundle
if self.config.applehelp_codesign_identity:
self.info(bold('signing help book... '), nonl=True)
args = [
self.config.applehelp_codesign_path,
'-s', self.config.applehelp_codesign_identity,
'-f'
]
args += self.config.applehelp_codesign_flags
args.append(self.bundle_path)
if self.config.applehelp_disable_external_tools:
self.info('skipping')
self.warn('you will need to sign this help book with:\n %s'
% (' '.join([pipes.quote(arg) for arg in args])))
else:
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = p.communicate()[0]
if p.returncode != 0:
raise AppleHelpCodeSigningFailed(output)
else:
self.info('done')
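# Hedged configuration sketch: a conf.py fragment (a separate file, not part of this
# module) showing settings this builder reads.  Only applehelp_bundle_id is checked
# explicitly in init() above; all values below are illustrative.
#
#   applehelp_bundle_id = 'org.example.myproject.help'   # required
#   applehelp_bundle_name = 'MyProject'
#   applehelp_title = 'MyProject Help'
#   applehelp_locale = 'en'
#   # On hosts without hiutil/codesign, skip the external tools; the builder then
#   # prints the equivalent indexing/signing commands instead of running them.
#   applehelp_disable_external_tools = True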
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v1 import volume_metadata
from cinder.api.v1 import volumes
import cinder.db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v1 import stubs
CONF = cfg.CONF
def return_create_volume_metadata_max(context, volume_id, metadata, delete):
return stub_max_volume_metadata()
def return_create_volume_metadata(context, volume_id, metadata, delete):
return stub_volume_metadata()
def return_new_volume_metadata(context, volume_id, metadata, delete):
return stub_new_volume_metadata()
def return_create_volume_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_volume_metadata_insensitive()
def return_volume_metadata(context, volume_id):
if not isinstance(volume_id, str) or not len(volume_id) == 36:
msg = 'id %s must be a uuid in return volume metadata' % volume_id
raise Exception(msg)
return stub_volume_metadata()
def return_empty_volume_metadata(context, volume_id):
return {}
def return_empty_container_metadata(context, volume_id, metadata, delete):
return {}
def delete_volume_metadata(context, volume_id, key):
pass
def stub_volume_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_new_volume_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_volume_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_max_volume_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_volume(context, volume_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'metadata': {},
'project_id': context.project_id}
def return_volume_nonexistent(context, volume_id):
raise exception.VolumeNotFound('bogus test message')
def fake_update_volume_metadata(self, context, volume, diff):
pass
class volumeMetaDataTest(test.TestCase):
def setUp(self):
super(volumeMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(cinder.db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.stubs.Set(self.volume_api, 'update_volume_metadata',
fake_update_volume_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.volume_controller = volumes.VolumeController(self.ext_mgr)
self.controller = volume_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v1/fake/volumes/%s/metadata' % self.req_id
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"metadata": {}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.volume_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_delete',
delete_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v1/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3", }}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
        # if keys are sent in both uppercase and lowercase, the response should
        # contain only the ones the server actually stored
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v1/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get',
return_volume_nonexistent)
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v1/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_create_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get', return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank('/v1.1/fake/volumes/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
|
|
import gc
import sys
import time
import blinker
from nose.tools import assert_raises
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
try:
from _test_async import test_send_async
except (SyntaxError, ImportError):
pass
def collect_acyclic_refs():
# cpython releases these immediately without a collection
if jython or pypy:
gc.collect()
if jython:
time.sleep(0.1)
class Sentinel(list):
"""A signal receipt accumulator."""
def make_receiver(self, key):
"""Return a generic signal receiver function logging as *key*
When connected to a signal, appends (key, sender, kw) to the Sentinel.
"""
def receiver(*sentby, **kw):
self.append((key, sentby[0], kw))
receiver.func_name = 'receiver_%s' % key
return receiver
def test_meta_connect():
sentinel = []
def meta_received(sender, **kw):
sentinel.append(dict(kw, sender=sender))
assert not blinker.receiver_connected.receivers
blinker.receiver_connected.connect(meta_received)
assert not sentinel
def receiver(sender, **kw):
pass
sig = blinker.Signal()
sig.connect(receiver)
assert sentinel == [{'sender': sig,
'receiver_arg': receiver,
'sender_arg': blinker.ANY,
'weak_arg': True}]
blinker.receiver_connected._clear_state()
def _test_signal_signals(sender):
sentinel = Sentinel()
sig = blinker.Signal()
connected = sentinel.make_receiver('receiver_connected')
disconnected = sentinel.make_receiver('receiver_disconnected')
receiver1 = sentinel.make_receiver('receiver1')
receiver2 = sentinel.make_receiver('receiver2')
assert not sig.receiver_connected.receivers
assert not sig.receiver_disconnected.receivers
sig.receiver_connected.connect(connected)
sig.receiver_disconnected.connect(disconnected)
assert sig.receiver_connected.receivers
assert not sentinel
for receiver, weak in [(receiver1, True), (receiver2, False)]:
sig.connect(receiver, sender=sender, weak=weak)
expected = ('receiver_connected',
sig,
{'receiver': receiver, 'sender': sender, 'weak': weak})
assert sentinel[-1] == expected
# disconnect from explicit sender
sig.disconnect(receiver1, sender=sender)
expected = ('receiver_disconnected',
sig,
{'receiver': receiver1, 'sender': sender})
assert sentinel[-1] == expected
# disconnect from ANY and all senders (implicit disconnect signature)
sig.disconnect(receiver2)
assert sentinel[-1] == ('receiver_disconnected',
sig,
{'receiver': receiver2, 'sender': blinker.ANY})
def test_signal_signals_any_sender():
_test_signal_signals(blinker.ANY)
def test_signal_signals_strong_sender():
_test_signal_signals("squiznart")
def test_signal_weak_receiver_vanishes():
# non-edge-case path for weak receivers is exercised in the ANY sender
# test above.
sentinel = Sentinel()
sig = blinker.Signal()
connected = sentinel.make_receiver('receiver_connected')
disconnected = sentinel.make_receiver('receiver_disconnected')
receiver1 = sentinel.make_receiver('receiver1')
receiver2 = sentinel.make_receiver('receiver2')
sig.receiver_connected.connect(connected)
sig.receiver_disconnected.connect(disconnected)
    # explicit disconnect of a weak receiver does emit the signal
sig.connect(receiver1, weak=True)
sig.disconnect(receiver1)
assert len(sentinel) == 2
assert sentinel[-1][2]['receiver'] is receiver1
del sentinel[:]
sig.connect(receiver2, weak=True)
assert len(sentinel) == 1
del sentinel[:] # holds a ref to receiver2
del receiver2
collect_acyclic_refs()
# no disconnect signal is fired
assert len(sentinel) == 0
# and everything really is disconnected
sig.send('abc')
assert len(sentinel) == 0
def test_signal_signals_weak_sender():
sentinel = Sentinel()
sig = blinker.Signal()
connected = sentinel.make_receiver('receiver_connected')
disconnected = sentinel.make_receiver('receiver_disconnected')
receiver1 = sentinel.make_receiver('receiver1')
receiver2 = sentinel.make_receiver('receiver2')
class Sender(object):
"""A weakref-able object."""
sig.receiver_connected.connect(connected)
sig.receiver_disconnected.connect(disconnected)
sender1 = Sender()
sig.connect(receiver1, sender=sender1, weak=False)
# regular disconnect of weak-able sender works fine
sig.disconnect(receiver1, sender=sender1)
assert len(sentinel) == 2
del sentinel[:]
sender2 = Sender()
sig.connect(receiver2, sender=sender2, weak=False)
# force sender2 to go out of scope
del sender2
collect_acyclic_refs()
# no disconnect signal is fired
assert len(sentinel) == 1
# and everything really is disconnected
sig.send('abc')
assert len(sentinel) == 1
def test_empty_bucket_growth():
sentinel = Sentinel()
sig = blinker.Signal()
senders = lambda: (len(sig._by_sender),
sum(len(i) for i in sig._by_sender.values()))
receivers = lambda: (len(sig._by_receiver),
sum(len(i) for i in sig._by_receiver.values()))
receiver1 = sentinel.make_receiver('receiver1')
receiver2 = sentinel.make_receiver('receiver2')
class Sender(object):
"""A weakref-able object."""
sender = Sender()
sig.connect(receiver1, sender=sender)
sig.connect(receiver2, sender=sender)
assert senders() == (1, 2)
assert receivers() == (2, 2)
sig.disconnect(receiver1, sender=sender)
assert senders() == (1, 1)
assert receivers() == (2, 1)
sig.disconnect(receiver2, sender=sender)
assert senders() == (1, 0)
assert receivers() == (2, 0)
sig._cleanup_bookkeeping()
assert senders() == (0, 0)
assert receivers() == (0, 0)
def test_meta_connect_failure():
def meta_received(sender, **kw):
raise TypeError('boom')
assert not blinker.receiver_connected.receivers
blinker.receiver_connected.connect(meta_received)
def receiver(sender, **kw):
pass
sig = blinker.Signal()
assert_raises(TypeError, sig.connect, receiver)
assert not sig.receivers
assert not sig._by_receiver
assert sig._by_sender == {blinker.base.ANY_ID: set()}
blinker.receiver_connected._clear_state()
def test_weak_namespace():
ns = blinker.WeakNamespace()
assert not ns
s1 = ns.signal('abc')
assert s1 is ns.signal('abc')
assert s1 is not ns.signal('def')
assert 'abc' in ns
collect_acyclic_refs()
# weak by default, already out of scope
assert 'def' not in ns
del s1
collect_acyclic_refs()
assert 'abc' not in ns
def test_namespace():
ns = blinker.Namespace()
assert not ns
s1 = ns.signal('abc')
assert s1 is ns.signal('abc')
assert s1 is not ns.signal('def')
assert 'abc' in ns
del s1
collect_acyclic_refs()
assert 'def' in ns
assert 'abc' in ns
def test_weak_receiver():
sentinel = []
def received(sender, **kw):
sentinel.append(kw)
sig = blinker.Signal()
# XXX: weirdly, under jython an explicit weak=True causes this test
# to fail, leaking a strong ref to the receiver somewhere.
# http://bugs.jython.org/issue1586
if jython:
sig.connect(received) # weak=True by default.
else:
sig.connect(received, weak=True)
del received
collect_acyclic_refs()
assert not sentinel
sig.send()
assert not sentinel
assert not sig.receivers
values_are_empty_sets_(sig._by_receiver)
values_are_empty_sets_(sig._by_sender)
def test_strong_receiver():
sentinel = []
def received(sender):
sentinel.append(sender)
fn_id = id(received)
sig = blinker.Signal()
sig.connect(received, weak=False)
del received
collect_acyclic_refs()
assert not sentinel
sig.send()
assert sentinel
assert [id(fn) for fn in sig.receivers.values()] == [fn_id]
def test_instancemethod_receiver():
sentinel = []
class Receiver(object):
def __init__(self, bucket):
self.bucket = bucket
def received(self, sender):
self.bucket.append(sender)
receiver = Receiver(sentinel)
sig = blinker.Signal()
sig.connect(receiver.received)
assert not sentinel
sig.send()
assert sentinel
del receiver
collect_acyclic_refs()
sig.send()
assert len(sentinel) == 1
def test_filtered_receiver():
sentinel = []
def received(sender):
sentinel.append(sender)
sig = blinker.Signal()
sig.connect(received, 123)
assert not sentinel
sig.send()
assert not sentinel
sig.send(123)
assert sentinel == [123]
sig.send()
assert sentinel == [123]
sig.disconnect(received, 123)
sig.send(123)
assert sentinel == [123]
sig.connect(received, 123)
sig.send(123)
assert sentinel == [123, 123]
sig.disconnect(received)
sig.send(123)
assert sentinel == [123, 123]
def test_filtered_receiver_weakref():
sentinel = []
def received(sender):
sentinel.append(sender)
class Object(object):
pass
obj = Object()
sig = blinker.Signal()
sig.connect(received, obj)
assert not sentinel
sig.send(obj)
assert sentinel == [obj]
del sentinel[:]
del obj
collect_acyclic_refs()
# general index isn't cleaned up
assert sig.receivers
# but receiver/sender pairs are
values_are_empty_sets_(sig._by_receiver)
values_are_empty_sets_(sig._by_sender)
def test_decorated_receiver():
sentinel = []
class Object(object):
pass
obj = Object()
sig = blinker.Signal()
@sig.connect_via(obj)
def receiver(sender, **kw):
sentinel.append(kw)
assert not sentinel
sig.send()
assert not sentinel
sig.send(1)
assert not sentinel
sig.send(obj)
assert sig.receivers
del receiver
collect_acyclic_refs()
assert sig.receivers
def test_no_double_send():
sentinel = []
def received(sender):
sentinel.append(sender)
sig = blinker.Signal()
sig.connect(received, 123)
sig.connect(received)
assert not sentinel
sig.send()
assert sentinel == [None]
sig.send(123)
assert sentinel == [None, 123]
sig.send()
assert sentinel == [None, 123, None]
def test_has_receivers():
received = lambda sender: None
sig = blinker.Signal()
assert not sig.has_receivers_for(None)
assert not sig.has_receivers_for(blinker.ANY)
sig.connect(received, 'xyz')
assert not sig.has_receivers_for(None)
assert not sig.has_receivers_for(blinker.ANY)
assert sig.has_receivers_for('xyz')
class Object(object):
pass
o = Object()
sig.connect(received, o)
assert sig.has_receivers_for(o)
del received
collect_acyclic_refs()
assert not sig.has_receivers_for('xyz')
assert list(sig.receivers_for('xyz')) == []
assert list(sig.receivers_for(o)) == []
sig.connect(lambda sender: None, weak=False)
assert sig.has_receivers_for('xyz')
assert sig.has_receivers_for(o)
assert sig.has_receivers_for(None)
assert sig.has_receivers_for(blinker.ANY)
assert sig.has_receivers_for('xyz')
def test_instance_doc():
sig = blinker.Signal(doc='x')
assert sig.__doc__ == 'x'
sig = blinker.Signal('x')
assert sig.__doc__ == 'x'
def test_named_blinker():
sig = blinker.NamedSignal('squiznart')
assert 'squiznart' in repr(sig)
def values_are_empty_sets_(dictionary):
for val in dictionary.values():
assert val == set()
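def demo_basic_signal():
    """Hedged standalone sketch (not one of the tests above): the minimal
    connect/send round trip exercised throughout this file, using only the
    public blinker API.  The sender string and keyword are illustrative."""
    log = []
    sig = blinker.Signal()
    def on_event(sender, **kw):
        log.append((sender, kw))
    sig.connect(on_event)               # weak=True by default
    sig.send('demo-sender', value=42)   # receivers get (sender, **kwargs)
    assert log == [('demo-sender', {'value': 42})]
    return log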
|
|
#!/usr/bin/env python
import os
import sys
import stat
import errno
import logging
import StringIO
try:
import _find_fuse_parts
except ImportError:
pass
import fuse
from linode import Api
fuse.fuse_python_api = (0, 2)
write_cache = {}
class LinodeFSStat(fuse.Stat):
def __init__(self):
self.st_mode = 0
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 0
self.st_uid = 0
self.st_gid = 0
self.st_size = 0
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
class LinodeFS(fuse.Fuse):
_api = None
_objects_to_create = []
_linodes = []
def __init__(self, *args, **kwargs):
fuse.Fuse.__init__(self, *args, **kwargs)
logging.basicConfig(filename='linodefs.log', level=logging.DEBUG)
logging.debug("Starting LinodeFS")
def make_connection(self):
if hasattr(self, 'api_url'):
            Api.endpoint = self.api_url  # override the endpoint on the imported Api class
self._api = Api(self.api_key)
def get_cached_linodes(self):
if not self._linodes:
self._linodes = self.api_handle.linode.list()
return self._linodes
def get_linode_by_name(self, name):
linodes = self.get_cached_linodes()
return next(linode for linode in linodes if linode['LABEL']==name)
@property
def api_handle(self):
if not self._api:
            self.make_connection()
return self._api
def _read_linode_names(self):
linodes = self.get_cached_linodes()
logging.debug("%s" % linodes)
return [linode['LABEL'] for linode in linodes]
def _get_object(self, path_tokens):
"""Return an object instance from path_tokens (i.e. result
of path.split('/') or None if object doesn't exist"""
linode_name, object_name = path_tokens[1], path_tokens[2]
try:
linode = self.get_linode_by_name(linode_name)
return linode[object_name]
        except (KeyError, StopIteration):
            # unknown linode name or unknown field on the linode record
            return None
def getattr(self, path):
logging.debug("getattr(path='%s')" % path)
        st = LinodeFSStat()
if path == '/':
st.st_mode = stat.S_IFDIR | 0755
st.st_nlink = 2
return st
elif path in self._objects_to_create:
logging.debug("getattr(path='%s'): file is scheduled for creation" % (path))
st.st_mode = stat.S_IFREG | 0644
st.st_nlink = 1
st.st_size = 0
return st
path_tokens = path.split('/')
if 2 == len(path_tokens):
linode_names = self._read_linode_names()
if path_tokens[1] in linode_names:
st.st_mode = stat.S_IFDIR | 0755
st.st_nlink = 2
return st
else:
return -errno.ENOENT
elif 3 == len(path_tokens):
obj = self._get_object(path_tokens)
if obj:
st.st_mode = stat.S_IFREG | 0444
st.st_nlink = 1
st.st_size = obj.size
else:
# getattr() might be called for a new file which doesn't
# exist yet, so we need to make it writable in such case
#st.st_mode = stat.S_IFREG | 0644
#st.st_nlink = 1
#st.st_size = 0
return -errno.ENOENT
return st
return -errno.ENOENT
def readdir(self, path, offset):
logging.debug("readdir(path='%s', offset='%s')" % (path, offset))
if "/" == path:
try:
linode_names = self._read_linode_names()
logging.debug("linode names = %s" % linode_names)
dirs = [".", ".."] + linode_names
logging.debug("dirs = %s" % dirs)
for r in dirs:
logging.debug("yielding %s" % r)
yield fuse.Direntry(r)
#return dirs
except Exception:
logging.exception("exception in readdir()")
else:
path_tokens = path.split("/")
if 2 != len(path_tokens):
# we should only have 1 level depth
logging.warning("Path '%s' is deeper than it should" % path)
return
try:
linode_name = path_tokens[1]
linode = self.get_linode_by_name(linode_name)
dirs = [".", "..","info"] + [str('disk'+obj['DISKID']) for disk in
self.api_handle.linode.disk.list({linodeid:linode['LINODEID']})]
logging.debug("dirs = %s" % dirs)
for r in dirs:
yield fuse.Direntry(r)
except Exception:
logging.exception("exception while trying to list container objects")
def mkdir(self, path, mode):
logging.debug("mkdir(path='%s', mode='%s')" % (path, mode))
path_tokens = path.split('/')
if 2 != len(path_tokens):
logging.warning("attempting to create a non-container dir %s" % path)
return -errno.EOPNOTSUPP
linode_name = path_tokens[1]
        self.api_handle.linode.create(linode_name)
return 0
def rmdir(self, path):
logging.debug("rmdir(path='%s')" % (path,))
path_tokens = path.split('/')
if 1 == len(path_tokens):
return -errno.EPERM
elif 2 == len(path_tokens):
container_name = path_tokens[1]
try:
container = self.api_handle.get_container(container_name)
except ContainerDoesNotExistError:
return -errno.ENOENT
if 0 != len(container.list_objects()):
return -errno.ENOTEMPTY
container.delete()
return 0
elif 3 <= len(path_tokens):
return -errno.EOPNOTSUPP
def mknod(self, path, mode, dev):
logging.debug("mknod(path='%s', mode='%s', dev='%s')" % (path, mode, dev))
try:
path_tokens = path.split('/')
if 3 != len(path_tokens):
return -errno.EPERM
container_name = path_tokens[1]
object_name = path_tokens[2]
self.api_handle.upload_object_via_stream(StringIO.StringIO('\n'),
self.api_handle.get_container(container_name),
object_name,
extra={"content_type": "application/octet-stream"})
return 0
except Exception:
logging.exception("exception in mknod()")
def open(self, path, flags):
logging.debug("open(path='%s', flags='%s')" % (path, flags))
return 0
path_tokens = path.split('/')
if 3 != len(path_tokens):
logging.warning("path_tokens != 3")
return -errno.EOPNOTSUPP
try:
# obj = self._get_object(path_tokens)
# # we allow opening existing files in read-only mode
# if obj:
# accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
# if (flags & accmode) != os.O_RDONLY:
# return -errno.EACCES
return 0
except Exception:
logging.exception("exception in open()")
def read(self, path, size, offset):
logging.debug("read(path='%s', size=%s, offset=%s)" % (path, size, offset))
path_tokens = path.split('/')
if 3 != len(path_tokens):
return -errno.EOPNOTSUPP
        obj = self._get_object(path_tokens)
        if obj is None:
            return -errno.ENOENT
try:
content = ''.join([line for line in obj.as_stream()])
except:
logging.exception("error reading file content")
return
slen = len(content)
if offset < slen:
if offset + size > slen:
size = slen - offset
response = content[offset:offset+size]
else:
response = ''
return response
def write(self, path, buff, offset):
logging.debug("write(path='%s', buff=<skip>, offset='%s')" % (path, offset))
try:
if path not in write_cache:
write_cache[path] = [buff,]
else:
                write_cache[path].append(buff)
return len(buff)
except Exception:
logging.exception("exception in write()")
def unlink(self, path):
logging.debug("unlink(path='%s')" % (path,))
try:
path_tokens = path.split('/')
if 3 != len(path_tokens):
return
obj = self._get_object(path_tokens)
if not obj:
return -errno.ENOENT
obj.delete()
return 0
except Exception:
logging.exception("error while processing unlink()")
def release(self, path, flags):
logging.debug("release(path='%s', flags='%s')" % (path, flags))
# XXX: what's the nature of this?
if "-" == path:
return 0
try:
path_tokens = path.split("/")
container_name, object_name = path_tokens[1], path_tokens[2]
if len(write_cache[path]) > 0:
self.unlink(path)
self.api_handle.upload_object_via_stream(StringIO.StringIO(''.join(write_cache[path])),
self.api_handle.get_container(container_name),
object_name,
extra={"content_type": "application/octet-stream"})
del write_cache[path]
return 0
except KeyError:
logging.warning("no cached entry for path: %s" % path)
return 0
except Exception:
logging.exception("exception in release()")
def truncate(self, path, size):
return 0
def utime(self, path, times):
return 0
def fsync(self, path, isfsyncfile):
return 0
def main():
usage="""
LinodeFS
""" + fuse.Fuse.fusage
server = LinodeFS(version="%prog " + fuse.__version__,
usage=usage,
dash_s_do='setsingle')
server.parser.add_option(mountopt='api_key', metavar='API_KEY',
help=("API Key"))
server.parser.add_option(mountopt='api_url', metavar='API_URL',
help=("API URL"))
server.parse(values=server, errex=1)
if not (hasattr(server, 'api_key')):
print >>sys.stderr, "Please specify an API Key."
sys.exit(1)
try:
server.make_connection()
except Exception, err:
print >>sys.stderr, "Cannot connect to Linode API: %s" % str(err)
sys.exit(1)
server.main()
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import mock
from django.utils.timezone import now as timezone_now
from zerver.lib.soft_deactivation import (
do_soft_deactivate_user,
do_soft_deactivate_users,
get_users_for_soft_deactivation,
do_soft_activate_users,
get_soft_deactivated_users_for_catch_up,
do_catch_up_soft_deactivated_users,
do_auto_soft_deactivate_users
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import (
Client, UserProfile, UserActivity, get_realm, UserMessage
)
class UserSoftDeactivationTests(ZulipTestCase):
def test_do_soft_deactivate_user(self) -> None:
user = self.example_user('hamlet')
self.assertFalse(user.long_term_idle)
with mock.patch('logging.info'):
do_soft_deactivate_user(user)
user.refresh_from_db()
self.assertTrue(user.long_term_idle)
def test_do_soft_deactivate_users(self) -> None:
users = [
self.example_user('hamlet'),
self.example_user('iago'),
self.example_user('cordelia'),
]
for user in users:
self.assertFalse(user.long_term_idle)
# We are sending this message to ensure that users have at least
# one UserMessage row.
self.send_huddle_message(users[0].email,
[user.email for user in users])
with mock.patch('logging.info'):
do_soft_deactivate_users(users)
for user in users:
user.refresh_from_db()
self.assertTrue(user.long_term_idle)
def test_get_users_for_soft_deactivation(self) -> None:
users = [
self.example_user('hamlet'),
self.example_user('iago'),
self.example_user('cordelia'),
self.example_user('ZOE'),
self.example_user('othello'),
self.example_user('prospero'),
self.example_user('aaron'),
self.example_user('polonius'),
]
client, _ = Client.objects.get_or_create(name='website')
query = '/json/users/me/pointer'
last_visit = timezone_now()
count = 150
for user_profile in UserProfile.objects.all():
UserActivity.objects.get_or_create(
user_profile=user_profile,
client=client,
query=query,
count=count,
last_visit=last_visit
)
filter_kwargs = dict(user_profile__realm=get_realm('zulip'))
users_to_deactivate = get_users_for_soft_deactivation(-1, filter_kwargs)
self.assert_length(users_to_deactivate, 8)
for user in users_to_deactivate:
self.assertTrue(user in users)
def test_do_soft_activate_users(self) -> None:
users = [
self.example_user('hamlet'),
self.example_user('iago'),
self.example_user('cordelia'),
]
self.send_huddle_message(users[0].email,
[user.email for user in users])
with mock.patch('logging.info'):
do_soft_deactivate_users(users)
for user in users:
self.assertTrue(user.long_term_idle)
with mock.patch('logging.info'):
do_soft_activate_users(users)
for user in users:
user.refresh_from_db()
self.assertFalse(user.long_term_idle)
def test_get_users_for_catch_up(self) -> None:
users = [
self.example_user('hamlet'),
self.example_user('iago'),
self.example_user('cordelia'),
self.example_user('ZOE'),
self.example_user('othello'),
self.example_user('prospero'),
self.example_user('aaron'),
self.example_user('polonius'),
]
for user_profile in UserProfile.objects.all():
user_profile.long_term_idle = True
user_profile.save(update_fields=['long_term_idle'])
filter_kwargs = dict(realm=get_realm('zulip'))
users_to_catch_up = get_soft_deactivated_users_for_catch_up(filter_kwargs)
self.assert_length(users_to_catch_up, 8)
for user in users_to_catch_up:
self.assertTrue(user in users)
def test_do_catch_up_users(self) -> None:
stream = 'Verona'
hamlet = self.example_user('hamlet')
users = [
self.example_user('iago'),
self.example_user('cordelia'),
]
all_users = users + [hamlet]
for user in all_users:
self.subscribe(user, stream)
with mock.patch('logging.info'):
do_soft_deactivate_users(users)
for user in users:
self.assertTrue(user.long_term_idle)
message_id = self.send_stream_message(hamlet.email, stream, 'Hello world!')
already_received = UserMessage.objects.filter(message_id=message_id).count()
with mock.patch('logging.info'):
do_catch_up_soft_deactivated_users(users)
catch_up_received = UserMessage.objects.filter(message_id=message_id).count()
self.assertEqual(already_received + len(users), catch_up_received)
for user in users:
user.refresh_from_db()
self.assertTrue(user.long_term_idle)
self.assertEqual(user.last_active_message_id, message_id)
def test_do_auto_soft_deactivate_users(self) -> None:
users = [
self.example_user('iago'),
self.example_user('cordelia'),
self.example_user('ZOE'),
self.example_user('othello'),
self.example_user('prospero'),
self.example_user('aaron'),
self.example_user('polonius'),
]
sender = self.example_user('hamlet')
realm = get_realm('zulip')
stream_name = 'announce'
for user in users + [sender]:
self.subscribe(user, stream_name)
client, _ = Client.objects.get_or_create(name='website')
query = '/json/users/me/pointer'
last_visit = timezone_now()
count = 150
for user_profile in users:
UserActivity.objects.get_or_create(
user_profile=user_profile,
client=client,
query=query,
count=count,
last_visit=last_visit
)
with mock.patch('logging.info'):
users_deactivated = do_auto_soft_deactivate_users(-1, realm)
self.assert_length(users_deactivated, len(users))
for user in users:
self.assertTrue(user in users_deactivated)
# Verify that deactivated users are caught up automatically
message_id = self.send_stream_message(sender.email, stream_name)
received_count = UserMessage.objects.filter(user_profile__in=users,
message_id=message_id).count()
self.assertEqual(0, received_count)
with mock.patch('logging.info'):
users_deactivated = do_auto_soft_deactivate_users(-1, realm)
self.assert_length(users_deactivated, 0) # all users are already deactivated
received_count = UserMessage.objects.filter(user_profile__in=users,
message_id=message_id).count()
self.assertEqual(len(users), received_count)
# Verify that deactivated users are NOT caught up if
# AUTO_CATCH_UP_SOFT_DEACTIVATED_USERS is off
message_id = self.send_stream_message(sender.email, stream_name)
received_count = UserMessage.objects.filter(user_profile__in=users,
message_id=message_id).count()
self.assertEqual(0, received_count)
with self.settings(AUTO_CATCH_UP_SOFT_DEACTIVATED_USERS=False):
with mock.patch('logging.info'):
users_deactivated = do_auto_soft_deactivate_users(-1, realm)
self.assert_length(users_deactivated, 0) # all users are already deactivated
received_count = UserMessage.objects.filter(user_profile__in=users,
message_id=message_id).count()
self.assertEqual(0, received_count)
|
|
#!/usr/bin/python
import sys
from klampt import *
from klampt.glprogram import *
from OpenGL.GLUT import *
import math
#Sphero local axes: [0,0,1] is "turn", [1,0,0] is "drive"
#Valid special key names for the keymap are 'left', 'up', 'down', 'right', 'home', 'insert', 'end', and the function keys.
keymap = {'up':(0,[-1,0,0]),'down':(0,[1,0,0]),'left':(0,[0,0,-1]),'right':(0,[0,0,1])}
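#Each keymap value is (robot index, local twist axis). As an illustrative,
#editor-added example (not bound by default): if a second robot were loaded,
#keymap['home'] = (1,[-1,0,0]) would apply the same motion as 'up' to robot 1
#while 'home' is held.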
glutspecialmap = {
GLUT_KEY_F1:'f1',
GLUT_KEY_F2:'f2',
GLUT_KEY_F3:'f3',
GLUT_KEY_F4:'f4',
GLUT_KEY_F5:'f5',
GLUT_KEY_F6:'f6',
GLUT_KEY_F7:'f7',
GLUT_KEY_F8:'f8',
GLUT_KEY_F9:'f9',
GLUT_KEY_F10:'f10',
GLUT_KEY_F11:'f11',
GLUT_KEY_F12:'f12',
GLUT_KEY_LEFT:'left',
GLUT_KEY_UP:'up',
GLUT_KEY_RIGHT:'right',
GLUT_KEY_DOWN:'down',
GLUT_KEY_PAGE_UP:'pageup',
GLUT_KEY_PAGE_DOWN:'pagedown',
GLUT_KEY_HOME:'home',
GLUT_KEY_END:'end',
GLUT_KEY_INSERT:'insert'
}
def euler_zyx_moments(theta):
"""For the zyx euler angles theta=(rz,ry,rx), produces a matrix A such that
A*dtheta is the angular velocities when dtheta is the rate of change of the
euler angles"""
eu = [0,0,1]
ev = [0,1,0]
ew = [1,0,0]
Ru = so3.rotation([0,0,1],theta[0])
Rv = so3.rotation([0,1,0],theta[1])
col1 = eu
col2 = so3.apply(Ru,ev)
col3 = so3.apply(Ru,so3.apply(Rv,ew))
#col1 = [0,0,1]
#col2 = [c0 -s0 0] [0] = [-s0]
# [s0 c0 0]*[1] [c0 ]
# [0 0 1] [0] [0 ]
#col3 = Ru*[c1 0 s1] [1] = Ru*[c1 ] = [c1c0]
# [0 1 0 ]*[0] [0 ] [c1s0]
# [-s1 0 c1] [0] [-s1] [-s1 ]
#A = [ 0 -s0 c1c0]
# [ 0 c0 c1s0]
# [ 1 0 -s1 ]
return zip(col1,col2,col3)
def euler_zyx_moments_inv(theta):
"""Returns the inverse of the matrix returned by the above procedure"""
c0 = math.cos(theta[0])
s0 = math.sin(theta[0])
c1 = math.cos(theta[1])
s1 = math.sin(theta[1])
#A = [ 0 -s0 c1c0]
# [ 0 c0 c1s0]
# [ 1 0 -s1 ]
#det(A) = -c1
#A^-1 = 1/c1*[ s1c0 s0s1 c1 ]
# [-c1s0 c1c0 0 ]
# [ c0 s0 0 ]
#A^-1*A = 1/c1*[c1 -s0s1c0+c0s0s1 s1c1c0^2+s1c1s0^2-c1s1 ] = [1 0 0]
# [0 c1s0^2+c1c0^2 -c0c1^2s0+s0c1^2c0 ] [0 1 0]
# [0 -s0c0+s0c0 c1c0^2+c1s0^2 ] [0 0 1]
sec1 = 1.0/c1
return [[c0*s1/c1,s0*s1/c1,1],
[-s0,c0,0],
[c0/c1,s0/c1,0]]
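#Editor-added sketch (name hypothetical, never called by the program): checks
#numerically that the matrices from euler_zyx_moments and euler_zyx_moments_inv
#really are inverses, using the closed forms worked out in the comments above.
def _check_euler_zyx_inverse(theta=(0.3,-0.7,1.1),tol=1e-9):
    A = euler_zyx_moments(theta)
    Ainv = euler_zyx_moments_inv(theta)
    for i in range(3):
        for j in range(3):
            #entry (i,j) of Ainv*A should be 1 on the diagonal and 0 elsewhere
            entry = sum(Ainv[i][k]*A[k][j] for k in range(3))
            expected = 1.0 if i == j else 0.0
            if abs(entry-expected) > tol:
                return False
    return True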
class Emulator:
def __init__(self,sim,robotIndex = 0):
self.sim = sim
self.robotIndex = robotIndex
self.controller = sim.getController(robotIndex)
self.robot = sim.getWorld().robot(robotIndex)
#indices: turn and drive, respectively
self.velocityLimits = [180*math.pi/180,1080*math.pi/180]
self.accelLimits = [360*math.pi/180,2080*math.pi/180]
self.motorSpeeds = [0,0]
#velocity locking gain
self.velocityLockGain = 0.1
#rolling friction
self.rollingFrictionCoeff = 0.1
#timestep
self.dt = 0.01
def send_command(self,twist):
assert twist[1] == 0
#compute the angular velocity of the shell in the motor frame
motorBody = self.sim.getBody(self.robot.getLink(5))
shellBody = self.sim.getBody(self.robot.getLink(8))
motorTwist = motorBody.getVelocity()
shellTwist = shellBody.getVelocity()
motorXform = motorBody.getTransform()
shellXform = shellBody.getTransform()
shellRelativeAvel = so3.apply(so3.inv(motorXform[0]),vectorops.sub(shellTwist[0],motorTwist[0]))
#print "Relative angular vel",shellRelativeAvel
desiredTurnSpeed = twist[2]*self.velocityLimits[0]
desiredDriveSpeed = 0
if twist[0] == 0 or twist[0]*self.motorSpeeds[1] < 0: #stop
desiredDriveSpeed = 0
else:
desiredDriveSpeed = self.motorSpeeds[1]+twist[0]*self.accelLimits[1]*self.dt
#print "Turn des",desiredTurnSpeed, "drive des",desiredDriveSpeed
#clamp speeds to limits
desiredTurnSpeed = max(-self.velocityLimits[0],min(desiredTurnSpeed,self.velocityLimits[0]))
desiredDriveSpeed = max(-self.velocityLimits[1],min(desiredDriveSpeed,self.velocityLimits[1]))
terr = desiredTurnSpeed - self.motorSpeeds[0]
derr = desiredDriveSpeed - self.motorSpeeds[1]
#clamp desired accelerations to limits
terr = max(-self.accelLimits[0]*self.dt,min(terr,self.accelLimits[0]*self.dt))
derr = max(-self.accelLimits[1]*self.dt,min(derr,self.accelLimits[1]*self.dt))
self.motorSpeeds[0] += terr
self.motorSpeeds[1] += derr
#apply locked velocity control to bring shell up to relative speed
#this is the desired angular velocity of the shell in the motor
#coordinates
desiredShellAvel = [self.motorSpeeds[1],0,self.motorSpeeds[0]]
#print "Desired angular vel",desiredShellAvel
relativeVelError = vectorops.sub(desiredShellAvel,shellRelativeAvel)
wrenchlocal = vectorops.mul(relativeVelError,self.velocityLockGain)
#local wrench is k*(wdes-wrel)
wrench = so3.apply(motorXform[0],wrenchlocal)
#print "Wrench to shell",wrench
motorBody.applyWrench([0,0,0],vectorops.mul(wrench,-1.0))
shellBody.applyWrench([0,0,0],wrench)
#disable PID controllers
self.controller.setTorque([0,0,0])
#apply rolling friction forces
shellBody.applyWrench([0,0,0],vectorops.mul(shellTwist[0],-self.rollingFrictionCoeff))
return
class MyGLViewer(GLRealtimeProgram):
def __init__(self,world):
global keymap
GLRealtimeProgram.__init__(self,"My GL program")
self.world = world
self.keymap = keymap
self.current_velocities = {}
#Put your initialization code here
#the current example creates a collision class, simulator,
#simulation flag, and screenshot flags
self.collider = robotcollide.WorldCollider(world)
self.sim = Simulator(world)
self.simulate = False
#initialize emulators
self.spheros = [Emulator(self.sim,r) for r in range(world.numRobots())]
self.saveScreenshots = False
self.nextScreenshotTime = 0
self.screenshotCount = 0
def display(self):
#Put your display handler here
#the current example draws the simulated world in grey and the
#commanded configurations in transparent green
self.sim.updateWorld()
self.world.drawGL()
return
def control_loop(self):
#Calculate the desired velocity for each robot by adding up all
#commands
rvels = [[0]*3 for r in range(self.world.numRobots())]
for (c,(r,v)) in self.current_velocities.iteritems():
rvels[r] = vectorops.add(rvels[r],v)
#send to the robot(s)
for r in range(self.world.numRobots()):
self.spheros[r].send_command(rvels[r])
return
def idle(self):
#Put your idle loop handler here
#the current example simulates with the current time step self.dt
if self.simulate and self.saveScreenshots:
if self.ttotal >= self.nextScreenshotTime:
self.save_screen("image%04d.ppm"%(self.screenshotCount,))
self.screenshotCount += 1
self.nextScreenshotTime += 1.0/30.0;
if self.simulate:
self.control_loop()
self.sim.simulate(self.dt)
glutPostRedisplay()
def mousefunc(self,button,state,x,y):
#Put your mouse handler here
#the current example prints out the list of objects clicked whenever
#you right click
if button==2:
if state==0:
print [o.getName() for o in self.click_world(x,y)]
return
GLRealtimeProgram.mousefunc(self,button,state,x,y)
def specialfunc(self,c,x,y):
#Put your keyboard special character handler here
if c in glutspecialmap:
name = glutspecialmap[c]
#print name,"pressed"
if name in self.keymap:
self.current_velocities[name]=self.keymap[name]
pass
def specialupfunc(self,c,x,y):
#Put your keyboard special character handler here
if c in glutspecialmap:
name = glutspecialmap[c]
#print name,"unpressed"
if name in self.current_velocities:
del self.current_velocities[name]
pass
def keyboardfunc(self,c,x,y):
#Put your keyboard handler here
#the current example toggles simulation / movie mode
if c == 's':
self.simulate = not self.simulate
print "Simulating:",self.simulate
elif c == 'm':
self.saveScreenshots = not self.saveScreenshots
print "Movie mode:",self.saveScreenshots
elif c == 'h':
print 'Available keys:',sorted(self.keymap.keys())
elif c in self.keymap:
self.current_velocities[c]=self.keymap[c]
glutPostRedisplay()
def keyboardupfunc(self,c,x,y):
if c in self.current_velocities:
del self.current_velocities[c]
return
def click_world(self,x,y):
"""Helper: returns a list of world objects sorted in order of
increasing distance."""
#get the viewport ray
(s,d) = self.click_ray(x,y)
#run the collision tests
self.collider.updateFrames()
collided = []
for g in self.collider.geomList:
(hit,pt) = collide.rayCast(g[1],s,d)
if hit:
dist = vectorops.dot(vectorops.sub(pt,s),d)
collided.append((dist,g[0]))
return [g[1] for g in sorted(collided)]
if __name__ == "__main__":
print "kbdrive.py: This example demonstrates how to drive a robot using keyboard input"
if len(sys.argv)<=1:
print "USAGE: kbdrive.py [world_file]"
sys.argv = [sys.argv[0],'sphero_data/sphero.xml']
world = WorldModel()
for fn in sys.argv[1:]:
res = world.readFile(fn)
if not res:
raise RuntimeError("Unable to load model "+fn)
viewer = MyGLViewer(world)
viewer.run()
|
|
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import, print_function, with_statement
__author__ = "Microsoft Corporation <[email protected]>"
__version__ = "3.0.0.0"
import ctypes
import datetime
import os
import re
import struct
import sys
import traceback
from xml.dom import minidom
try:
from cStringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import StringIO, BytesIO
try:
from thread import start_new_thread
except ImportError:
from _thread import start_new_thread
__version__ = '3.0.0'
if sys.version_info[0] == 3:
def to_str(value):
return value.decode(sys.getfilesystemencoding())
else:
def to_str(value):
return value.encode(sys.getfilesystemencoding())
# http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S3
FCGI_VERSION_1 = 1
FCGI_HEADER_LEN = 8
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = "FCGI_MAX_CONNS"
FCGI_MAX_REQS = "FCGI_MAX_REQS"
FCGI_MPXS_CONNS = "FCGI_MPXS_CONNS"
class FastCgiRecord(object):
"""Represents a FastCgiRecord. Encapulates the type, role, flags. Holds
onto the params which we will receive and update later."""
def __init__(self, type, req_id, role, flags):
self.type = type
self.req_id = req_id
self.role = role
self.flags = flags
self.params = {}
def __repr__(self):
return '<FastCgiRecord(%d, %d, %d, %d)>' % (self.type,
self.req_id,
self.role,
self.flags)
#typedef struct {
# unsigned char version;
# unsigned char type;
# unsigned char requestIdB1;
# unsigned char requestIdB0;
# unsigned char contentLengthB1;
# unsigned char contentLengthB0;
# unsigned char paddingLength;
# unsigned char reserved;
# unsigned char contentData[contentLength];
# unsigned char paddingData[paddingLength];
#} FCGI_Record;
class _ExitException(Exception):
pass
if sys.version_info[0] >= 3:
# indexing into byte strings gives us an int, so
# ord is unnecessary on Python 3
def ord(x):
return x
def chr(x):
return bytes((x, ))
def wsgi_decode(x):
return x.decode('iso-8859-1')
def wsgi_encode(x):
return x.encode('iso-8859-1')
def fs_encode(x):
return x
def exception_with_traceback(exc_value, exc_tb):
return exc_value.with_traceback(exc_tb)
zero_bytes = bytes
else:
# Replace the builtin open with one that supports an encoding parameter
from codecs import open
def wsgi_decode(x):
return x
def wsgi_encode(x):
return x
def fs_encode(x):
return x if isinstance(x, str) else x.encode(sys.getfilesystemencoding())
def exception_with_traceback(exc_value, exc_tb):
# x.with_traceback() is not supported on 2.x
return exc_value
bytes = str
def zero_bytes(length):
return '\x00' * length
def read_fastcgi_record(stream):
"""reads the main fast cgi record"""
data = stream.read(8) # read record
if not data:
# no more data, our other process must have died...
raise _ExitException()
fcgi_ver, reqtype, req_id, content_size, padding_len, _ = struct.unpack('>BBHHBB', data)
content = stream.read(content_size) # read content
stream.read(padding_len)
if fcgi_ver != FCGI_VERSION_1:
raise Exception('Unknown fastcgi version %s' % fcgi_ver)
processor = REQUEST_PROCESSORS.get(reqtype)
if processor is not None:
return processor(stream, req_id, content)
# unknown type requested, send response
log('Unknown request type %s' % reqtype)
send_response(stream, req_id, FCGI_UNKNOWN_TYPE, chr(reqtype) + zero_bytes(7))
return None
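# Editor-added sketch (hypothetical values, never called by the module): shows
# how the 8-byte header parsed above maps onto the FCGI_Record layout, using
# the same '>BBHHBB' format that send_response() packs.
def _fcgi_header_example():
    header = struct.pack(
        '>BBHHBB',
        FCGI_VERSION_1, # version
        FCGI_PARAMS,    # type
        1,              # requestIdB1:B0
        26,             # contentLengthB1:B0 (26 bytes of content follow)
        0,              # paddingLength
        0,              # reserved
    )
    return struct.unpack('>BBHHBB', header) # -> (1, 4, 1, 26, 0, 0)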
def read_fastcgi_begin_request(stream, req_id, content):
"""reads the begin request body and updates our _REQUESTS table to include
the new request"""
# typedef struct {
# unsigned char roleB1;
# unsigned char roleB0;
# unsigned char flags;
# unsigned char reserved[5];
# } FCGI_BeginRequestBody;
# TODO: Ignore request if it exists
res = FastCgiRecord(
FCGI_BEGIN_REQUEST,
req_id,
(ord(content[0]) << 8) | ord(content[1]), # role
ord(content[2]), # flags
)
_REQUESTS[req_id] = res
def read_encoded_int(content, offset):
i = struct.unpack_from('>B', content, offset)[0]
if i < 0x80:
return offset + 1, i
return offset + 4, struct.unpack_from('>I', content, offset)[0] & ~0x80000000
def read_fastcgi_keyvalue_pairs(content, offset):
"""Reads a FastCGI key/value pair stream"""
offset, name_len = read_encoded_int(content, offset)
offset, value_len = read_encoded_int(content, offset)
name = content[offset:(offset + name_len)]
offset += name_len
value = content[offset:(offset + value_len)]
offset += value_len
return offset, name, value
def get_encoded_int(i):
"""Writes the length of a single name for a key or value in a key/value
stream"""
if i <= 0x7f:
return struct.pack('>B', i)
elif i < 0x80000000:
return struct.pack('>I', i | 0x80000000)
else:
raise ValueError('cannot encode value %s (%x) because it is too large' % (i, i))
def write_fastcgi_keyvalue_pairs(pairs):
"""Creates a FastCGI key/value stream and returns it as a byte string"""
parts = []
for raw_key, raw_value in pairs.items():
key = wsgi_encode(raw_key)
value = wsgi_encode(raw_value)
parts.append(get_encoded_int(len(key)))
parts.append(get_encoded_int(len(value)))
parts.append(key)
parts.append(value)
return bytes().join(parts)
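# Editor-added sketch (hypothetical name, never called): round-trips a single
# pair through the two helpers above to illustrate the FastCGI name-value
# encoding (one length byte below 0x80, four bytes with the high bit set
# otherwise).
def _keyvalue_roundtrip_example():
    encoded = write_fastcgi_keyvalue_pairs({'REQUEST_METHOD': 'GET'})
    offset, name, value = read_fastcgi_keyvalue_pairs(encoded, 0)
    # name and value come back as raw bytes; decode them the same way
    # read_fastcgi_params() does.
    return wsgi_decode(name), wsgi_decode(value), offset == len(encoded)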
# For keys in this mapping, the raw (undecoded) value is stored in the record
# under the prefixed name on the right, while the original key receives the
# decoded version.
# (Following mod_wsgi from http://wsgi.readthedocs.org/en/latest/python3.html)
RAW_VALUE_NAMES = {
'SCRIPT_NAME' : 'wsgi.script_name',
'PATH_INFO' : 'wsgi.path_info',
'QUERY_STRING' : 'wsgi.query_string',
'HTTP_X_ORIGINAL_URL' : 'wfastcgi.http_x_original_url',
}
def read_fastcgi_params(stream, req_id, content):
if not content:
return None
offset = 0
res = _REQUESTS[req_id].params
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
name = wsgi_decode(name)
raw_name = RAW_VALUE_NAMES.get(name)
if raw_name:
res[raw_name] = value
res[name] = wsgi_decode(value)
def read_fastcgi_input(stream, req_id, content):
"""reads FastCGI std-in and stores it in wsgi.input passed in the
wsgi environment array"""
res = _REQUESTS[req_id].params
if 'wsgi.input' not in res:
res['wsgi.input'] = content
else:
res['wsgi.input'] += content
if not content:
# we've hit the end of the input stream, time to process input...
return _REQUESTS[req_id]
def read_fastcgi_data(stream, req_id, content):
"""reads FastCGI data stream and publishes it as wsgi.data"""
res = _REQUESTS[req_id].params
if 'wsgi.data' not in res:
res['wsgi.data'] = content
else:
res['wsgi.data'] += content
def read_fastcgi_abort_request(stream, req_id, content):
"""reads the wsgi abort request, which we ignore, we'll send the
finish execution request anyway..."""
pass
def read_fastcgi_get_values(stream, req_id, content):
"""reads the fastcgi request to get parameter values, and immediately
responds"""
offset = 0
request = {}
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
request[name] = value
response = {}
if FCGI_MAX_CONNS in request:
response[FCGI_MAX_CONNS] = '1'
if FCGI_MAX_REQS in request:
response[FCGI_MAX_REQS] = '1'
if FCGI_MPXS_CONNS in request:
response[FCGI_MPXS_CONNS] = '0'
send_response(
stream,
req_id,
FCGI_GET_VALUES_RESULT,
write_fastcgi_keyvalue_pairs(response)
)
# Our request processors for different FastCGI protocol requests. Only those
# requests that we receive are defined here.
REQUEST_PROCESSORS = {
FCGI_BEGIN_REQUEST : read_fastcgi_begin_request,
FCGI_ABORT_REQUEST : read_fastcgi_abort_request,
FCGI_PARAMS : read_fastcgi_params,
FCGI_STDIN : read_fastcgi_input,
FCGI_DATA : read_fastcgi_data,
FCGI_GET_VALUES : read_fastcgi_get_values
}
APPINSIGHT_CLIENT = None
def log(txt):
"""Logs messages to a log file if WSGI_LOG env var is defined."""
if APPINSIGHT_CLIENT:
try:
APPINSIGHT_CLIENT.track_event(txt)
except:
pass
log_file = os.environ.get('WSGI_LOG')
if log_file:
with open(log_file, 'a+', encoding='utf-8') as f:
txt = txt.replace('\r\n', '\n')
f.write('%s: %s%s' % (datetime.datetime.now(), txt, '' if txt.endswith('\n') else '\n'))
def maybe_log(txt):
"""Logs messages to a log file if WSGI_LOG env var is defined, and does not
raise exceptions if logging fails."""
try:
log(txt)
except:
pass
def send_response(stream, req_id, resp_type, content, streaming=True):
"""sends a response w/ the given id, type, and content to the server.
If the content is streaming then an empty record is sent at the end to
terminate the stream"""
if not isinstance(content, bytes):
raise TypeError("content must be encoded before sending: %r" % content)
offset = 0
while True:
len_remaining = max(min(len(content) - offset, 0xFFFF), 0)
data = struct.pack(
'>BBHHBB',
FCGI_VERSION_1, # version
resp_type, # type
req_id, # requestIdB1:B0
len_remaining, # contentLengthB1:B0
0, # paddingLength
0, # reserved
) + content[offset:(offset + len_remaining)]
offset += len_remaining
os.write(stream.fileno(), data)
if len_remaining == 0 or not streaming:
break
stream.flush()
def get_environment(dir):
web_config = os.path.join(dir, 'Web.config')
if not os.path.exists(web_config):
return {}
d = {}
doc = minidom.parse(web_config)
config = doc.getElementsByTagName('configuration')
for configSection in config:
appSettings = configSection.getElementsByTagName('appSettings')
for appSettingsSection in appSettings:
values = appSettingsSection.getElementsByTagName('add')
for curAdd in values:
key = curAdd.getAttribute('key')
value = curAdd.getAttribute('value')
if key and value is not None:
d[key.strip()] = value
return d
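# Editor-added illustration (paths and values hypothetical): get_environment()
# reads the <appSettings> section of a Web.config sitting next to the
# application, e.g.
#
#   <configuration>
#     <appSettings>
#       <add key="WSGI_HANDLER" value="myapp.wsgi.application" />
#       <add key="WSGI_LOG" value="C:\logs\wfastcgi.log" />
#     </appSettings>
#   </configuration>
#
# Each <add> element becomes an entry in the returned dict, which
# read_wsgi_handler() later copies into os.environ.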
ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = ctypes.c_uint32
ReadDirectoryChangesW.argtypes = [
ctypes.c_void_p, # HANDLE hDirectory
ctypes.c_void_p, # LPVOID lpBuffer
ctypes.c_uint32, # DWORD nBufferLength
ctypes.c_uint32, # BOOL bWatchSubtree
ctypes.c_uint32, # DWORD dwNotifyFilter
ctypes.POINTER(ctypes.c_uint32), # LPDWORD lpBytesReturned
ctypes.c_void_p, # LPOVERLAPPED lpOverlapped
ctypes.c_void_p # LPOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
]
try:
from _winapi import (CreateFile, CloseHandle, GetLastError, ExitProcess,
WaitForSingleObject, INFINITE, OPEN_EXISTING)
except ImportError:
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.restype = ctypes.c_void_p
CreateFile.argtypes = [
ctypes.c_wchar_p, # lpFilename
ctypes.c_uint32, # dwDesiredAccess
ctypes.c_uint32, # dwShareMode
ctypes.c_void_p, # LPSECURITY_ATTRIBUTES,
ctypes.c_uint32, # dwCreationDisposition,
ctypes.c_uint32, # dwFlagsAndAttributes,
ctypes.c_void_p # hTemplateFile
]
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.argtypes = [ctypes.c_void_p]
GetLastError = ctypes.windll.kernel32.GetLastError
GetLastError.restype = ctypes.c_uint32
ExitProcess = ctypes.windll.kernel32.ExitProcess
ExitProcess.restype = ctypes.c_void_p
ExitProcess.argtypes = [ctypes.c_uint32]
WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
WaitForSingleObject.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
WaitForSingleObject.restype = ctypes.c_uint32
OPEN_EXISTING = 3
INFINITE = -1
FILE_LIST_DIRECTORY = 1
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
FILE_SHARE_DELETE = 0x00000004
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
MAX_PATH = 260
FILE_NOTIFY_CHANGE_LAST_WRITE = 0x10
ERROR_NOTIFY_ENUM_DIR = 1022
INVALID_HANDLE_VALUE = 0xFFFFFFFF
class FILE_NOTIFY_INFORMATION(ctypes.Structure):
_fields_ = [('NextEntryOffset', ctypes.c_uint32),
('Action', ctypes.c_uint32),
('FileNameLength', ctypes.c_uint32),
('Filename', ctypes.c_wchar)]
_ON_EXIT_TASKS = None
def run_exit_tasks():
global _ON_EXIT_TASKS
maybe_log("Running on_exit tasks")
while _ON_EXIT_TASKS:
tasks, _ON_EXIT_TASKS = _ON_EXIT_TASKS, []
for t in tasks:
try:
t()
except Exception:
maybe_log("Error in exit task: " + traceback.format_exc())
def on_exit(task):
global _ON_EXIT_TASKS
if _ON_EXIT_TASKS is None:
_ON_EXIT_TASKS = tasks = []
try:
evt = int(os.getenv('_FCGI_SHUTDOWN_EVENT_'))
except (TypeError, ValueError):
maybe_log("Could not wait on event %s" % os.getenv('_FCGI_SHUTDOWN_EVENT_'))
else:
def _wait_for_exit():
WaitForSingleObject(evt, INFINITE)
run_exit_tasks()
ExitProcess(0)
start_new_thread(_wait_for_exit, ())
_ON_EXIT_TASKS.append(task)
def start_file_watcher(path, restart_regex):
if restart_regex is None:
restart_regex = ".*((\\.py)|(\\.config))$"
elif not restart_regex:
# restart regex set to empty string, no restart behavior
return
def enum_changes(path):
"""Returns a generator that blocks until a change occurs, then yields
the filename of the changed file.
Yields an empty string and stops if the buffer overruns, indicating that
too many files were changed."""
buffer = ctypes.create_string_buffer(32 * 1024)
bytes_ret = ctypes.c_uint32()
try:
the_dir = CreateFile(
path,
FILE_LIST_DIRECTORY,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
0,
OPEN_EXISTING,
FILE_FLAG_BACKUP_SEMANTICS,
0,
)
except OSError:
maybe_log("Unable to create watcher")
return
if not the_dir or the_dir == INVALID_HANDLE_VALUE:
maybe_log("Unable to create watcher")
return
while True:
ret_code = ReadDirectoryChangesW(
the_dir,
buffer,
ctypes.sizeof(buffer),
True,
FILE_NOTIFY_CHANGE_LAST_WRITE,
ctypes.byref(bytes_ret),
None,
None,
)
if ret_code:
cur_pointer = ctypes.addressof(buffer)
while True:
fni = ctypes.cast(cur_pointer, ctypes.POINTER(FILE_NOTIFY_INFORMATION))
# FileName is not null-terminated, so specifying length is mandatory.
filename = ctypes.wstring_at(cur_pointer + 12, fni.contents.FileNameLength // 2)
yield filename
if fni.contents.NextEntryOffset == 0:
break
cur_pointer = cur_pointer + fni.contents.NextEntryOffset
elif GetLastError() == ERROR_NOTIFY_ENUM_DIR:
CloseHandle(the_dir)
yield ''
return
else:
CloseHandle(the_dir)
return
log('wfastcgi.py will restart when files in %s are changed: %s' % (path, restart_regex))
def watcher(path, restart):
for filename in enum_changes(path):
if not filename:
log('wfastcgi.py exiting because the buffer was full')
run_exit_tasks()
ExitProcess(0)
elif restart.match(filename):
log('wfastcgi.py exiting because %s has changed, matching %s' % (filename, restart_regex))
# we call ExitProcess directly to quickly shutdown the whole process
# because sys.exit(0) won't have an effect on the main thread.
run_exit_tasks()
ExitProcess(0)
restart = re.compile(restart_regex)
start_new_thread(watcher, (path, restart))
def get_wsgi_handler(handler_name):
if not handler_name:
raise Exception('WSGI_HANDLER env var must be set')
if not isinstance(handler_name, str):
handler_name = to_str(handler_name)
module_name, _, callable_name = handler_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list = [(callable_name, should_call)]
handler = None
last_tb = ''
while module_name:
try:
handler = __import__(module_name, fromlist=[name_list[0][0]])
last_tb = ''
for name, should_call in name_list:
handler = getattr(handler, name)
if should_call:
handler = handler()
break
except ImportError:
module_name, _, callable_name = module_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list.insert(0, (callable_name, should_call))
handler = None
last_tb = ': ' + traceback.format_exc()
if handler is None:
raise ValueError('"%s" could not be imported%s' % (handler_name, last_tb))
return handler
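# Editor-added illustration (module and attribute names hypothetical) of
# WSGI_HANDLER values that get_wsgi_handler() resolves:
#   WSGI_HANDLER=mysite.wsgi.application  -> imports mysite.wsgi and uses its
#                                            'application' attribute
#   WSGI_HANDLER=myapp.make_app()         -> imports myapp, calls make_app()
#                                            and uses the returned object
# A trailing '()' asks the resolver to call the named attribute and use its
# return value as the WSGI application.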
def read_wsgi_handler(physical_path):
global APPINSIGHT_CLIENT
env = get_environment(physical_path)
os.environ.update(env)
for path in (v for k, v in env.items() if k.lower() == 'pythonpath'):
# Expand environment variables manually.
expanded_path = re.sub(
'%(\\w+?)%',
lambda m: os.getenv(m.group(1), ''),
path
)
sys.path.extend(fs_encode(p) for p in expanded_path.split(';') if p)
handler = get_wsgi_handler(os.getenv("WSGI_HANDLER"))
instr_key = os.getenv("APPINSIGHTS_INSTRUMENTATIONKEY")
if instr_key:
try:
# Attempt the import after updating sys.path - sites must
# include applicationinsights themselves.
from applicationinsights.requests import WSGIApplication
except ImportError:
maybe_log("Failed to import applicationinsights: " + traceback.format_exc())
else:
handler = WSGIApplication(instr_key, handler)
APPINSIGHT_CLIENT = handler.client
# Ensure we will flush any remaining events when we exit
on_exit(handler.client.flush)
return env, handler
class handle_response(object):
"""A context manager for handling the response. This will ensure that
exceptions in the handler are correctly reported, and the FastCGI request is
properly terminated.
"""
def __init__(self, stream, record, get_output, get_errors):
self.stream = stream
self.record = record
self._get_output = get_output
self._get_errors = get_errors
self.error_message = ''
self.fatal_errors = False
self.physical_path = ''
self.header_bytes = None
self.sent_headers = False
def __enter__(self):
record = self.record
record.params['wsgi.input'] = BytesIO(record.params['wsgi.input'])
record.params['wsgi.version'] = (1, 0)
record.params['wsgi.url_scheme'] = 'https' if record.params.get('HTTPS', '').lower() == 'on' else 'http'
record.params['wsgi.multiprocess'] = True
record.params['wsgi.multithread'] = False
record.params['wsgi.run_once'] = False
self.physical_path = record.params.get('APPL_PHYSICAL_PATH', os.path.dirname(__file__))
if 'HTTP_X_ORIGINAL_URL' in record.params:
# We've been re-written for shared FastCGI hosting, so send the
# original URL as PATH_INFO.
record.params['PATH_INFO'] = record.params['HTTP_X_ORIGINAL_URL']
record.params['wsgi.path_info'] = record.params['wfastcgi.http_x_original_url']
# PATH_INFO is not supposed to include the query parameters, so remove them
record.params['PATH_INFO'] = record.params['PATH_INFO'].partition('?')[0]
record.params['wsgi.path_info'] = record.params['wsgi.path_info'].partition(wsgi_encode('?'))[0]
return self
def __exit__(self, exc_type, exc_value, exc_tb):
# Send any error message on FCGI_STDERR.
if exc_type and exc_type is not _ExitException:
error_msg = "%s:\n\n%s\n\nStdOut: %s\n\nStdErr: %s" % (
self.error_message or 'Error occurred',
''.join(traceback.format_exception(exc_type, exc_value, exc_tb)),
self._get_output(),
self._get_errors(),
)
if not self.header_bytes or not self.sent_headers:
self.header_bytes = wsgi_encode('Status: 500 Internal Server Error\r\n')
self.send(FCGI_STDERR, wsgi_encode(error_msg))
# Best effort at writing to the log. It's more important to
# finish the response or the user will only see a generic 500
# error.
maybe_log(error_msg)
# End the request. This has to run in both success and failure cases.
self.send(FCGI_END_REQUEST, zero_bytes(8), streaming=False)
# Remove the request from our global dict
del _REQUESTS[self.record.req_id]
# Suppress all exceptions unless requested
return not self.fatal_errors
@staticmethod
def _decode_header(key, value):
if not isinstance(key, str):
key = wsgi_decode(key)
if not isinstance(value, str):
value = wsgi_decode(value)
return key, value
def start(self, status, headers, exc_info=None):
"""Starts sending the response. The response is ended when the context
manager exits."""
if exc_info:
try:
if self.sent_headers:
# We have to re-raise if we've already started sending data.
raise exception_with_traceback(exc_info[1], exc_info[2])
finally:
exc_info = None
elif self.header_bytes:
raise Exception('start_response has already been called')
if not isinstance(status, str):
status = wsgi_decode(status)
header_text = 'Status: %s\r\n' % status
if headers:
header_text += ''.join('%s: %s\r\n' % handle_response._decode_header(*i) for i in headers)
self.header_bytes = wsgi_encode(header_text + '\r\n')
return lambda content: self.send(FCGI_STDOUT, content)
def send(self, resp_type, content, streaming=True):
'''Sends part of the response.'''
if not self.sent_headers:
if not self.header_bytes:
raise Exception("start_response has not yet been called")
self.sent_headers = True
send_response(self.stream, self.record.req_id, FCGI_STDOUT, self.header_bytes)
self.header_bytes = None
return send_response(self.stream, self.record.req_id, resp_type, content, streaming)
_REQUESTS = {}
def main():
initialized = False
log('wfastcgi.py %s started' % __version__)
log('Python version: %s' % sys.version)
try:
fcgi_stream = sys.stdin.detach() if sys.version_info[0] >= 3 else sys.stdin
try:
import msvcrt
msvcrt.setmode(fcgi_stream.fileno(), os.O_BINARY)
except ImportError:
pass
while True:
record = read_fastcgi_record(fcgi_stream)
if not record:
continue
errors = sys.stderr = sys.__stderr__ = record.params['wsgi.errors'] = StringIO()
output = sys.stdout = sys.__stdout__ = StringIO()
with handle_response(fcgi_stream, record, output.getvalue, errors.getvalue) as response:
if not initialized:
log('wfastcgi.py %s initializing' % __version__)
os.chdir(response.physical_path)
sys.path[0] = '.'
# Initialization errors should be treated as fatal.
response.fatal_errors = True
response.error_message = 'Error occurred while reading WSGI handler'
env, handler = read_wsgi_handler(response.physical_path)
response.error_message = 'Error occurred starting file watcher'
start_file_watcher(response.physical_path, env.get('WSGI_RESTART_FILE_REGEX'))
# Enable debugging if possible. Default to local-only, but
# allow a web.config to override where we listen
ptvsd_secret = env.get('WSGI_PTVSD_SECRET')
if ptvsd_secret:
ptvsd_address = (env.get('WSGI_PTVSD_ADDRESS') or 'localhost:5678').split(':', 2)
try:
ptvsd_port = int(ptvsd_address[1])
except LookupError:
ptvsd_port = 5678
except ValueError:
log('"%s" is not a valid port number for debugging' % ptvsd_address[1])
ptvsd_port = 0
if ptvsd_address[0] and ptvsd_port:
try:
import ptvsd
except ImportError:
log('unable to import ptvsd to enable debugging')
else:
addr = ptvsd_address[0], ptvsd_port
ptvsd.enable_attach(secret=ptvsd_secret, address=addr)
log('debugging enabled on %s:%s' % addr)
response.error_message = ''
response.fatal_errors = False
log('wfastcgi.py %s initialized' % __version__)
initialized = True
os.environ.update(env)
# SCRIPT_NAME + PATH_INFO is supposed to be the full path
# (http://www.python.org/dev/peps/pep-0333/) but by default
# (http://msdn.microsoft.com/en-us/library/ms525840(v=vs.90).aspx)
# IIS is sending us the full URL in PATH_INFO, so we need to
# clear the script name here
if 'AllowPathInfoForScriptMappings' not in os.environ:
record.params['SCRIPT_NAME'] = ''
record.params['wsgi.script_name'] = wsgi_encode('')
# correct SCRIPT_NAME and PATH_INFO if we are told what our SCRIPT_NAME should be
if 'SCRIPT_NAME' in os.environ and record.params['PATH_INFO'].lower().startswith(os.environ['SCRIPT_NAME'].lower()):
record.params['SCRIPT_NAME'] = os.environ['SCRIPT_NAME']
record.params['PATH_INFO'] = record.params['PATH_INFO'][len(record.params['SCRIPT_NAME']):]
record.params['wsgi.script_name'] = wsgi_encode(record.params['SCRIPT_NAME'])
record.params['wsgi.path_info'] = wsgi_encode(record.params['PATH_INFO'])
# Send each part of the response to FCGI_STDOUT.
# Exceptions raised in the handler will be logged by the context
# manager and we will then wait for the next record.
result = handler(record.params, response.start)
try:
for part in result:
if part:
response.send(FCGI_STDOUT, part)
finally:
if hasattr(result, 'close'):
result.close()
except _ExitException:
pass
except Exception:
maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
except BaseException:
maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
raise
finally:
run_exit_tasks()
maybe_log('wfastcgi.py %s closed' % __version__)
def _run_appcmd(args):
from subprocess import check_call, CalledProcessError
if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
appcmd = sys.argv[1:]
else:
appcmd = [os.path.join(os.getenv('SystemRoot'), 'system32', 'inetsrv', 'appcmd.exe')]
if not os.path.isfile(appcmd[0]):
print('IIS configuration tool appcmd.exe was not found at', appcmd, file=sys.stderr)
return -1
args = appcmd + args
try:
return check_call(args)
except CalledProcessError as ex:
print('''An error occurred running the command:
%r
Ensure your user has sufficient privileges and try again.''' % args, file=sys.stderr)
return ex.returncode
def enable():
executable = '"' + sys.executable + '"' if ' ' in sys.executable else sys.executable
quoted_file = '"' + __file__ + '"' if ' ' in __file__ else __file__
res = _run_appcmd([
"set", "config", "/section:system.webServer/fastCGI",
"/+[fullPath='" + executable + "', arguments='" + quoted_file + "', signalBeforeTerminateSeconds='30']"
])
if res == 0:
print('"%s|%s" can now be used as a FastCGI script processor' % (executable, quoted_file))
return res
def disable():
executable = '"' + sys.executable + '"' if ' ' in sys.executable else sys.executable
quoted_file = '"' + __file__ + '"' if ' ' in __file__ else __file__
res = _run_appcmd([
"set", "config", "/section:system.webServer/fastCGI",
"/-[fullPath='" + executable + "', arguments='" + quoted_file + "', signalBeforeTerminateSeconds='30']"
])
if res == 0:
print('"%s|%s" is no longer registered for use with FastCGI' % (executable, quoted_file))
return res
if __name__ == '__main__':
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cairis.core.ARM import *
from cairis.daemon.CairisHTTPError import ARMHTTPError, ObjectNotFoundHTTPError, MalformedJSONHTTPError, MissingParameterHTTPError, \
OverwriteNotAllowedHTTPError, SilentHTTPError
import cairis.core.MisuseCaseFactory
from cairis.core.MisuseCaseParameters import MisuseCaseParameters
from cairis.core.MisuseCase import MisuseCase
from cairis.core.MisuseCaseEnvironmentProperties import MisuseCaseEnvironmentProperties
from cairis.core.RiskParameters import RiskParameters
from cairis.data.AssetDAO import AssetDAO
from cairis.data.CairisDAO import CairisDAO
from cairis.core.Risk import Risk
from cairis.misc.EnvironmentModel import EnvironmentModel
from cairis.tools.JsonConverter import json_deserialize
from cairis.tools.ModelDefinitions import RiskModel, MisuseCaseModel, MisuseCaseEnvironmentPropertiesModel
from cairis.tools.PseudoClasses import RiskScore, RiskRating
from cairis.tools.SessionValidator import check_required_keys, get_fonts
__author__ = 'Robin Quetin, Shamal Faily'
class RiskDAO(CairisDAO):
def __init__(self, session_id):
CairisDAO.__init__(self, session_id)
def get_risks(self, constraint_id=-1, simplify=True, skip_misuse=False):
try:
risks = self.db_proxy.getRisks(constraintId=constraint_id)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
riskKeys = sorted(risks.keys())
riskList = []
for key in riskKeys:
value = risks[key]
      if value.theMisuseCase and not skip_misuse:
value.theMisuseCase = self.get_misuse_case_by_risk_name(value.theName, simplify=False)
riskList.append(self.simplify(value))
return riskList
def get_risks_summary(self):
try:
risks = self.db_proxy.getRisksSummary()
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
return risks
def get_risk_names(self):
risks = self.get_risks(skip_misuse=True)
    # get_risks returns a list of Risk objects rather than a dict
    risk_names = [risk.theName for risk in risks]
return risk_names
def risk_model_elements(self,envName):
try:
return self.db_proxy.riskModelElements(envName)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def get_risk_by_name(self, name, simplify=True, skip_misuse=False):
"""
:rtype : Risk
"""
try:
riskId = self.db_proxy.getDimensionId(name,'risk')
risks = self.db_proxy.getRisks(riskId)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
if risks is not None:
found_risk = risks.get(name)
if found_risk is None:
self.close()
raise ObjectNotFoundHTTPError(obj='The provided risk name')
if found_risk.theMisuseCase and not skip_misuse:
found_risk.theMisuseCase = self.get_misuse_case_by_risk_name(found_risk.theName, simplify=False)
if simplify:
found_risk = self.simplify(found_risk)
return found_risk
def get_risk_analysis_model(self, environment_name, dim_name, obj_name,model_layout):
fontName, fontSize, apFontName = get_fonts(session_id=self.session_id)
try:
riskAnalysisModel = self.db_proxy.riskAnalysisModel(environment_name, dim_name, obj_name)
tLinks = EnvironmentModel(riskAnalysisModel, environment_name, self.db_proxy, model_layout, fontName=fontName, fontSize=fontSize)
dot_code = tLinks.graph()
if not dot_code:
raise ObjectNotFoundHTTPError('The risk analysis model')
return dot_code
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
except Exception as ex:
self.close()
print(ex)
def delete_risk(self, name):
found_risk = self.get_risk_by_name(name)
try:
riskId = self.db_proxy.getDimensionId(name,'risk')
self.db_proxy.deleteRisk(riskId)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def add_risk(self, risk):
try:
self.db_proxy.nameCheck(risk.name(), 'risk')
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
params = RiskParameters(riskName=risk.name(),threatName=risk.threat(),vulName=risk.vulnerability(),mc=risk.misuseCase(),rTags=risk.tags())
try:
self.db_proxy.addRisk(params)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def update_risk(self, risk_name, risk):
params = RiskParameters(riskName=risk.theName,threatName=risk.theThreatName,vulName=risk.theVulnerabilityName,mc=risk.theMisuseCase,rTags=risk.theTags)
try:
riskId = self.db_proxy.getDimensionId(risk_name,'risk')
params.setId(riskId)
self.db_proxy.updateRisk(params)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def check_existing_risk(self, risk_name):
try:
self.get_risk_by_name(risk_name)
return True
except ObjectNotFoundHTTPError:
self.db_proxy.reconnect(session_id=self.session_id)
return False
# region Misuse cases
def get_misuse_cases(self, constraint_id=-1, simplify=True):
"""
:type constraint_id: int
:type simplify: bool
:rtype: dict[str,MisuseCase]
"""
try:
misuse_cases = self.db_proxy.getMisuseCases(constraintId=constraint_id)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
for key in misuse_cases:
threat_name, vuln_name = self.db_proxy.misuseCaseRiskComponents(key)
misuse_cases[key].theThreatName = threat_name
misuse_cases[key].theVulnerabilityName = vuln_name
for mcep in misuse_cases[key].environmentProperties():
envName = mcep.name()
misuse_cases[key].theObjective = self.get_misuse_case_obj_and_assets(threat_name,vuln_name,envName)
if simplify:
misuse_cases[key] = self.simplify(misuse_cases[key])
return misuse_cases
def get_misuse_case_by_name(self, misuse_case_name):
try:
misuse_cases = self.db_proxy.getMisuseCases()
for key in misuse_cases:
if key == misuse_case_name:
return misuse_cases[key]
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def get_misuse_case_by_risk_name(self, risk_name, simplify=True):
try:
riskId = self.db_proxy.getDimensionId(risk_name,'risk')
misuse_case = self.db_proxy.riskMisuseCase(riskId)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
if not misuse_case:
self.close()
raise ObjectNotFoundHTTPError('The misuse case associated with the risk')
assert isinstance(misuse_case, MisuseCase)
misuse_case = self.expand_mc_props(misuse_case)
if simplify:
misuse_case = self.simplify(misuse_case)
if hasattr(misuse_case,'theId'):
del misuse_case.theId
del misuse_case.theEnvironmentDictionary
return misuse_case
def get_misuse_case_by_threat_vulnerability(self, threat_name,vulnerability_name):
try:
misuse_case = self.db_proxy.riskMisuseCase(-1,threat_name,vulnerability_name)
if misuse_case != None:
return self.simplify(misuse_case)
else:
return self.simplify(cairis.core.MisuseCaseFactory.build(threat_name,vulnerability_name,self.db_proxy))
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def get_misuse_case_assets(self, threat_name, environment_name):
"""
:rtype : list[str]
"""
attackers = []
try:
threat_id = self.db_proxy.getDimensionId(threat_name, 'threat')
environment_id = self.db_proxy.getDimensionId(environment_name, 'environment')
attackers = self.db_proxy.threatAttackers(threat_id, environment_id)
except DatabaseProxyException as ex:
SilentHTTPError(ex.value)
except ARMException as ex:
SilentHTTPError(str(ex.value))
return attackers
def get_misuse_case_attackers(self, threat_name, environment_name):
"""
:rtype : list[str]
"""
attackers = []
try:
threat_id = self.db_proxy.getDimensionId(threat_name, 'threat')
environment_id = self.db_proxy.getDimensionId(environment_name, 'environment')
attackers = self.db_proxy.threatAttackers(threat_id, environment_id)
except DatabaseProxyException as ex:
SilentHTTPError(ex.value)
except ARMException as ex:
SilentHTTPError(str(ex.value))
return attackers
def get_misuse_case_obj_and_assets(self, threat_name, vulnerability_name, environment_name):
"""
:rtype : str, list[Asset]
"""
dao = AssetDAO(self.session_id)
threatened_assets = []
vulnerable_assets = []
try:
threatened_assets = dao.get_threatened_assets(threat_name, environment_name)
vulnerable_assets = dao.get_vulnerable_assets(vulnerability_name, environment_name)
except ObjectNotFoundHTTPError as ex:
SilentHTTPError(ex.message)
objectiveText = 'Exploit vulnerabilities in '
for idx,vulAsset in enumerate(vulnerable_assets):
objectiveText += vulAsset
if (idx != (len(vulnerable_assets) -1)):
objectiveText += ','
objectiveText += ' to threaten '
for idx,thrAsset in enumerate(threatened_assets):
objectiveText += thrAsset
if (idx != (len(threatened_assets) -1)):
objectiveText += ','
objectiveText += '.'
assets = set(threatened_assets + vulnerable_assets)
return objectiveText, list(assets)
def get_misuse_case_likelihood(self, threat_name, environment_name):
likelihood_name = 'N/A'
try:
threat_id = self.db_proxy.getDimensionId(threat_name, 'threat')
environment_id = self.db_proxy.getDimensionId(environment_name, 'environment')
likelihood_name = self.db_proxy.threatLikelihood(threat_id, environment_id)
except DatabaseProxyException as ex:
SilentHTTPError(ex.value)
except ARMException as ex:
SilentHTTPError(str(ex.value))
return likelihood_name
def get_misuse_case_severity(self, vulnerability_name, environment_name):
severity_name = 'N/A'
try:
vulnerability_id = self.db_proxy.getDimensionId(vulnerability_name, 'vulnerability')
environment_id = self.db_proxy.getDimensionId(environment_name, 'environment')
severity_name = self.db_proxy.vulnerabilitySeverity(vulnerability_id, environment_id)
except DatabaseProxyException as ex:
SilentHTTPError(ex.value)
except ARMException as ex:
SilentHTTPError(str(ex.value))
return severity_name
def expand_mc_props(self, misuse_case):
# Fetch threat and vulnerability name
try:
threat_name, vuln_name = self.db_proxy.misuseCaseRiskComponents(misuse_case.theName)
misuse_case.theThreatName = threat_name
misuse_case.theVulnerabilityName = vuln_name
except DatabaseProxyException as ex:
self.close()
      if ex.value.find('Error obtaining risk components associated with Misuse Case') != -1:
raise ObjectNotFoundHTTPError('The associated threat and vulnerability name')
else:
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
# Add objective, likelihood, severity and risk rating
for idx in range(0, len(misuse_case.theEnvironmentProperties)):
env_prop = misuse_case.theEnvironmentProperties[idx]
assert isinstance(env_prop, MisuseCaseEnvironmentProperties)
env_prop.theObjective, env_prop.theAssets = self.get_misuse_case_obj_and_assets(
misuse_case.theThreatName,
misuse_case.theVulnerabilityName,
env_prop.theEnvironmentName
)
env_prop.theLikelihood = self.get_misuse_case_likelihood(threat_name, env_prop.theEnvironmentName)
env_prop.theSeverity = self.get_misuse_case_severity(vuln_name, env_prop.theEnvironmentName)
env_prop.theRiskRating = self.get_risk_rating_by_tve(threat_name, vuln_name, env_prop.theEnvironmentName)
env_prop.theAttackers = self.get_misuse_case_attackers(threat_name, env_prop.theEnvironmentName)
misuse_case.theEnvironmentProperties[idx] = env_prop
return misuse_case
# endregion
# region Risk scores
def get_scores_by_rtve(self, risk_name, threat_name, vulnerability_name, environment_name):
try:
scores = self.db_proxy.riskScore(threat_name, vulnerability_name, environment_name, risk_name)
if len(scores) > 0:
scores = self.convert_scores(real_scores=scores)
return scores
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
def convert_scores(self, real_scores=None, fake_scores=None):
new_scores = []
if real_scores:
if len(real_scores) > 0:
for idx in range(0, len(real_scores)):
real_score = real_scores[idx]
if len(real_score) == 4:
new_score = RiskScore(response_name=real_score[0],unmit_score=real_score[1],mit_score=real_score[2],details=real_score[3])
new_scores.append(new_score)
elif fake_scores:
if len(fake_scores) > 0:
for idx in range(0, len(fake_scores)):
fake_score = fake_scores[idx]
assert isinstance(fake_score, RiskScore)
check_required_keys(fake_score, RiskScore.required)
if fake_score['unmitScore'] == -1:
fake_score['unmitScore'] = None
if fake_score['mitScore'] == -1:
fake_score['mitScore'] = None
new_score = (fake_score['responseName'], fake_score['unmitScore'], fake_score['mitScore'], fake_score['details'])
new_scores.append(new_score)
else:
self.close()
raise MissingParameterHTTPError(param_names=['scores'])
return new_scores
# endregion
# region Risk rating
def get_risk_rating_by_tve(self, threat_name, vulnerability_name, environment_name):
"""
:rtype: RiskRating
"""
try:
rating = self.db_proxy.riskRating(-1,threat_name, vulnerability_name, environment_name)
risk_rating = RiskRating(threat_name, vulnerability_name, environment_name, rating)
return risk_rating
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
except ARMException as ex:
self.close()
raise ARMHTTPError(ex)
except TypeError:
self.close()
raise ObjectNotFoundHTTPError(obj='A rating for the risk')
# endregion
def from_json(self, request):
json = request.get_json(silent=True)
if json is False or json is None:
self.close()
raise MalformedJSONHTTPError(data=request.get_data())
json_dict = json['object']
check_required_keys(json_dict, RiskModel.required)
json_dict['__python_obj__'] = RiskParameters.__module__+'.'+RiskParameters.__name__
if json_dict['theMisuseCase']:
mc_dict = json_dict['theMisuseCase']
check_required_keys(mc_dict, MisuseCaseModel.required)
mc_dict['__python_obj__'] = MisuseCaseParameters.__module__+'.'+MisuseCaseParameters.__name__
for idx in range(0, len(mc_dict['theEnvironmentProperties'])):
mcep_dict = mc_dict['theEnvironmentProperties'][idx]
check_required_keys(mcep_dict, MisuseCaseEnvironmentPropertiesModel.required)
mcep_dict['__python_obj__'] = MisuseCaseEnvironmentProperties.__module__+'.'+MisuseCaseEnvironmentProperties.__name__
mc_dict['theEnvironmentProperties'][idx] = mcep_dict
json_dict['theMisuseCase'] = mc_dict
risk = json_deserialize(json_dict)
if isinstance(risk, RiskParameters):
return risk
else:
raise MalformedJSONHTTPError()
def simplify(self, obj):
misuse_case = None
if isinstance(obj, Risk):
misuse_case = obj.theMisuseCase
elif isinstance(obj, MisuseCase):
if hasattr(obj,'theId'):
del obj.theId
del obj.theEnvironmentDictionary
misuse_case = obj
if isinstance(obj, Risk):
obj.theMisuseCase = misuse_case
del obj.theId
elif isinstance(obj, MisuseCase):
obj = misuse_case
return obj
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handles blob store uploading and serving in the front-end.
Includes a WSGI application that handles upload requests and inserts the
contents into the blob store.
"""
import base64
import cgi
import cStringIO
import datetime
import email.generator
import email.message
from email.mime import multipart
import hashlib
import logging
import os
import random
import re
import sys
import time
import urlparse
import google
import webob.exc
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api.blobstore import blobstore
from google.appengine.tools.devappserver2 import constants
# Upload URL path.
UPLOAD_URL_PATH = '_ah/upload/'
# Upload URL pattern.
_UPLOAD_URL_PATTERN = re.compile(r'/%s(.*)' % UPLOAD_URL_PATH)
# Pattern for MIME types.
_MIME_PATTERN = re.compile(r'([^/; ]+)/([^/; ]+)$')
# These are environment variables that do not make any sense to transmit because
# the values contained in them would be obsolete after the request has been
# transformed from full upload objects to blob-info records.
_STRIPPED_ENVIRON = frozenset(('HTTP_CONTENT_LENGTH',
'HTTP_CONTENT_MD5',
'HTTP_CONTENT_TYPE',
))
# These are the MIME headers that need to be removed from the external-body
# message, because we are going to set our own.
# cgi.FieldStorage makes these all lowercase.
_STRIPPED_FILE_HEADERS = frozenset(('content-type',
'content-md5',
'content-length',
))
# The maximum length of the content-type and filename fields as dictated by
# the maximum length of a string in the datastore
_MAX_STRING_NAME_LENGTH = 500
class Error(Exception):
"""Base class for upload processing errors."""
class _InvalidMIMETypeFormatError(Error):
"""MIME type was formatted incorrectly."""
class _TooManyConflictsError(Error):
"""There were too many conflicts generating a blob key."""
class _InvalidMetadataError(Error):
"""The filename or content type of the entity was not a valid UTF-8 string."""
def _get_blob_storage():
"""Gets the BlobStorage instance from the API proxy stub map.
Returns:
The BlobStorage instance as registered with blobstore API in stub map.
"""
return apiproxy_stub_map.apiproxy.GetStub('blobstore').storage
def _generate_blob_key(time_func=time.time, random_func=random.random):
"""Generate a unique blob key.
The key is generated using the current time stamp combined with a random
number. The two values are hashed with MD5 and then base-64 encoded
(url-safe). The new key is checked to see if it already exists within the
datastore and the random number is regenerated until there is no match.
Args:
time_func: Function that generates a timestamp, as a floating-point number
representing seconds since the epoch in UTC. Used for dependency injection
(allows for predictable results during tests).
random_func: Function used for generating the random number. Used for
dependency injection (allows for predictable results during tests).
Returns:
String version of the blob key that is unique within the __BlobInfo__
datastore.
Raises:
_TooManyConflictsError: There are too many name conflicts.
"""
timestamp = str(time_func())
tries = 0
while tries < 10:
number = str(random_func())
digester = hashlib.md5()
digester.update(timestamp)
digester.update(number)
blob_key = base64.urlsafe_b64encode(digester.digest())
datastore_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND, blob_key,
namespace='')
try:
datastore.Get(datastore_key)
tries += 1
except datastore_errors.EntityNotFoundError:
return blob_key
raise _TooManyConflictsError()
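# Editor-added sketch (hypothetical values, never called by the server): the
# time_func/random_func hooks make key generation deterministic, e.g. in tests.
def _example_deterministic_blob_key():
  digester = hashlib.md5()
  digester.update('1234567890.0')  # str(time_func())
  digester.update('0.5')           # str(random_func())
  # _generate_blob_key(time_func=lambda: 1234567890.0,
  #                    random_func=lambda: 0.5)
  # returns this same key, provided no __BlobInfo__ entity already uses it.
  return base64.urlsafe_b64encode(digester.digest())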
def _split_mime_type(mime_type):
"""Split MIME type into main type and subtype.
Args:
mime_type: The full MIME type string.
Returns:
(main, sub):
main: Main part of MIME type (e.g., application, image, text, etc).
sub: Subtype part of MIME type (e.g., pdf, png, html, etc).
Raises:
_InvalidMIMETypeFormatError: mime_type is incorrectly formatted.
"""
if mime_type:
match = _MIME_PATTERN.match(mime_type)
if not match:
raise _InvalidMIMETypeFormatError(
'Incorrectly formatted MIME type: %s' % mime_type)
return match.groups()
else:
return 'application', 'octet-stream'
class Application(object):
"""A WSGI middleware application for handling blobstore upload requests.
This application will handle all uploaded files in a POST request, store the
results in the blob-storage, close the upload session and forward the request
on to another WSGI application, with the environment transformed so that the
uploaded file contents are replaced with their blob keys.
"""
def __init__(self, forward_app, get_blob_storage=_get_blob_storage,
generate_blob_key=_generate_blob_key,
now_func=datetime.datetime.now):
"""Constructs a new Application.
Args:
forward_app: A WSGI application to forward successful upload requests to.
get_blob_storage: Callable that returns a BlobStorage instance. The
default is fine, but may be overridden for testing purposes.
generate_blob_key: Function used for generating unique blob keys.
now_func: Function that returns the current timestamp.
"""
self._forward_app = forward_app
self._blob_storage = get_blob_storage()
self._generate_blob_key = generate_blob_key
self._now_func = now_func
def abort(self, code, detail=None):
"""Aborts the application by raising a webob.exc.HTTPException.
Args:
code: HTTP status code int.
detail: Optional detail message str.
Raises:
webob.exc.HTTPException: Always.
"""
exception = webob.exc.status_map[code]()
if detail:
exception.detail = detail
raise exception
def store_blob(self, content_type, filename, md5_hash, blob_file, creation,
base64_encoding=False):
"""Store a supplied form-data item to the blobstore.
The appropriate metadata is stored into the datastore.
Args:
content_type: The MIME content type of the uploaded file.
filename: The filename of the uploaded file.
md5_hash: MD5 hash of the file contents, as a hashlib hash object.
blob_file: A file-like object containing the contents of the file.
creation: datetime.datetime instance to associate with new blobs creation
time. This parameter is provided so that all blobs in the same upload
form can have the same creation date.
base64_encoding: True, if the file contents are base-64 encoded.
Returns:
datastore.Entity('__BlobInfo__') associated with the upload.
Raises:
_TooManyConflictsError if there were too many name conflicts generating a
blob key.
"""
blob_key = self._generate_blob_key()
if base64_encoding:
blob_file = cStringIO.StringIO(base64.urlsafe_b64decode(blob_file.read()))
# If content_type or filename are bytes, assume UTF-8 encoding.
try:
if not isinstance(content_type, unicode):
content_type = content_type.decode('utf-8')
if not isinstance(filename, unicode):
filename = filename.decode('utf-8')
except UnicodeDecodeError:
raise _InvalidMetadataError(
'The uploaded entity contained invalid UTF-8 metadata. This may be '
'because the page containing the upload form was served with a '
'charset other than "utf-8".')
# Store the blob contents in the blobstore.
self._blob_storage.StoreBlob(blob_key, blob_file)
# Store the blob metadata in the datastore as a __BlobInfo__ entity.
blob_entity = datastore.Entity('__BlobInfo__', name=str(blob_key),
namespace='')
blob_entity['content_type'] = content_type
blob_entity['creation'] = creation
blob_entity['filename'] = filename
blob_entity['md5_hash'] = md5_hash.hexdigest()
blob_entity['size'] = blob_file.tell()
datastore.Put(blob_entity)
return blob_entity
def store_and_build_forward_message(self, form, boundary=None,
max_bytes_per_blob=None,
max_bytes_total=None,
bucket_name=None):
"""Reads form data, stores blobs data and builds the forward request.
This finds all of the file uploads in a set of form fields, converting them
into blobs and storing them in the blobstore. It also generates the HTTP
request to forward to the user's application.
Args:
form: cgi.FieldStorage instance representing the whole form derived from
original POST data.
boundary: The optional boundary to use for the resulting form. If omitted,
one is randomly generated.
max_bytes_per_blob: The maximum size in bytes that any single blob
in the form is allowed to be.
max_bytes_total: The maximum size in bytes that the total of all blobs
in the form is allowed to be.
bucket_name: The name of the Google Storage bucket to store the uploaded
files.
Returns:
A tuple (content_type, content_text), where content_type is the value of
the Content-Type header, and content_text is a string containing the body
of the HTTP request to forward to the application.
Raises:
webob.exc.HTTPException: The upload failed.
"""
message = multipart.MIMEMultipart('form-data', boundary)
creation = self._now_func()
total_bytes_uploaded = 0
created_blobs = []
mime_type_error = None
too_many_conflicts = False
upload_too_large = False
filename_too_large = False
content_type_too_large = False
# Extract all of the individual form items out of the FieldStorage.
form_items = []
# Sorting of forms is done merely to make testing a little easier since
# it means blob-keys are generated in a predictable order.
for key in sorted(form):
form_item = form[key]
if isinstance(form_item, list):
form_items.extend(form_item)
else:
form_items.append(form_item)
for form_item in form_items:
disposition_parameters = {'name': form_item.name}
variable = email.message.Message()
if form_item.filename is None:
# Copy as is
variable.add_header('Content-Type', 'text/plain')
variable.set_payload(form_item.value)
else:
# If there is no filename associated with this field it means that the
# file form field was not filled in. This blob should not be created
# and forwarded to success handler.
if not form_item.filename:
continue
disposition_parameters['filename'] = form_item.filename
try:
main_type, sub_type = _split_mime_type(form_item.type)
except _InvalidMIMETypeFormatError, ex:
mime_type_error = str(ex)
break
# Seek to the end of file and use the pos as the length.
form_item.file.seek(0, os.SEEK_END)
content_length = form_item.file.tell()
form_item.file.seek(0)
total_bytes_uploaded += content_length
if max_bytes_per_blob is not None:
if content_length > max_bytes_per_blob:
upload_too_large = True
break
if max_bytes_total is not None:
if total_bytes_uploaded > max_bytes_total:
upload_too_large = True
break
if form_item.filename is not None:
if len(form_item.filename) > _MAX_STRING_NAME_LENGTH:
filename_too_large = True
break
if form_item.type is not None:
if len(form_item.type) > _MAX_STRING_NAME_LENGTH:
content_type_too_large = True
break
# Compute the MD5 hash of the upload.
digester = hashlib.md5()
while True:
block = form_item.file.read(1 << 20)
if not block:
break
digester.update(block)
form_item.file.seek(0)
# Create the external body message containing meta-data about the blob.
external = email.message.Message()
external.add_header('Content-Type', '%s/%s' % (main_type, sub_type),
**form_item.type_options)
# NOTE: This is in violation of RFC 2616 (Content-MD5 should be the
# base-64 encoding of the binary hash, not the hex digest), but it is
# consistent with production.
blob_key = base64.urlsafe_b64encode(digester.hexdigest())
# Create header MIME message
headers = dict(form_item.headers)
for name in _STRIPPED_FILE_HEADERS:
if name in headers:
del headers[name]
headers['Content-Length'] = str(content_length)
headers[blobstore.UPLOAD_INFO_CREATION_HEADER] = (
blobstore._format_creation(creation))
headers['Content-MD5'] = blob_key
if bucket_name:
headers[blobstore.CLOUD_STORAGE_OBJECT_HEADER] = (
'/gs/%s/fake-%s' % (bucket_name, blob_key))
for key, value in headers.iteritems():
external.add_header(key, value)
# Add disposition parameters (a clone of the outer message's field).
if not external.get('Content-Disposition'):
external.add_header('Content-Disposition', 'form-data',
**disposition_parameters)
# Store the actual contents in the blobstore.
base64_encoding = (form_item.headers.get('Content-Transfer-Encoding') ==
'base64')
try:
blob_entity = self.store_blob(external['content-type'],
form_item.filename, digester,
form_item.file, creation,
base64_encoding=base64_encoding)
except _TooManyConflictsError:
too_many_conflicts = True
break
# Track created blobs in case we need to roll them back.
created_blobs.append(blob_entity)
variable.add_header('Content-Type', 'message/external-body',
access_type=blobstore.BLOB_KEY_HEADER,
blob_key=blob_entity.key().name())
variable.set_payload([external])
# Set common information.
variable.add_header('Content-Disposition', 'form-data',
**disposition_parameters)
message.attach(variable)
if (mime_type_error or too_many_conflicts or upload_too_large or
filename_too_large or content_type_too_large):
for blob in created_blobs:
datastore.Delete(blob)
if mime_type_error:
self.abort(400, detail=mime_type_error)
elif too_many_conflicts:
self.abort(500, detail='Could not generate a blob key.')
elif upload_too_large:
self.abort(413)
else:
if filename_too_large:
invalid_field = 'filename'
elif content_type_too_large:
invalid_field = 'Content-Type'
detail = 'The %s exceeds the maximum allowed length of %s.' % (
invalid_field, _MAX_STRING_NAME_LENGTH)
self.abort(400, detail=detail)
message_out = cStringIO.StringIO()
gen = email.generator.Generator(message_out, maxheaderlen=0)
gen.flatten(message, unixfrom=False)
# Get the content text out of the message.
message_text = message_out.getvalue()
content_start = message_text.find('\n\n') + 2
content_text = message_text[content_start:]
content_text = content_text.replace('\n', '\r\n')
return message.get('Content-Type'), content_text
def store_blob_and_transform_request(self, environ):
"""Stores a blob in response to a WSGI request and transforms environ.
environ is modified so that it is suitable for forwarding to the user's
application.
Args:
environ: An environ dict for the current request as defined in PEP-333.
Raises:
webob.exc.HTTPException: The upload failed.
"""
# Only permit POST.
if environ['REQUEST_METHOD'].lower() != 'post':
self.abort(405)
url_match = _UPLOAD_URL_PATTERN.match(environ['PATH_INFO'])
if not url_match:
self.abort(404)
upload_key = url_match.group(1)
# Retrieve upload session.
try:
upload_session = datastore.Get(upload_key)
except datastore_errors.EntityNotFoundError:
detail = 'No such upload session: %s' % upload_key
logging.error(detail)
self.abort(404, detail=detail)
success_path = upload_session['success_path'].encode('ascii')
max_bytes_per_blob = upload_session['max_bytes_per_blob']
max_bytes_total = upload_session['max_bytes_total']
bucket_name = upload_session.get('gs_bucket_name', None)
upload_form = cgi.FieldStorage(fp=environ['wsgi.input'],
environ=environ)
# Generate debug log message
logstrings = []
for k in sorted(upload_form):
vs = upload_form[k]
if not isinstance(vs, list):
vs = [vs]
for v in vs:
if v.filename:
logstrings.append('%s=%s' % (k, v.filename))
logging.debug('Received blobstore upload: %s', ', '.join(logstrings))
# It's ok to read the whole string in memory because the content is
# merely a reference to the blob, not the blob itself.
content_type, content_text = self.store_and_build_forward_message(
upload_form,
max_bytes_per_blob=max_bytes_per_blob,
max_bytes_total=max_bytes_total,
bucket_name=bucket_name)
datastore.Delete(upload_session)
# Ensure that certain HTTP_ variables are not forwarded.
for name in _STRIPPED_ENVIRON:
if name in environ:
del environ[name]
# Replace some HTTP headers in the forwarded environ.
environ['CONTENT_TYPE'] = content_type
environ['CONTENT_LENGTH'] = str(len(content_text))
# Forward on to success_path. Like production, only the path and query
# matter.
parsed_url = urlparse.urlsplit(success_path)
environ['PATH_INFO'] = parsed_url.path
if parsed_url.query:
environ['QUERY_STRING'] = parsed_url.query
# The user is always an administrator for the forwarded request.
environ[constants.FAKE_IS_ADMIN_HEADER] = '1'
# Set the wsgi variables
environ['wsgi.input'] = cStringIO.StringIO(content_text)
def __call__(self, environ, start_response):
"""Handles WSGI requests.
Args:
environ: An environ dict for the current request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
# Handle any errors in the blob uploader, but do not catch errors raised by
# the user's application.
try:
self.store_blob_and_transform_request(environ)
except webob.exc.HTTPException, e:
def start_response_with_exc_info(status, headers,
exc_info=sys.exc_info()):
start_response(status, headers, exc_info)
return e(environ, start_response_with_exc_info)
return self._forward_app(environ, start_response)
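# Illustrative sketch (not part of the original module): how an uploaded file
# part ends up represented in the forwarded request -- a message/external-body
# part carrying only metadata plus the blob key. 'X-AppEngine-BlobKey' stands in
# for blobstore.BLOB_KEY_HEADER here and is an assumption.
def _external_body_part_demo():
    """Build a form-data part that references a blob instead of embedding it."""
    import email.message
    external = email.message.Message()
    external.add_header('Content-Type', 'image/png')
    external.add_header('Content-Length', '12345')
    part = email.message.Message()
    part.add_header('Content-Type', 'message/external-body',
                    access_type='X-AppEngine-BlobKey',
                    blob_key='example-blob-key')
    part.add_header('Content-Disposition', 'form-data',
                    name='file', filename='example.png')
    part.set_payload([external])
    return part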
|
|
import datetime
import hashlib
import json
from dateutil.parser import parse
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from mongoengine.base import ValidationError
from crits.core.crits_mongoengine import EmbeddedSource, create_embedded_source, json_handler
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.class_mapper import class_from_id
from crits.core.handlers import csv_export
from crits.core.user_tools import is_admin, user_sources, is_user_favorite
from crits.core.user_tools import is_user_subscribed
from crits.notifications.handlers import remove_user_from_notification
from crits.raw_data.raw_data import RawData, RawDataType
from crits.services.handlers import run_triage, get_supported_services
def generate_raw_data_csv(request):
"""
Generate a CSV file of the RawData information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request,RawData)
return response
def get_id_from_link_and_version(link, version):
"""
Get the ObjectId from a link_id and version number.
:param link: The link_id of the RawData.
:type link: str
:param version: The version number of the RawData.
:type version: int
:returns: None, ObjectId
"""
raw_data = RawData.objects(link_id=link, version=version).only('id').first()
if not raw_data:
return None
else:
return raw_data.id
def get_raw_data_details(_id, analyst):
"""
Generate the data to render the RawData details template.
:param _id: The ObjectId of the RawData to get details for.
:type _id: str
:param analyst: The user requesting this information.
:type analyst: str
:returns: template (str), arguments (dict)
"""
template = None
sources = user_sources(analyst)
if not _id:
raw_data = None
else:
raw_data = RawData.objects(id=_id, source__name__in=sources).first()
if not raw_data:
template = "error.html"
args = {'error': 'raw_data not yet available or you do not have access to view it.'}
else:
raw_data.sanitize("%s" % analyst)
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, raw_data.id, 'RawData')
# subscription
subscription = {
'type': 'RawData',
'id': raw_data.id,
'subscribed': is_user_subscribed("%s" % analyst,
'RawData', raw_data.id),
}
#objects
objects = raw_data.sort_objects()
#relationships
relationships = raw_data.sort_relationships("%s" % analyst, meta=True)
# relationship
relationship = {
'type': 'RawData',
'value': raw_data.id
}
versions = len(RawData.objects(link_id=raw_data.link_id).only('id'))
#comments
comments = {'comments': raw_data.get_comments(),
'url_key': _id}
#screenshots
screenshots = raw_data.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'RawData', raw_data.id)
# services
service_list = get_supported_services('RawData')
# analysis results
service_results = raw_data.get_analysis_results()
args = {'service_list': service_list,
'objects': objects,
'relationships': relationships,
'comments': comments,
'favorite': favorite,
'relationship': relationship,
"subscription": subscription,
"screenshots": screenshots,
"versions": versions,
"service_results": service_results,
"raw_data": raw_data}
return template, args
def generate_inline_comments(_id):
"""
Generate the inline comments for RawData.
:param _id: The ObjectId of the RawData to generate inline comments for.
:type _id: str
:returns: list
"""
raw_data = RawData.objects(id=_id).first()
if not raw_data:
return []
else:
inlines = []
for i in raw_data.inlines:
html = render_to_string('inline_comment.html',
{'username': i.analyst,
'comment': i.comment,
'date': i.date,
'line': i.line,
'raw_data': {'id': _id}})
inlines.append({'line': i.line, 'html': html})
return inlines
def generate_raw_data_versions(_id):
"""
Generate a list of available versions for this RawData.
:param _id: The ObjectId of the RawData to generate versions for.
:type _id: str
:returns: list
"""
raw_data = RawData.objects(id=_id).only('link_id').first()
if not raw_data:
return []
else:
versions = []
rvs = RawData.objects(link_id=raw_data.link_id).only('id',
'title',
'version',
'data')
for rv in rvs:
link = reverse('crits.raw_data.views.raw_data_details',
args=(rv.id,))
versions.append({'title': rv.title,
'version': rv.version,
'data': rv.data,
'link': link})
return versions
def generate_raw_data_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = RawData
type_ = "raw_data"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Raw Data",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%s.views.%s_listing' % (type_,
type_),
args=('jtlist',)),
'deleteurl': reverse('crits.%s.views.%s_listing' % (type_,
type_),
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
{
'tooltip': "'All Raw Data'",
'text': "'All'",
'click': "function () {$('#raw_data_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Raw Data'",
'text': "'New'",
'click': "function () {$('#raw_data_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Raw Data'",
'text': "'In Progress'",
'click': "function () {$('#raw_data_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Raw Data'",
'text': "'Analyzed'",
'click': "function () {$('#raw_data_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Raw Data'",
'text': "'Deprecated'",
'click': "function () {$('#raw_data_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Raw Data'",
'text': "'Add Raw Data'",
'click': "function () {$('#new-raw-data').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%s_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def handle_raw_data_file(data, source_name, user=None,
description=None, title=None, data_type=None,
tool_name=None, tool_version=None, tool_details=None,
link_id=None, method='', reference='',
copy_rels=False, bucket_list=None, ticket=None):
"""
Add RawData.
:param data: The data of the RawData.
:type data: str
:param source_name: The source which provided this RawData.
:type source_name: str,
:class:`crits.core.crits_mongoengine.EmbeddedSource`,
list of :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param user: The user adding the RawData.
:type user: str
:param description: Description of the RawData.
:type description: str
:param title: Title of the RawData.
:type title: str
:param data_type: Datatype of the RawData.
:type data_type: str
:param tool_name: Name of the tool used to acquire/generate the RawData.
:type tool_name: str
:param tool_version: Version of the tool.
:type tool_version: str
:param tool_details: Details about the tool.
:type tool_details: str
:param link_id: LinkId to tie this to another RawData as a new version.
:type link_id: str
:param method: The method of acquiring this RawData.
:type method: str
:param reference: A reference to the source of this RawData.
:type reference: str
:param copy_rels: Copy relationships from the previous version to this one.
:type copy_rels: bool
:param bucket_list: Bucket(s) to add to this RawData
:type bucket_list: str(comma separated) or list.
:param ticket: Ticket(s) to add to this RawData
:type ticket: str(comma separated) or list.
:returns: dict with keys:
'success' (boolean),
'message' (str),
'_id' (str) if successful.
"""
if not data or not title or not data_type:
status = {
'success': False,
'message': 'No data object, title, or data type passed in'
}
return status
if not source_name:
return {"success" : False, "message" : "Missing source information."}
rdt = RawDataType.objects(name=data_type).first()
if not rdt:
status = {
'success': False,
'message': 'Invalid data type passed in'
}
return status
if len(data) <= 0:
status = {
'success': False,
'message': 'Data length <= 0'
}
return status
# generate md5 and timestamp
md5 = hashlib.md5(data).hexdigest()
timestamp = datetime.datetime.now()
# generate raw_data
is_rawdata_new = False
raw_data = RawData.objects(md5=md5).first()
if not raw_data:
raw_data = RawData()
raw_data.created = timestamp
raw_data.description = description
raw_data.md5 = md5
#raw_data.source = [source]
raw_data.data = data
raw_data.title = title
raw_data.data_type = data_type
raw_data.add_tool(name=tool_name,
version=tool_version,
details=tool_details)
is_rawdata_new = True
# generate new source information and add to sample
if isinstance(source_name, basestring) and len(source_name) > 0:
source = create_embedded_source(source_name,
date=timestamp,
method=method,
reference=reference,
analyst=user)
# this will handle adding a new source, or an instance automatically
raw_data.add_source(source)
elif isinstance(source_name, EmbeddedSource):
raw_data.add_source(source_name, method=method, reference=reference)
elif isinstance(source_name, list) and len(source_name) > 0:
for s in source_name:
if isinstance(s, EmbeddedSource):
raw_data.add_source(s, method=method, reference=reference)
#XXX: need to validate this is a UUID
if link_id:
raw_data.link_id = link_id
if copy_rels:
rd2 = RawData.objects(link_id=link_id).first()
if rd2:
if len(rd2.relationships):
raw_data.save(username=user)
raw_data.reload()
for rel in rd2.relationships:
# Get object to relate to.
rel_item = class_from_id(rel.rel_type, rel.rel_object_id)
if rel_item:
raw_data.add_relationship(rel_item,
rel.relationship,
rel_date=rel.relationship_date,
analyst=user)
raw_data.version = len(RawData.objects(link_id=link_id)) + 1
if bucket_list:
raw_data.add_bucket_list(bucket_list, user)
if ticket:
raw_data.add_ticket(ticket, user)
# save raw_data
raw_data.save(username=user)
# run raw_data triage
if is_rawdata_new:
raw_data.reload()
run_triage(raw_data, user)
status = {
'success': True,
'message': 'Uploaded raw_data',
'_id': raw_data.id,
'object': raw_data
}
return status
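# Hypothetical usage sketch (not part of the original handlers): requires a
# configured CRITs/MongoDB backend and an existing RawDataType named "Text";
# the source, user, and bucket values below are made-up examples.
def _example_add_raw_data():
    """Add a small text blob as RawData and return its ObjectId on success."""
    result = handle_raw_data_file(
        data="GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n",
        source_name="ExampleSource",
        user="analyst1",
        description="Sample HTTP request",
        title="example-request",
        data_type="Text",
        tool_name="manual",
        bucket_list="http,example")
    return result.get('_id') if result['success'] else None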
def update_raw_data_tool_details(_id, details, analyst):
"""
Update the RawData tool details.
:param _id: ObjectId of the RawData to update.
:type _id: str
:param details: The details to set.
:type details: str
:param analyst: The user updating the details.
:type analyst: str
:returns: None on success, or the ValidationError if the save fails.
"""
raw_data = RawData.objects(id=_id).first()
raw_data.tool.details = details
try:
raw_data.save(username=analyst)
return None
except ValidationError, e:
return e
def update_raw_data_tool_name(_id, name, analyst):
"""
Update the RawData tool name.
:param _id: ObjectId of the RawData to update.
:type _id: str
:param name: The name to set.
:type name: str
:param analyst: The user updating the name.
:type analyst: str
:returns: None on success, or the ValidationError if the save fails.
"""
raw_data = RawData.objects(id=_id).first()
raw_data.tool.name = name
try:
raw_data.save(username=analyst)
return None
except ValidationError, e:
return e
def update_raw_data_type(_id, data_type, analyst):
"""
Update the RawData data type.
:param _id: ObjectId of the RawData to update.
:type _id: str
:param data_type: The data type to set.
:type data_type: str
:param analyst: The user updating the data type.
:type analyst: str
:returns: None if the data type is unknown, otherwise dict with keys "success" (boolean) and "message" (str) if failed.
"""
raw_data = RawData.objects(id=_id).first()
data_type = RawDataType.objects(name=data_type).first()
if not data_type:
return None
else:
raw_data.data_type = data_type.name
try:
raw_data.save(username=analyst)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': str(e)}
def update_raw_data_highlight_comment(_id, comment, line, analyst):
"""
Update a highlight comment.
:param _id: ObjectId of the RawData to update.
:type _id: str
:param comment: The comment to add.
:type comment: str
:param line: The line this comment is associated with.
:type line: str, int
:param analyst: The user updating the comment.
:type analyst: str
:returns: None if the RawData cannot be found, otherwise dict with keys "success" (boolean) and "message" (str) if failed.
"""
raw_data = RawData.objects(id=_id).first()
if not raw_data:
return None
else:
for highlight in raw_data.highlights:
if highlight.line == int(line):
highlight.comment = comment
try:
raw_data.save(username=analyst)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': str(e)}
return {'success': False, 'message': 'Could not find highlight.'}
def update_raw_data_highlight_date(_id, date, line, analyst):
"""
Update a highlight date.
:param _id: ObjectId of the RawData to update.
:type _id: str
:param date: The date to set.
:type date: str
:param line: The line this date is associated with.
:type line: str, int
:param analyst: The user updating the date.
:type analyst: str
:returns: None if the RawData cannot be found, otherwise dict with keys "success" (boolean) and "message" (str) if failed.
"""
raw_data = RawData.objects(id=_id).first()
if not raw_data:
return None
else:
for highlight in raw_data.highlights:
if highlight.line == int(line):
highlight.line_date = parse(date, fuzzy=True)
try:
raw_data.save(username=analyst)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': str(e)}
return {'success': False, 'message': 'Could not find highlight.'}
def new_inline_comment(_id, comment, line_num, analyst):
"""
Add a new inline comment.
:param _id: ObjectId of the RawData to update.
:type _id: str
:param comment: The comment to add.
:type comment: str
:param line_num: The line this comment is associated with.
:type line_num: str, int
:param analyst: The user adding this comment.
:type analyst: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"line" (int),
"html" (str)
:raises: ValidationError
"""
raw_data = RawData.objects(id=_id).first()
raw_data.add_inline_comment(comment, line_num, analyst)
try:
raw_data.save(username=analyst)
html = render_to_string('inline_comment.html',
{'username': analyst,
'comment': comment,
'date': datetime.datetime.now(),
'line': line_num,
'raw_data': {'id': _id}})
return {'success': True,
'message': 'Comment for line %s added successfully!' % line_num,
'inline': True,
'line': line_num,
'html': html,
}
except ValidationError, e:
return e
def new_highlight(_id, line_num, line_data, analyst):
"""
Add a new highlight.
:param _id: ObjectId of the RawData to update.
:type _id: str
:param line_num: The line to highlight.
:type line_num: str, int
:param line_data: The data on this line.
:type line_data: str
:param analyst: The user highlighting this line.
:type analyst: str
:returns: dict with keys "success" (boolean) and "html" (str),
or the ValidationError itself if the save fails.
"""
raw_data = RawData.objects(id=_id).first()
raw_data.add_highlight(line_num, line_data, analyst)
try:
raw_data.save(username=analyst)
html = render_to_string('raw_data_highlights.html',
{'raw_data': {'id': _id,
'highlights': raw_data.highlights}})
return {'success': True,
'html': html,
}
except ValidationError, e:
return e
def delete_highlight(_id, line_num, analyst):
"""
Delete a highlight from RawData.
:param _id: The ObjectId of the RawData to update.
:type _id: str
:param line_num: Line number of the highlight to delete.
:type line_num: str, int
:param analyst: The user deleting this highlight.
:type analyst: str
:returns: dict with keys "success" (boolean) and "html" (str)
"""
raw_data = RawData.objects(id=_id).first()
highlights = len(raw_data.highlights)
raw_data.remove_highlight(line_num, analyst)
if len(raw_data.highlights) < highlights:
try:
raw_data.save(username=analyst)
html = render_to_string('raw_data_highlights.html',
{'raw_data': {'id': _id,
'highlights': raw_data.highlights}})
return {'success': True,
'html': html,
}
except ValidationError, e:
return e
else:
return {'success': False}
def delete_raw_data(_id, username=None):
"""
Delete RawData from CRITs.
:param _id: The ObjectId of the RawData to delete.
:type _id: str
:param username: The user deleting this RawData.
:type username: str
:returns: bool
"""
if is_admin(username):
raw_data = RawData.objects(id=_id).first()
if raw_data:
raw_data.delete(username=username)
return True
else:
return False
else:
return False
def add_new_raw_data_type(data_type, analyst):
"""
Add a new RawData datatype to CRITs.
:param data_type: The new datatype to add.
:type data_type: str
:param analyst: The user adding the new datatype.
:type analyst: str
:returns: bool
"""
data_type = data_type.strip()
try:
raw_data_type = RawDataType.objects(name=data_type).first()
if raw_data_type:
return False
raw_data_type = RawDataType()
raw_data_type.name = data_type
raw_data_type.save(username=analyst)
return True
except ValidationError:
return False
|
|
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/common.py: Common code for Crypto.SelfTest.Hash
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-testing for PyCrypto hash modules"""
__revision__ = "$Id$"
import sys
import unittest
from binascii import a2b_hex, b2a_hex
from Crypto.Util.py3compat import *
# For compatibility with Python 2.1 and Python 2.2
if sys.hexversion < 0x02030000:
# Python 2.1 doesn't have a dict() function
# Python 2.2 dict() function raises TypeError if you do dict(MD5='blah')
def dict(**kwargs):
return kwargs.copy()
else:
dict = dict
class _NoDefault: pass # sentinel object
def _extract(d, k, default=_NoDefault):
"""Get an item from a dictionary, and remove it from the dictionary."""
try:
retval = d[k]
except KeyError:
if default is _NoDefault:
raise
return default
del d[k]
return retval
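# A minimal self-check sketch (not part of the original test suite) showing how
# _extract pops keys from the params dictionaries used below.
def _extract_demo():
    """_extract removes the key it returns; a default suppresses KeyError."""
    d = {'key': '00112233', 'mode': 'ECB'}
    assert _extract(d, 'key') == '00112233'
    assert d == {'mode': 'ECB'}
    assert _extract(d, 'iv', None) is None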
# Generic cipher test case
class CipherSelfTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
# Extract the parameters
params = params.copy()
self.description = _extract(params, 'description')
self.key = b(_extract(params, 'key'))
self.plaintext = b(_extract(params, 'plaintext'))
self.ciphertext = b(_extract(params, 'ciphertext'))
self.module_name = _extract(params, 'module_name', None)
mode = _extract(params, 'mode', None)
self.mode_name = str(mode)
if mode is not None:
# Block cipher
self.mode = getattr(self.module, "MODE_" + mode)
self.iv = _extract(params, 'iv', None)
if self.iv is not None: self.iv = b(self.iv)
# Only relevant for OPENPGP mode
self.encrypted_iv = _extract(params, 'encrypted_iv', None)
if self.encrypted_iv is not None:
self.encrypted_iv = b(self.encrypted_iv)
else:
# Stream cipher
self.mode = None
self.iv = None
self.extra_params = params
def shortDescription(self):
return self.description
def _new(self, do_decryption=0):
params = self.extra_params.copy()
# Handle CTR mode parameters. By default, we use Counter.new(self.module.block_size)
if hasattr(self.module, "MODE_CTR") and self.mode == self.module.MODE_CTR:
from Crypto.Util import Counter
ctr_class = _extract(params, 'ctr_class', Counter.new)
ctr_params = _extract(params, 'ctr_params', {}).copy()
if 'prefix' in ctr_params: ctr_params['prefix'] = a2b_hex(b(ctr_params['prefix']))
if 'suffix' in ctr_params: ctr_params['suffix'] = a2b_hex(b(ctr_params['suffix']))
if 'nbits' not in ctr_params:
ctr_params['nbits'] = 8*(self.module.block_size - len(ctr_params.get('prefix', '')) - len(ctr_params.get('suffix', '')))
params['counter'] = ctr_class(**ctr_params)
if self.mode is None:
# Stream cipher
return self.module.new(a2b_hex(self.key), **params)
elif self.iv is None:
# Block cipher without iv
return self.module.new(a2b_hex(self.key), self.mode, **params)
else:
# Block cipher with iv
if do_decryption and self.mode == self.module.MODE_OPENPGP:
# In PGP mode, the IV to feed for decryption is the *encrypted* one
return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.encrypted_iv), **params)
else:
return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.iv), **params)
def runTest(self):
plaintext = a2b_hex(self.plaintext)
ciphertext = a2b_hex(self.ciphertext)
ct1 = b2a_hex(self._new().encrypt(plaintext))
pt1 = b2a_hex(self._new(1).decrypt(ciphertext))
ct2 = b2a_hex(self._new().encrypt(plaintext))
pt2 = b2a_hex(self._new(1).decrypt(ciphertext))
if hasattr(self.module, "MODE_OPENPGP") and self.mode == self.module.MODE_OPENPGP:
# In PGP mode, data returned by the first encrypt()
# is prefixed with the encrypted IV.
# Here we check it and then remove it from the ciphertexts.
eilen = len(self.encrypted_iv)
self.assertEqual(self.encrypted_iv, ct1[:eilen])
self.assertEqual(self.encrypted_iv, ct2[:eilen])
ct1 = ct1[eilen:]
ct2 = ct2[eilen:]
self.assertEqual(self.ciphertext, ct1) # encrypt
self.assertEqual(self.ciphertext, ct2) # encrypt (second time)
self.assertEqual(self.plaintext, pt1) # decrypt
self.assertEqual(self.plaintext, pt2) # decrypt (second time)
class CipherStreamingSelfTest(CipherSelfTest):
def shortDescription(self):
desc = self.module_name
if self.mode is not None:
desc += " in %s mode" % (self.mode_name,)
return "%s should behave like a stream cipher" % (desc,)
def runTest(self):
plaintext = a2b_hex(self.plaintext)
ciphertext = a2b_hex(self.ciphertext)
# The cipher should work like a stream cipher
# Test counter mode encryption, 3 bytes at a time
ct3 = []
cipher = self._new()
for i in range(0, len(plaintext), 3):
ct3.append(cipher.encrypt(plaintext[i:i+3]))
ct3 = b2a_hex(b("").join(ct3))
self.assertEqual(self.ciphertext, ct3) # encryption (3 bytes at a time)
# Test counter mode decryption, 3 bytes at a time
pt3 = []
cipher = self._new()
for i in range(0, len(ciphertext), 3):
pt3.append(cipher.encrypt(ciphertext[i:i+3]))
# PY3K: This is meant to be text, do not change to bytes (data)
pt3 = b2a_hex(b("").join(pt3))
self.assertEqual(self.plaintext, pt3) # decryption (3 bytes at a time)
class CTRSegfaultTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """Regression test: %s.new(key, %s.MODE_CTR) should raise TypeError, not segfault""" % (self.module_name, self.module_name)
def runTest(self):
self.assertRaises(TypeError, self.module.new, a2b_hex(self.key), self.module.MODE_CTR)
class CTRWraparoundTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """Regression test: %s with MODE_CTR should raise OverflowError on wraparound when shortcut used""" % (self.module_name,)
def runTest(self):
from Crypto.Util import Counter
for disable_shortcut in (0, 1): # (False, True) Test CTR-mode shortcut and PyObject_CallObject code paths
for little_endian in (0, 1): # (False, True) Test both endiannesses
ctr = Counter.new(8*self.module.block_size, initial_value=2**(8*self.module.block_size)-1, little_endian=little_endian, disable_shortcut=disable_shortcut)
cipher = self.module.new(a2b_hex(self.key), self.module.MODE_CTR, counter=ctr)
block = b("\x00") * self.module.block_size
cipher.encrypt(block)
self.assertRaises(OverflowError, cipher.encrypt, block)
class CFBSegmentSizeTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
self.description = params['description']
def shortDescription(self):
return self.description
def runTest(self):
"""Regression test: m.new(key, m.MODE_CFB, segment_size=N) should require segment_size to be a multiple of 8 bits"""
for i in range(1, 8):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_CFB, segment_size=i)
self.module.new(a2b_hex(self.key), self.module.MODE_CFB, "\0"*self.module.block_size, segment_size=8) # should succeed
class RoundtripTest(unittest.TestCase):
def __init__(self, module, params):
from Crypto import Random
unittest.TestCase.__init__(self)
self.module = module
self.iv = Random.get_random_bytes(module.block_size)
self.key = b(params['key'])
self.plaintext = 100 * b(params['plaintext'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """%s .decrypt() output of .encrypt() should not be garbled""" % (self.module_name,)
def runTest(self):
for mode in (self.module.MODE_ECB, self.module.MODE_CBC, self.module.MODE_CFB, self.module.MODE_OFB, self.module.MODE_OPENPGP):
encryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
ciphertext = encryption_cipher.encrypt(self.plaintext)
if mode != self.module.MODE_OPENPGP:
decryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
else:
eiv = ciphertext[:self.module.block_size+2]
ciphertext = ciphertext[self.module.block_size+2:]
decryption_cipher = self.module.new(a2b_hex(self.key), mode, eiv)
decrypted_plaintext = decryption_cipher.decrypt(ciphertext)
self.assertEqual(self.plaintext, decrypted_plaintext)
class PGPTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
def shortDescription(self):
return "MODE_PGP was implemented incorrectly and insecurely. It's completely banished now."
def runTest(self):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_PGP)
class IVLengthTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
def shortDescription(self):
return "Check that all modes except MODE_ECB and MODE_CTR require an IV of the proper length"
def runTest(self):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CBC, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CFB, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_OFB, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_OPENPGP, "")
self.module.new(a2b_hex(self.key), self.module.MODE_ECB, "")
self.module.new(a2b_hex(self.key), self.module.MODE_CTR, "", counter=self._dummy_counter)
def _dummy_counter(self):
return "\0" * self.module.block_size
def make_block_tests(module, module_name, test_data):
tests = []
extra_tests_added = 0
for i in range(len(test_data)):
row = test_data[i]
# Build the "params" dictionary
params = {'mode': 'ECB'}
if len(row) == 3:
(params['plaintext'], params['ciphertext'], params['key']) = row
elif len(row) == 4:
(params['plaintext'], params['ciphertext'], params['key'], params['description']) = row
elif len(row) == 5:
(params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row
params.update(extra_params)
else:
raise AssertionError("Unsupported tuple size %d" % (len(row),))
# Build the display-name for the test
p2 = params.copy()
p_key = _extract(p2, 'key')
p_plaintext = _extract(p2, 'plaintext')
p_ciphertext = _extract(p2, 'ciphertext')
p_description = _extract(p2, 'description', None)
p_mode = p2.get('mode', 'ECB')
if p_mode == 'ECB':
_extract(p2, 'mode', 'ECB')
if p_description is not None:
description = p_description
elif p_mode == 'ECB' and not p2:
description = "p=%s, k=%s" % (p_plaintext, p_key)
else:
description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
name = "%s #%d: %s" % (module_name, i+1, description)
params['description'] = name
params['module_name'] = module_name
# Add extra test(s) to the test suite before the current test
if not extra_tests_added:
tests += [
CTRSegfaultTest(module, params),
CTRWraparoundTest(module, params),
CFBSegmentSizeTest(module, params),
RoundtripTest(module, params),
PGPTest(module, params),
IVLengthTest(module, params),
]
extra_tests_added = 1
# Add the current test to the test suite
tests.append(CipherSelfTest(module, params))
# When using CTR mode, test that the interface behaves like a stream cipher
if p_mode == 'CTR':
tests.append(CipherStreamingSelfTest(module, params))
# When using CTR mode, test the non-shortcut code path.
if p_mode == 'CTR' and 'ctr_class' not in params:
params2 = params.copy()
params2['description'] += " (shortcut disabled)"
ctr_params2 = params.get('ctr_params', {}).copy()
params2['ctr_params'] = ctr_params2
if 'disable_shortcut' not in params2['ctr_params']:
params2['ctr_params']['disable_shortcut'] = 1
tests.append(CipherSelfTest(module, params2))
return tests
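# Hypothetical usage sketch (not part of the original file), assuming PyCrypto's
# AES module is importable: build a suite from a single FIPS-197 ECB vector.
def _example_block_suite():
    """Return a unittest suite generated by make_block_tests for AES."""
    import unittest
    from Crypto.Cipher import AES
    test_data = [
        # (plaintext, ciphertext, key, description)
        ('00112233445566778899aabbccddeeff',
         '69c4e0d86a7b0430d8cdb78070b4c55a',
         '000102030405060708090a0b0c0d0e0f',
         'FIPS-197 AES-128 example vector'),
    ]
    return unittest.TestSuite(make_block_tests(AES, "AES", test_data))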
def make_stream_tests(module, module_name, test_data):
tests = []
for i in range(len(test_data)):
row = test_data[i]
# Build the "params" dictionary
params = {}
if len(row) == 3:
(params['plaintext'], params['ciphertext'], params['key']) = row
elif len(row) == 4:
(params['plaintext'], params['ciphertext'], params['key'], params['description']) = row
elif len(row) == 5:
(params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row
params.update(extra_params)
else:
raise AssertionError("Unsupported tuple size %d" % (len(row),))
# Build the display-name for the test
p2 = params.copy()
p_key = _extract(p2, 'key')
p_plaintext = _extract(p2, 'plaintext')
p_ciphertext = _extract(p2, 'ciphertext')
p_description = _extract(p2, 'description', None)
if p_description is not None:
description = p_description
elif not p2:
description = "p=%s, k=%s" % (p_plaintext, p_key)
else:
description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
name = "%s #%d: %s" % (module_name, i+1, description)
params['description'] = name
params['module_name'] = module_name
# Add the test to the test suite
tests.append(CipherSelfTest(module, params))
tests.append(CipherStreamingSelfTest(module, params))
return tests
# vim:set ts=4 sw=4 sts=4 expandtab:
|
|
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
# Python3
from queue import Empty
except ImportError:
# Python2
from Queue import Empty
import math
import itertools
import sys
import threading
import time
from catkin_tools.common import disable_wide_log
from catkin_tools.common import format_time_delta
from catkin_tools.common import format_time_delta_short
from catkin_tools.common import remove_ansi_escape
from catkin_tools.common import terminal_width
from catkin_tools.common import wide_log
from catkin_tools.notifications import notify
from catkin_tools.terminal_color import fmt
from catkin_tools.terminal_color import sanitize
from catkin_tools.terminal_color import ColorMapper
from catkin_tools.execution import job_server
# This map translates more human-readable format strings into colorized versions
_color_translation_map = {
# 'output': 'colorized_output'
'': fmt('@!' + sanitize('') + '@|'),
# Job starting
"Starting >>> {:<{}}":
fmt("Starting @!@{gf}>>>@| @!@{cf}{:<{}}@|"),
# Job finishing
"Finished <<< {:<{}} [ {} ]":
fmt("@!@{kf}Finished@| @{gf}<<<@| @{cf}{:<{}}@| [ @{yf}{}@| ]"),
"Failed <<< {:<{}} [ {} ]":
fmt("@!@{rf}Failed@| @{rf}<<<@| @{cf}{:<{}}@| [ @{yf}{}@| ]"),
# Job abandoning
"Abandoned <<< {:<{}} [ {} ]":
fmt("@!@{rf}Abandoned@| @{rf}<<<@| @{cf}{:<{}}@| [ @{yf}{}@| ]"),
"Depends on failed job {}":
fmt("@{yf}Depends on failed job @!{}@|"),
"Depends on failed job {} via {}":
fmt("@{yf}Depends on failed job @!{}@| @{yf}via @!{}@|"),
# Stage finishing
"Starting >> {}:{}":
fmt("Starting @{gf} >>@| @{cf}{}@|:@{bf}{}@|"),
"Subprocess > {}:{} `{}`":
fmt("Subprocess @!@{gf}>@| @{cf}{}@|:@{bf}{}@| @!@{kf}`{}`@|"),
"Finished << {}:{}":
fmt("@!@{kf}Finished@| @{gf} <<@| @{cf}{}@|:@{bf}{}@|"),
"Failed << {}:{:<{}} [ Exited with code {} ]":
fmt("@!@{rf}Failed@| @{rf} <<@| @{cf}{}@|:@{bf}{:<{}}@|[ @{yf}Exited with code @!@{yf}{}@| ]"),
"Output << {}:{} {}":
fmt("@!@{kf}Output@| @!@{kf} <<@| @{cf}{}@|:@{bf}{}@| @!@{kf}{}@|"),
"Warnings << {}:{} {}":
fmt("@!@{yf}Warnings@| @{yf} <<@| @{cf}{}@|:@{bf}{}@| @!@{yf}{}@|"),
"Errors << {}:{} {}":
fmt("@!@{rf}Errors@| @{rf} <<@| @{cf}{}@|:@{bf}{}@| @!@{rf}{}@|"),
# Interleaved
"[{}:{}] ":
fmt("[@{cf}{}@|:@{bf}{}@|] "),
# Status line
"[{} {} s] [{}/{} complete] [{}/{} jobs] [{} queued]":
fmt("[@{pf}{}@| - @{yf}{}@|] [@!@{gf}{}@|/@{gf}{}@| complete] [@!@{gf}{}@|/@{gf}{}@| jobs] [@!@{kf}{}@| queued]"),
"[{}:{} - {}]":
fmt("[@{cf}{}@|:@{bf}{}@| - @{yf}{}@|]"),
"[{}:{} ({}%) - {}]":
fmt("[@{cf}{}@|:@{bf}{}@| @{bf}({}%)@| - @{yf}{}@|]"),
# Summary
"[{}] Summary: All {} {} succeeded!":
fmt("[{}] @/@!@{gf}Summary:@| @/All @!{}@| @/{} succeeded!@|"),
"[{}] Summary: {} of {} {} succeeded.":
fmt("[{}] @/@!@{yf}Summary:@| @/@!{}@| @/of @!{}@| @/{} succeeded.@|"),
"[{}] Warnings: None.":
fmt("[{}] @/@!@{kf}Warnings: None.@|"),
"[{}] Warnings: {} {} succeeded with warnings.":
fmt("[{}] @/@!@{yf}Warnings:@| @/@!{}@| @/{} succeeded with warnings.@|"),
"[{}] Skipped: None.":
fmt("[{}] @/@!@{kf}Skipped: None.@|"),
"[{}] Skipped: {} {} skipped.":
fmt("[{}] @/@!@{yf}Skipped:@| @/@!{}@| @/{} skipped.@|"),
"[{}] Ignored: None.":
fmt("[{}] @/@!@{kf}Ignored: None.@|"),
"[{}] Ignored: {} {} were skipped or are blacklisted.":
fmt("[{}] @/@!@{pf}Ignored:@| @/@!{}@| @/{} were skipped or are blacklisted.@|"),
"[{}] Failed: No {} failed.":
fmt("[{}] @/@!@{kf}Failed: None.@|"),
"[{}] Failed: {} {} failed.":
fmt("[{}] @/@!@{rf}Failed:@| @/@!{}@| @/{} failed.@|"),
"[{}] Abandoned: No {} were abandoned.":
fmt("[{}] @/@!@{kf}Abandoned: None.@|"),
"[{}] Abandoned: {} {} were abandoned.":
fmt("[{}] @/@!@{rf}Abandoned:@| @/@!{}@| @/{} were abandoned.@|"),
"[{}] - {}":
fmt("[{}] @{cf}{}@|"),
"[{}] Runtime: {} total.":
fmt("[{}] @/@!Runtime:@| @/{} total.@|")
}
color_mapper = ColorMapper(_color_translation_map)
clr = color_mapper.clr
def print_items_in_columns(items, n_cols):
"""Print items in columns
:param items: list of tuples (identifier, template) where the template takes `jid` as a parameter
:param n_cols: number of columns
"""
# Format all the items
formatted_items = [t.format(jid=j) for j, t in items]
# Compute the number of rows
n_items = len(items)
if n_items <= n_cols:
n_cols = 1
n_rows = int(math.ceil(n_items / float(n_cols)))
# Print each row
for r in range(n_rows):
wide_log(''.join(formatted_items[(r * n_cols):((r + 1) * n_cols)]))
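# Illustrative sketch (not part of the original module), reusing wide_log as
# imported above: three made-up job ids rendered in two columns.
def _columns_demo():
    """Print example items using the (identifier, template) convention above."""
    template = ' [done] {jid:<10}'
    items = [('pkg_a', template), ('pkg_b', template), ('pkg_c', template)]
    print_items_in_columns(items, 2)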
class ConsoleStatusController(threading.Thread):
"""Status thread for displaying events to the console.
TODO: switch to interleaved output if only one job is running
"""
def __init__(
self,
label,
job_labels,
jobs,
max_toplevel_jobs,
available_jobs,
whitelisted_jobs,
blacklisted_jobs,
event_queue,
show_notifications=False,
show_stage_events=False,
show_buffered_stdout=False,
show_buffered_stderr=True,
show_live_stdout=False,
show_live_stderr=False,
show_compact_io=False,
show_active_status=True,
show_summary=True,
show_full_summary=False,
show_repro_cmd=True,
active_status_rate=10.0,
pre_start_time=None):
"""
:param label: The label for this task (build, clean, etc)
:param job_labels: The labels to be used for the jobs (packages, tests, etc)
:param event_queue: The event queue used by an Executor
:param show_notifications: Show a libnotify notification when the jobs are finished
:param show_stage_events: Show events relating to stages in each job
:param show_buffered_stdout: Show stdout from jobs as they finish
:param show_buffered_stderr: Show stderr from jobs as they finish
:param show_live_stdout: Show stdout lines from jobs as they're generated
:param show_live_stderr: Show stderr lines from jobs as they're generated
:param show_compact_io: Don't print blank lines from redirected io
:param show_active_status: Periodically show a status line displaying the active jobs
:param show_summary: Show numbers of jobs that completed with errors and warnings
:param show_full_summary: Show lists of jobs in each termination category
:param show_repro_cmd: Show the commands to reproduce failed stages
:param active_status_rate: The rate in Hz at which the status line should be printed
:param pre_start_time: The actual start time to report, if preprocessing was done
"""
super(ConsoleStatusController, self).__init__()
self.label = label
self.job_label = job_labels[0]
self.jobs_label = job_labels[1]
self.event_queue = event_queue
self.max_toplevel_jobs = max_toplevel_jobs
self.show_notifications = show_notifications
self.show_stage_events = show_stage_events
self.show_buffered_stdout = show_buffered_stdout
self.show_buffered_stderr = show_buffered_stderr
self.show_live_stdout = show_live_stdout
self.show_live_stderr = show_live_stderr
self.show_compact_io = show_compact_io
self.show_active_status = show_active_status
self.show_full_summary = show_full_summary
self.show_summary = show_summary
self.show_repro_cmd = show_repro_cmd
self.active_status_rate = active_status_rate
self.pre_start_time = pre_start_time
self.keep_running = True
# Map from jid -> job
self.jobs = dict([(j.jid, j) for j in jobs])
self.available_jobs = available_jobs
self.blacklisted_jobs = blacklisted_jobs
self.whitelisted_jobs = whitelisted_jobs
# Compute the max job id length when combined with stage labels
self.max_jid_length = 1
if len(self.jobs) > 0:
self.max_jid_length += max(
[len(jid) + max([len(s.label) for s in job.stages] or [0])
for jid, job
in self.jobs.items()]
)
def print_exec_summary(self, completed_jobs, warned_jobs, failed_jobs):
"""
Print verbose execution summary.
"""
# Calculate the longest jid
max_jid_len = max([len(jid) for jid in self.available_jobs])
templates = {
'successful': clr(" [@!@{gf}Successful@|] @{cf}{jid:<%d}@|" % max_jid_len),
'warned': clr(" [ @!@{yf}Warned@|] @{cf}{jid:<%d}@|" % max_jid_len),
'failed': clr(" [ @!@{rf}Failed@|] @{cf}{jid:<%d}@|" % max_jid_len),
'ignored': clr(" [ @!@{kf}Ignored@|] @{cf}{jid:<%d}@|" % max_jid_len),
'abandoned': clr(" [ @!@{rf}Abandoned@|] @{cf}{jid:<%d}@|" % max_jid_len),
}
# Calculate the maximum _printed_ length for each template
max_column_len = max([
len(remove_ansi_escape(t.format(jid=("?" * max_jid_len))))
for t in templates.values()
])
# Calculate the number of columns
number_of_columns = int((terminal_width() / max_column_len) or 1)
# Construct different categories of jobs (jid -> output template)
successfuls = {}
warneds = {}
faileds = {}
ignoreds = {}
abandoneds = {}
non_whitelisted = {}
blacklisted = {}
# Give each package an output template to use
for jid in self.available_jobs:
if jid in self.blacklisted_jobs:
blacklisted[jid] = templates['ignored']
elif jid not in self.jobs:
ignoreds[jid] = templates['ignored']
elif len(self.whitelisted_jobs) > 0 and jid not in self.whitelisted_jobs:
non_whitelisted[jid] = templates['ignored']
elif jid in completed_jobs:
if jid in failed_jobs:
faileds[jid] = templates['failed']
elif jid in warned_jobs:
warneds[jid] = templates['warned']
else:
successfuls[jid] = templates['successful']
else:
abandoneds[jid] = templates['abandoned']
# Combine successfuls and ignoreds, sort by key
if len(successfuls) + len(ignoreds) > 0:
wide_log("")
wide_log(clr("[{}] Successful {}:").format(self.label, self.jobs_label))
wide_log("")
print_items_in_columns(
sorted(itertools.chain(successfuls.items(), ignoreds.items())),
number_of_columns)
else:
wide_log("")
wide_log(clr("[{}] No {} succeeded.").format(self.label, self.jobs_label))
wide_log("")
# Print out whitelisted jobs
if len(non_whitelisted) > 0:
wide_log("")
wide_log(clr("[{}] Non-whitelisted {}:").format(self.label, self.jobs_label))
wide_log("")
print_items_in_columns(sorted(non_whitelisted.items()), number_of_columns)
# Print out blacklisted jobs
if len(blacklisted) > 0:
wide_log("")
wide_log(clr("[{}] Blacklisted {}:").format(self.label, self.jobs_label))
wide_log("")
print_items_in_columns(sorted(blacklisted.items()), number_of_columns)
# Print out jobs that failed
if len(faileds) > 0:
wide_log("")
wide_log(clr("[{}] Failed {}:").format(self.label, self.jobs_label))
wide_log("")
print_items_in_columns(sorted(faileds.items()), number_of_columns)
# Print out jobs that were abandoned
if len(abandoneds) > 0:
wide_log("")
wide_log(clr("[{}] Abandoned {}:").format(self.label, self.jobs_label))
wide_log("")
print_items_in_columns(sorted(abandoneds.items()), number_of_columns)
wide_log("")
def print_compact_summary(self, completed_jobs, warned_jobs, failed_jobs):
"""Print a compact build summary."""
notification_title = ""
notification_msg = []
# Print error summary
if len(completed_jobs) == len(self.jobs) and all(completed_jobs.values()) and len(failed_jobs) == 0:
notification_title = "{} Succeeded".format(self.label.capitalize())
notification_msg.append("All {} {} succeeded!".format(len(self.jobs), self.jobs_label))
wide_log(clr('[{}] Summary: All {} {} succeeded!').format(
self.label,
len(self.jobs),
self.jobs_label))
else:
notification_msg.append("{} of {} {} succeeded.".format(
len([succeeded for jid, succeeded in completed_jobs.items() if succeeded]),
len(self.jobs), self.jobs_label))
wide_log(clr('[{}] Summary: {} of {} {} succeeded.').format(
self.label,
len([succeeded for jid, succeeded in completed_jobs.items() if succeeded]),
len(self.jobs),
self.jobs_label))
# Display number of ignored jobs (jobs which shouldn't have been built)
all_ignored_jobs = [j for j in self.available_jobs if j not in self.jobs]
if len(all_ignored_jobs) == 0:
wide_log(clr('[{}] Ignored: None.').format(
self.label))
else:
notification_msg.append("{} {} were skipped.".format(len(all_ignored_jobs), self.jobs_label))
wide_log(clr('[{}] Ignored: {} {} were skipped or are blacklisted.').format(
self.label,
len(all_ignored_jobs),
self.jobs_label))
# Display number of jobs which produced warnings
if len(warned_jobs) == 0:
wide_log(clr('[{}] Warnings: None.').format(
self.label))
else:
notification_title = "{} Succeeded with Warnings".format(self.label.capitalize())
notification_msg.append("{} {} succeeded with warnings.".format(len(warned_jobs), self.jobs_label))
wide_log(clr('[{}] Warnings: {} {} succeeded with warnings.').format(
self.label,
len(warned_jobs),
self.jobs_label))
# Display number of abandoned jobs
all_abandoned_jobs = [j for j in self.jobs if j not in completed_jobs]
if len(all_abandoned_jobs) == 0:
wide_log(clr('[{}] Abandoned: No {} were abandoned.').format(
self.label,
self.jobs_label))
else:
notification_title = "{} Incomplete".format(self.label.capitalize())
notification_msg.append("{} {} were abandoned.".format(len(all_abandoned_jobs), self.jobs_label))
wide_log(clr('[{}] Abandoned: {} {} were abandoned.').format(
self.label,
len(all_abandoned_jobs),
self.jobs_label))
# Display number of failed jobs
if len(failed_jobs) == 0:
wide_log(clr('[{}] Failed: No {} failed.').format(
self.label,
self.jobs_label))
else:
notification_title = "{} Failed".format(self.label.capitalize())
notification_msg.append("{} {} failed.".format(len(failed_jobs), self.jobs_label))
wide_log(clr('[{}] Failed: {} {} failed.').format(
self.label,
len(failed_jobs),
self.jobs_label))
if self.show_notifications:
if len(failed_jobs) != 0:
notify(notification_title, "\n".join(notification_msg), icon_image='catkin_icon_red.png')
elif len(warned_jobs) != 0:
notify(notification_title, "\n".join(notification_msg), icon_image='catkin_icon_yellow.png')
else:
notify(notification_title, "\n".join(notification_msg))
def run(self):
queued_jobs = []
active_jobs = []
completed_jobs = {}
failed_jobs = []
warned_jobs = []
cumulative_times = dict()
start_times = dict()
active_stages = dict()
start_time = self.pre_start_time or time.time()
last_update_time = time.time()
# If the status rate is too low, just disable it
if self.active_status_rate < 1E-3:
self.show_active_status = False
else:
update_duration = 1.0 / self.active_status_rate
# Disable the wide log padding if the status is disabled
if not self.show_active_status:
disable_wide_log()
while True:
# Check if we should stop
if not self.keep_running:
wide_log(clr('[{}] An internal error occurred!').format(self.label))
return
# Write a continuously-updated status line
if self.show_active_status:
# Try to get an event from the queue (non-blocking)
try:
event = self.event_queue.get(False)
except Empty:
# Determine if the status should be shown based on the desired
# status rate
elapsed_time = time.time() - last_update_time
show_status_now = elapsed_time > update_duration
if show_status_now:
# Print live status (overwrites last line)
status_line = clr('[{} {} s] [{}/{} complete] [{}/{} jobs] [{} queued]').format(
self.label,
format_time_delta_short(time.time() - start_time),
len(completed_jobs),
len(self.jobs),
job_server.running_jobs(),
job_server.max_jobs(),
len(queued_jobs) + len(active_jobs) - len(active_stages)
)
# Show failed jobs
if len(failed_jobs) > 0:
status_line += clr(' [@!@{rf}{}@| @{rf}failed@|]').format(len(failed_jobs))
# Check load / mem
if not job_server.load_ok():
status_line += clr(' [@!@{rf}High Load@|]')
if not job_server.mem_ok():
status_line += clr(' [@!@{rf}Low Memory@|]')
# Add active jobs
if len(active_jobs) == 0:
status_line += clr(' @/@!@{kf}Waiting for jobs...@|')
else:
active_labels = []
for j, (s, t, p) in active_stages.items():
d = format_time_delta_short(cumulative_times[j] + time.time() - t)
if p == '':
active_labels.append(clr('[{}:{} - {}]').format(j, s, d))
else:
active_labels.append(clr('[{}:{} ({}%) - {}]').format(j, s, p, d))
status_line += ' ' + ' '.join(active_labels)
# Print the status line
# wide_log(status_line)
wide_log(status_line, rhs='', end='\r')
sys.stdout.flush()
# Store this update time
last_update_time = time.time()
else:
time.sleep(max(0.0, min(update_duration - elapsed_time, 0.01)))
# Only continue when no event was received
continue
else:
# Try to get an event from the queue (blocking)
try:
event = self.event_queue.get(True)
except Empty:
break
# A `None` event is a signal to terminate
if event is None:
break
# Handle the received events
eid = event.event_id
if 'JOB_STATUS' == eid:
queued_jobs = event.data['queued']
active_jobs = event.data['active']
completed_jobs = event.data['completed']
# Check if all jobs have finished in some way
if all([len(event.data[t]) == 0 for t in ['pending', 'queued', 'active']]):
break
elif 'STARTED_JOB' == eid:
cumulative_times[event.data['job_id']] = 0.0
wide_log(clr('Starting >>> {:<{}}').format(
event.data['job_id'],
self.max_jid_length))
elif 'FINISHED_JOB' == eid:
duration = format_time_delta(cumulative_times[event.data['job_id']])
if event.data['succeeded']:
wide_log(clr('Finished <<< {:<{}} [ {} ]').format(
event.data['job_id'],
self.max_jid_length,
duration))
else:
failed_jobs.append(event.data['job_id'])
wide_log(clr('Failed <<< {:<{}} [ {} ]').format(
event.data['job_id'],
self.max_jid_length,
duration))
elif 'ABANDONED_JOB' == eid:
# Create a human-readable reason string
if 'DEP_FAILED' == event.data['reason']:
direct = event.data['dep_job_id'] == event.data['direct_dep_job_id']
if direct:
reason = clr('Depends on failed job {}').format(event.data['dep_job_id'])
else:
reason = clr('Depends on failed job {} via {}').format(
event.data['dep_job_id'],
event.data['direct_dep_job_id'])
elif 'PEER_FAILED' == event.data['reason']:
reason = clr('Unrelated job failed')
elif 'MISSING_DEPS' == event.data['reason']:
reason = clr('Depends on unknown jobs: {}').format(
', '.join([clr('@!{}@|').format(jid) for jid in event.data['dep_ids']]))
wide_log(clr('Abandoned <<< {:<{}} [ {} ]').format(
event.data['job_id'],
self.max_jid_length,
reason))
elif 'STARTED_STAGE' == eid:
active_stages[event.data['job_id']] = [event.data['stage_label'], event.time, '']
start_times[event.data['job_id']] = event.time
if self.show_stage_events:
wide_log(clr('Starting >> {}:{}').format(
event.data['job_id'],
event.data['stage_label']))
elif 'STAGE_PROGRESS' == eid:
active_stages[event.data['job_id']][2] = event.data['percent']
elif 'SUBPROCESS' == eid:
if self.show_stage_events:
wide_log(clr('Subprocess > {}:{} `{}`').format(
event.data['job_id'],
event.data['stage_label'],
event.data['stage_repro']))
elif 'FINISHED_STAGE' == eid:
# Get the stage duration
duration = event.time - start_times[event.data['job_id']]
cumulative_times[event.data['job_id']] += duration
# This is no longer the active stage for this job
del active_stages[event.data['job_id']]
header_border = None
header_border_file = sys.stdout
header_title = None
header_title_file = sys.stdout
lines = []
footer_title = None
footer_title_file = sys.stdout
footer_border = None
footer_border_file = sys.stdout
# Generate headers / borders for output
if event.data['succeeded']:
footer_title = clr(
'Finished << {}:{}').format(
event.data['job_id'],
event.data['stage_label'])
if len(event.data['stderr']) > 0:
# Mark that this job warned about something
if event.data['job_id'] not in warned_jobs:
warned_jobs.append(event.data['job_id'])
# Output contains warnings
header_border = clr('@!@{yf}' + '_' * (terminal_width() - 1) + '@|')
header_border_file = sys.stderr
header_title = clr(
'Warnings << {}:{} {}').format(
event.data['job_id'],
event.data['stage_label'],
event.data['logfile_filename'])
header_title_file = sys.stderr
footer_border = clr('@{yf}' + '.' * (terminal_width() - 1) + '@|')
footer_border_file = sys.stderr
else:
# Normal output, no warnings
header_title = clr(
'Output << {}:{} {}').format(
event.data['job_id'],
event.data['stage_label'],
event.data['logfile_filename'])
# Don't print footer title
if not self.show_stage_events:
footer_title = None
else:
# Output contains errors
header_border = clr('@!@{rf}' + '_' * (terminal_width() - 1) + '@|')
header_border_file = sys.stderr
header_title = clr(
'Errors << {}:{} {}').format(
event.data['job_id'],
event.data['stage_label'],
event.data['logfile_filename'])
header_title_file = sys.stderr
footer_border = clr('@{rf}' + '.' * (terminal_width() - 1) + '@|')
footer_border_file = sys.stderr
footer_title = clr(
'Failed << {}:{:<{}} [ Exited with code {} ]').format(
event.data['job_id'],
event.data['stage_label'],
max(0, self.max_jid_length - len(event.data['job_id'])),
event.data['retcode'])
footer_title_file = sys.stderr
lines_target = sys.stdout
if self.show_buffered_stdout:
if len(event.data['interleaved']) > 0:
lines = [
line for line in event.data['interleaved'].splitlines(True)
if (self.show_compact_io is False or len(line.strip()) > 0)
]
else:
header_border = None
header_title = None
footer_border = None
elif self.show_buffered_stderr:
if len(event.data['stderr']) > 0:
lines = [
line for line in event.data['stderr'].splitlines(True)
if (self.show_compact_io is False or len(line.strip()) > 0)
]
lines_target = sys.stderr
else:
header_border = None
header_title = None
footer_border = None
if len(lines) > 0:
if self.show_repro_cmd:
if event.data['repro'] is not None:
lines.append(clr('@!@{kf}{}@|\n').format(event.data['repro']))
# Print the output
if header_border:
wide_log(header_border, file=header_border_file)
if header_title:
wide_log(header_title, file=header_title_file)
if len(lines) > 0:
wide_log(''.join(lines), end='\r', file=lines_target)
if footer_border:
wide_log(footer_border, file=footer_border_file)
if footer_title:
wide_log(footer_title, file=footer_title_file)
elif 'STDERR' == eid:
if self.show_live_stderr and len(event.data['data']) > 0:
wide_log(self.format_interleaved_lines(event.data), end='\r', file=sys.stderr)
elif 'STDOUT' == eid:
if self.show_live_stdout and len(event.data['data']) > 0:
wide_log(self.format_interleaved_lines(event.data), end='\r')
elif 'MESSAGE' == eid:
wide_log(event.data['msg'])
# Print the full summary
if self.show_full_summary:
self.print_exec_summary(completed_jobs, warned_jobs, failed_jobs)
# Print a compact summary
if self.show_summary or self.show_full_summary:
self.print_compact_summary(completed_jobs, warned_jobs, failed_jobs)
# Print final runtime
wide_log(clr('[{}] Runtime: {} total.').format(
self.label,
format_time_delta(time.time() - start_time)))
def format_interleaved_lines(self, data):
if self.max_toplevel_jobs != 1:
prefix = clr('[{}:{}] ').format(
data['job_id'],
data['stage_label'])
else:
prefix = ''
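        # Blank out the live status line (carriage return + spaces + carriage return)
        # before writing the job/stage prefix, so interleaved output overwrites it cleanly.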
template = '\r{}\r{}'.format(' ' * terminal_width(), prefix)
suffix = clr('@|')
return ''.join(template + line + suffix for line in data['data'].splitlines(True))
|
|
"""Tests for the linalg._isolve.lgmres module
"""
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import pytest
from platform import python_implementation
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg._isolve import lgmres, gmres
Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
[1, -2, 1, 0, 5, 0],
[0, 1, -2, 1, 0, 0],
[0, 0, 1, -2, 1, 0],
[0, 3, 0, 1, -2, 1],
[1, 0, 0, 0, 1, -2]]))
b = array([1, 2, 3, 4, 5, 6])
count = [0]
def matvec(v):
count[0] += 1
return Am@v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
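# count[0] records how many matrix-vector products the solver has requested;
# do_solve() below resets it so each call reports the matvec count for that solve.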
def do_solve(**kw):
count[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
inner_m=6, tol=1e-14, **kw)
count_0 = count[0]
assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
return x0, count_0
class TestLGMRES:
def test_preconditioner(self):
# Check that preconditioning works
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
x1, count_1 = do_solve(M=M)
assert_(count_1 == 3)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
def test_outer_v(self):
# Check that the augmentation vectors behave as expected
outer_v = []
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
assert_(len(outer_v) > 0)
assert_(len(outer_v) <= 6)
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
prepend_outer_v=True)
assert_(count_1 == 2, count_1)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
# ---
outer_v = []
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
store_outer_Av=False)
assert_(array([v[1] is None for v in outer_v]).all())
assert_(len(outer_v) > 0)
assert_(len(outer_v) <= 6)
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
prepend_outer_v=True)
assert_(count_1 == 3, count_1)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
@pytest.mark.skipif(python_implementation() == 'PyPy',
reason="Fails on PyPy CI runs. See #9507")
def test_arnoldi(self):
np.random.seed(1234)
A = eye(2000) + rand(2000, 2000, density=5e-4)
b = np.random.rand(2000)
# The inner arnoldi should be equivalent to gmres
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]),
inner_m=15, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]),
restart=15, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
norm = np.linalg.norm(A.dot(x0) - b)
assert_(norm > 1e-4)
assert_allclose(x0, x1)
def test_cornercase(self):
np.random.seed(1234)
# Rounding error may prevent convergence with tol=0 --- ensure
# that the return values in this case are correct, and no
# exceptions are raised
for n in [3, 5, 10, 100]:
A = 2*eye(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = lgmres(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = lgmres(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = np.random.rand(n)
x, info = lgmres(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = lgmres(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye(3, format='lil')
A[1, 1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = lgmres(A, b, tol=0, maxiter=10)
assert_equal(info, 1)
def test_breakdown_with_outer_v(self):
A = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([1, 2])
x = np.linalg.solve(A, b)
v0 = np.array([1, 0])
# The inner iteration should converge to the correct solution,
# since it's in the outer vector list
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)
assert_allclose(xp, x, atol=1e-12)
def test_breakdown_underdetermined(self):
# Should find LSQ solution in the Krylov span in one inner
# iteration, despite solver breakdown from nilpotent A.
A = np.array([[0, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 0]], dtype=float)
bs = [
np.array([1, 1, 1, 1]),
np.array([1, 1, 1, 0]),
np.array([1, 1, 0, 0]),
np.array([1, 0, 0, 0]),
]
for b in bs:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b, maxiter=1)
resp = np.linalg.norm(A.dot(xp) - b)
K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
x = K.dot(y)
res = np.linalg.norm(A.dot(x) - b)
assert_allclose(resp, res, err_msg=repr(b))
def test_denormals(self):
# Check that no warnings are emitted if the matrix contains
# numbers for which 1/x has no float representation, and that
# the solver behaves properly.
A = np.array([[1, 2], [3, 4]], dtype=float)
A *= 100 * np.nextafter(0, 1)
b = np.array([1, 1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b)
if info == 0:
assert_allclose(A.dot(xp), b)
|
|
# Copyright (C) 2008 Valmantas Paliksa <walmis at balticum-tv dot lt>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from blueman.Constants import *
from gi.repository import Gtk, Gdk
import cairo
from gi.repository import GObject
import weakref
class LinearController(object):
def get_value(self, input):
return input
class BezierController(LinearController):
def __init__(self, curvature=0.5, start=0.0, end=1.0):
self.curvature = curvature
self.start = start
self.end = end
def __b(self, t, p1, p2, p3):
return (1-t)**2*p1 + 2*(1-t)*t*p2 + t**2*p3
def get_value(self, input):
return self.__b(input, self.start, self.curvature, self.end)
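# A minimal usage sketch (hypothetical `anim` is any AnimBase subclass instance):
# with p1=start, p2=curvature, p3=end the quadratic Bezier reduces to the identity
# when curvature=0.5, e.g. B(0.25) = 0.75**2*0 + 2*0.75*0.25*0.5 + 0.25**2*1 = 0.25.
#   anim.set_controller(BezierController, 0.8)   # curvature > 0.5 eases in faster
#   anim.animate(start=0.0, end=1.0, duration=400)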
class AnimBase(GObject.GObject):
__gsignals__ = {
'animation-finished' : (GObject.SignalFlags.RUN_LAST, None, ()),
}
def __init__(self, state=1.0):
GObject.GObject.__init__(self)
self._source = None
self._state = state
self.frozen = False
self.fps = 24.0
self.controller = LinearController()
def set_controller(self, cls, *params):
self.controller = cls(*params)
def _do_transition(self):
if abs(self._end-self._start) < 0.000001:
return False
self._state += self._step_size
if self._end-self._start < 0:
if self._state <= self._end:
self._state = self._end
self._state_changed(self._state)
self._source = None
self.emit("animation-finished")
return False
else:
if self._state >= self._end:
self._state = self._end
self._state_changed(self._state)
self._source = None
self.emit("animation-finished")
return False
self._state_changed(self._state)
return True
def thaw(self):
self.frozen = False
self.on_frozen(self.frozen)
def freeze(self):
self.frozen = True
self.on_frozen(self.frozen)
def on_frozen(self, is_frozen):
pass
def animate(self, start=1.0, end=0.0, duration=1000):
if self.frozen:
self.emit("animation-finished")
return
self._state = start
self._start = start
self._end = end
self._duration = duration
if self._source:
GObject.source_remove(self._source)
try:
self._step_size = (end-start) / (self.fps * (duration/1000.0))
except ZeroDivisionError:
self._state = end
return
self._state_changed(self._state)
self._source = GObject.timeout_add(int(1.0/self.fps*1000), self._do_transition)
def _state_changed(self, state):
self.state_changed(self.controller.get_value(state))
def state_changed(self, state):
pass
def get_state(self):
return self._state
def set_state(self, state):
self._state = state
self._state_changed(state)
def is_animating(self):
        return self._source is not None
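# Minimal usage sketch (assumes a running Gtk main loop; `widget`, `rgba_color` and
# `on_finished_cb` are placeholders, not part of this module):
#   fade = WidgetFade(widget, rgba_color)
#   fade.connect("animation-finished", on_finished_cb)
#   fade.animate(start=1.0, end=0.0, duration=500)   # fade the overlay over 0.5 s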
class TreeRowFade(AnimBase):
def __init__(self, tw, path, columns=None):
AnimBase.__init__(self, 1.0)
self.tw = tw
self.sig = self.tw.connect_after("draw", self.on_expose)
self.row = Gtk.TreeRowReference.new(tw.props.model, path)
self.stylecontext = tw.get_style_context()
        self.columns = columns
def unref(self):
        if self.sig is not None:
self.tw.disconnect(self.sig)
self.sig = None
def get_iter(self):
return self.tw.props.model.get_iter(self.row.get_path())
def on_expose(self, widget, cr):
if self.frozen:
return
if not self.row.valid():
self.tw.disconnect(self.sig)
self.sig = None
return
path = self.row.get_path()
area = ()
color = self.stylecontext.get_background_color(0)
if not self.columns:
columns = self.tw.get_columns()
else:
columns = self.columns
for col in columns:
rect = self.tw.get_background_area(path, col)
Gdk.cairo_get_clip_rectangle(cr)
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.clip()
cr.set_source_rgba((1.0/65535)*color.red, (1.0/65535)*color.green, (1.0/65535)*color.blue, 1.0-self.get_state())
cr.set_operator(cairo.OPERATOR_OVER)
cr.paint()
def state_changed(self, state):
self.tw.queue_draw()
#print state
class TreeRowColorFade(TreeRowFade):
def __init__(self, tw, path, color):
TreeRowFade.__init__(self, tw, path, None)
self.color = color
def do_animation_finished(self):
self.unref()
def on_expose(self, widget, cr):
if self.frozen:
return
if not self.row.valid():
self.tw.disconnect(self.sig)
self.sig = None
return
path = self.row.get_path()
area = ()
color = self.stylecontext.get_background_color(0)
if not self.columns:
columns = self.tw.get_columns()
else:
columns = self.columns
for col in columns:
rect = self.tw.get_background_area(path, col)
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.clip()
cr.set_source_rgba((1.0/65535)*self.color.red, (1.0/65535)*self.color.green, (1.0/65535)*self.color.blue, 1.0-self.get_state())
cr.set_operator(cairo.OPERATOR_OVER)
cr.paint()
class CellFade(AnimBase):
def __init__(self, tw, path, columns=None):
AnimBase.__init__(self, 1.0)
self.tw = tw
self.frozen = False
self.sig = tw.connect_after("draw", self.on_expose)
self.row = Gtk.TreeRowReference.new(tw.props.model, path)
self.selection = tw.get_selection()
self.style = Gtk.rc_get_style(tw)
self.stylecontext = tw.get_style_context()
self.columns = []
for i in columns:
self.columns.append(self.tw.get_column(i))
def unref(self):
        if self.sig is not None:
self.tw.disconnect(self.sig)
self.sig = None
def get_iter(self):
return self.tw.props.model.get_iter(self.row.get_path())
def on_expose(self, widget, cr):
if self.frozen:
return
if not self.row.valid():
self.tw.disconnect(self.sig)
self.sig = None
return
path = self.row.get_path()
area = ()
color = self.stylecontext.get_background_color(0)
for col in self.columns:
bg_rect = self.tw.get_background_area(path, col)
rect = self.tw.get_cell_area(path, col)
rect.y = bg_rect.y
rect.height = bg_rect.height
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.clip()
            if not (rect.width == 0 or rect.height == 0):
detail = "cell_even" if path[0] % 2 == 0 else "cell_odd"
if self.tw.props.rules_hint:
detail += "_ruled"
selected = self.selection.get_selected()[1] and self.tw.props.model.get_path(self.selection.get_selected()[1]) == path
Gtk.paint_flat_box(self.tw.get_style(),
cr,
Gtk.StateType.SELECTED if (selected) else Gtk.StateType.NORMAL,
0,
self.tw,
detail,
rect.x,
rect.y,
rect.width,
rect.height)
#FIXME pixmap got lost during port to gtk3
#cr.set_source_pixmap(pixmap, rect.x, rect.y)
cr.paint_with_alpha(self.get_state())
def state_changed(self, state):
self.tw.queue_draw()
#print state
class WidgetFade(AnimBase):
def __init__(self, widget, color):
AnimBase.__init__(self, 1.0)
self.widget = widget
self.color = color
self.sig = widget.connect_after("draw", self.on_expose)
def on_expose(self, window, cr):
if not self.frozen:
rect = self.widget.get_allocation()
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.clip()
cr.set_source_rgba((1.0/65535)*self.color.red, (1.0/65535)*self.color.green, (1.0/65535)*self.color.blue, 1.0-self.get_state())
cr.set_operator(cairo.OPERATOR_OVER)
cr.paint()
def state_changed(self, state):
self.widget.queue_draw()
|
|
import math
import struct
import time
import smbus # pylint: disable=E0401
global i2c
EC_SALINITY = 0x3c # EC Salinity probe I2C address
EC_MEASURE_EC = 80
EC_MEASURE_SW = 40
EC_MEASURE_TEMP = 20
EC_CALIBRATE_EC = 10
EC_CALIBRATE_SW = 8
EC_I2C = 4
EC_READ = 2
EC_WRITE = 1
EC_VERSION_REGISTER = 0                 # version register
EC_FW_VERSION_REGISTER = 1              # firmware version register
EC_MS_REGISTER = 2                      # mS register
EC_SALINITY_PSU = 6                     # salinity register
EC_TEMP_REGISTER = 10                   # temperature in C register
EC_RAW_REGISTER = 14                    # raw count register
EC_SOLUTION_REGISTER = 18               # calibration solution register
EC_CALIBRATE_EC_REGISTER = 22           # EC calibration register
EC_CALIBRATE_SW_REGISTER = 26           # seawater (SW) calibration register
EC_TEMP_COMPENSATION_REGISTER = 30      # temperature compensation register
EC_BUFFER_REGISTER = 34                 # buffer register
EC_CONFIG_REGISTER = 38                 # config register
EC_TASK_REGISTER = 39                   # task register
EC_EC_MEASUREMENT_TIME = 250            # delay between EC measurements (ms)
EC_TEMP_MEASURE_TIME = 750              # delay for temperature measurement (ms)
EC_TEMP_COMPENSATION_CONFIG_BIT = 0 # temperature compensation config bit
class ecsalinity(object):
S = 0
mS = 0
uS = 0
raw = 0
PPM_500 = 0
PPM_640 = 0
PPM_700 = 0
salinityPSU = 0
tempC = 0
tempF = 0
address = EC_SALINITY
def __init__(self, address=EC_SALINITY, i2c_bus=3, **kwargs):
global i2c
self.address = address
i2c = smbus.SMBus(i2c_bus)
# measurements
def _measure(self, EC, newTemp=None):
if newTemp is True:
self.measureTemp()
if EC is True:
self._send_command(EC_MEASURE_EC)
else:
self._send_command(EC_MEASURE_SW)
time.sleep(EC_EC_MEASUREMENT_TIME / 1000.0)
self.mS = self._read_register(EC_MS_REGISTER)
self.raw = self._read_register(EC_RAW_REGISTER)
if self.raw == 0:
self.mS = float('inf')
if math.isinf(self.mS) is not True:
self.PPM_500 = self.mS * 500
self.PPM_640 = self.mS * 640
self.PPM_700 = self.mS * 700
self.uS = self.mS * 1000
self.S = self.mS / 1000
self.salinityPSU = self._read_register(EC_SALINITY_PSU)
else:
self.mS = -1
self.PPM_500 = -1
self.PPM_640 = -1
self.PPM_700 = -1
self.uS = -1
self.S = -1
self.salinityPSU = -1
return self.mS
def measureEC(self, newTemp=None):
if newTemp is None:
return self._measure(True, self.usingTemperatureCompensation())
else:
return self._measure(True, newTemp)
def measureSW(self, newTemp=None):
if newTemp is None:
return self._measure(False, self.usingTemperatureCompensation())
else:
return self._measure(False, newTemp)
def measureTemp(self):
self._send_command(EC_MEASURE_TEMP)
time.sleep(EC_TEMP_MEASURE_TIME / 1000.0)
self.tempC = self._read_register(EC_TEMP_REGISTER)
        if self.tempC == -127.0:
self.tempF = -127.0
else:
self.tempF = ((self.tempC * 9) / 5) + 32
return self.tempC
# calibration
def calibrateEC(self, solutionEC):
self._write_register(EC_SOLUTION_REGISTER, solutionEC)
self._send_command(EC_CALIBRATE_EC)
time.sleep(EC_TEMP_MEASURE_TIME / 1000.0)
def getCalibrationEC(self):
return self._read_register(EC_CALIBRATE_EC_REGISTER)
def calibrateSW(self, solutionSW):
self._write_register(EC_SOLUTION_REGISTER, solutionSW)
self._send_command(EC_CALIBRATE_SW)
time.sleep(EC_TEMP_MEASURE_TIME / 1000.0)
def getCalibrationSW(self):
return self._read_register(EC_CALIBRATE_SW_REGISTER)
# temperature
def setTemp(self, temp_C):
self._write_register(EC_TEMP_REGISTER, temp_C)
self.tempC = temp_C
self.tempF = ((self.tempC * 9) / 5) + 32
def setTempConstant(self, b):
self._write_byte(EC_TEMP_COMPENSATION_REGISTER, b)
def getTempConstant(self):
return self._read_byte(EC_TEMP_COMPENSATION_REGISTER)
def useTemperatureCompensation(self, b):
retval = self._read_byte(EC_CONFIG_REGISTER)
retval = self._bit_set(retval, EC_TEMP_COMPENSATION_CONFIG_BIT, b)
self._write_byte(EC_CONFIG_REGISTER, retval)
def usingTemperatureCompensation(self):
retval = self._read_byte(EC_CONFIG_REGISTER)
return (retval >> EC_TEMP_COMPENSATION_CONFIG_BIT) & 0x01
# utilities
def getVersion(self):
return self._read_byte(EC_VERSION_REGISTER)
def getFirmware(self):
return self._read_byte(EC_FW_VERSION_REGISTER)
def reset(self):
n = float('nan')
self._write_register(EC_CALIBRATE_EC_REGISTER, n)
self._write_register(EC_CALIBRATE_SW_REGISTER, n)
self.setTempConstant(25)
self.useTemperatureCompensation(False)
def setI2CAddress(self, i2cAddress):
self._write_register(EC_BUFFER_REGISTER, float(i2cAddress))
self._send_command(EC_I2C)
self.address = int(i2cAddress)
def connected(self):
retval = self._read_byte(EC_VERSION_REGISTER)
if retval != 0xFF:
return True
else:
return False
def readEEPROM(self, address):
self._write_register(EC_SOLUTION_REGISTER, address)
self._send_command(EC_READ)
return self._read_register(EC_BUFFER_REGISTER)
def writeEEPROM(self, address, value):
self._write_register(EC_SOLUTION_REGISTER, address)
self._write_register(EC_BUFFER_REGISTER, value)
self._send_command(EC_WRITE)
def _bit_set(self, v, index, x):
mask = 1 << index
v &= ~mask
if x:
v |= mask
return v
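    # Worked examples of the helper above:
    #   _bit_set(0b0100, 0, True)  -> 0b0101  (set bit 0)
    #   _bit_set(0b0101, 2, False) -> 0b0001  (clear bit 2)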
def _change_register(self, r):
global i2c
i2c.write_byte(self.address, r)
time.sleep(10 / 1000.0)
def _send_command(self, command):
global i2c
i2c.write_byte_data(self.address, EC_TASK_REGISTER, command)
time.sleep(10 / 1000.0)
def _write_register(self, reg, f):
global i2c
n = self.round_total_digits(f)
fd = bytearray(struct.pack("f", n))
data = [0, 0, 0, 0]
data[0] = fd[0]
data[1] = fd[1]
data[2] = fd[2]
data[3] = fd[3]
self._change_register(reg)
i2c.write_i2c_block_data(self.address, reg, data)
time.sleep(10 / 1000.0)
def _read_register(self, reg):
global i2c
data = [0, 0, 0, 0]
self._change_register(reg)
data[0] = i2c.read_byte(self.address)
data[1] = i2c.read_byte(self.address)
data[2] = i2c.read_byte(self.address)
data[3] = i2c.read_byte(self.address)
ba = bytearray(data)
f = struct.unpack('f', ba)[0]
return self.round_total_digits(f)
def _write_byte(self, reg, val):
global i2c
i2c.write_byte_data(self.address, reg, val)
time.sleep(10 / 1000.0)
def _read_byte(self, reg):
global i2c
self._change_register(reg)
time.sleep(10 / 1000.0)
return i2c.read_byte(self.address)
def magnitude(self, x):
if math.isnan(x):
return 0
return 0 if x == 0 else int(math.floor(math.log10(abs(x)))) + 1
def round_total_digits(self, x, digits=7):
return round(x, digits - self.magnitude(x))
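# Minimal usage sketch (bus number and wiring are installation-specific; assumes an
# EC Salinity probe at the default address on I2C bus 1):
#   ec = ecsalinity(i2c_bus=1)
#   if ec.connected():
#       print(ec.measureTemp(), ec.measureEC())   # degrees C, mS/cm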
|
|
import time, copy
import os, os.path
import sys
import numpy
import pylab
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from scipy import optimize
from echem_plate_ui import *
from echem_plate_math import *
import pickle
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20120728NiFeCoTiplate1_test21Aug2012'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate1_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate1_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1'
#vshift=-.2
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9FeCoNiTi_500C_fast_CPCV_plate3_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate3'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9FeCoNiTi_500C_fast_CPCV_plate2_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate2'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCPCV_plate1_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate1_LinSubPlots2')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1'
#
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep2_plate1_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep2_plate1'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep3_plate1_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep3_plate1'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9FeCoNiTi_500C_CPCV_Plate3-rerun_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate3'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results/NiFeCoAl_F_plate3_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results/plate3/LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results/'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/20121101NiFeCoTi_P_plate3_dlist.dat'#20121031NiFeCoTi_P_plate2_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate3/LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate3'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/20121031NiFeCoTi_P_plate2_dlist.dat'#
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate2/LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate2'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/20121031NiFeCoTi_P_plate1_dlist.dat'#
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate1/LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate1'
#pl=1
#os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results/plate%d/LinSubPlots'%pl)
#savefolder='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results/plate%d' %pl
#if pl==1:
# p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results/20130425 NiFeCoLa_plate1_5959_dlist.dat';vshift=-(.187-.030)
#elif pl==2:
# p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results/20130426NiFeCoLa_plate2_5904_dlist.dat';vshift=-(.187-.028)
#elif pl==3:
# p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results/20130427 NiFeCoLa_plate3_5791_dlist.dat';vshift=-(.187-.005)
#pl=3
#os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCeLa/results/plate%d/LinSubPlots'%pl)
#savefolder='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCeLa/results/plate%d' %pl
#if pl==1:
# p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCeLa/results/20130423 NiFeCeLa_plate1_5836_dlist.dat';vshift=-(.187-.005)
#elif pl==2:
# p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCeLa/results/20130424 NiFeCeLa_plate2 5825 B_dlist.dat';vshift=-(.187-0.)
#elif pl==3:
# p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCeLa/results/20130425 NiFeCeLa_plate3_5847_dlist.dat';vshift=-(.187-.034)
#pl=3
#os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results/plate%d/LinSubPlots'%pl)
#savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results/plate%d' %pl
#if pl==1:
# p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results/20130402NiFeCoCe_Plate1_5500_dlist.dat';vshift=-(.187-.045)
#elif pl==2:
# p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results/20130403NiFeCoCe_Plate2_5498_dlist.dat';vshift=-(.187-.045)
#elif pl==3:
# p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results/20130403NiFeCoCe_Plate3_4835_dlist.dat';vshift=-(.187-.045)
#os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/parsedresults/LinSubPlots0.02')
#savefolder='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/parsedresults/fom0.02_plate123'
#p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/parsedresults/201304NiFeCoCe_compline0.02_plate123_dlist.dat';vshift=-(.187-.0)
#os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/LinSubPlots')
#savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results'
#p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/20130604NiFeCoCe_plate1_CV_6220_dlist.dat';vshift=-(.187-.043)
#pl=3
#os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130528NiFeCoCe3platerescan/results/plate%d/LinSubPlots'%pl)
#savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130528NiFeCoCe3platerescan/results/plate%d' %pl
#if pl==1:
# p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130528NiFeCoCe3platerescan/results/20130529NiFeCoCe_plate1_5577_dlist.dat';vshift=-(.187-.045)
#elif pl==2:
# p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130528NiFeCoCe3platerescan/results/20130603NiFeCoCe_plate2_5498_dlist.dat';vshift=-(.187-.045)
#elif pl==3:
# p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130528NiFeCoCe3platerescan/results/20130528NiFeCoCe_plate3_4835_dlist.dat';vshift=-(.187-0.045)
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130610NiFeCoCesingle_6321/results/LinSubPlots')
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130610NiFeCoCesingle_6321/results'
p='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130612NiFeCoCesingle_6321/results/20130612NiFeCoCe_plate1_CVpostCP_6321_dlist.dat';vshift=-(.187-0.045)
#vshift=0#.-.177#-.24
f=open(p, mode='r')
dlist=pickle.load(f)
f.close()
##filter dlist
dlist=[d for d in dlist if 'I(A)_LinSub' in d.keys()]
##making 10-sample plots of linear subtraction
cols=['k','b', 'g', 'r', 'c', 'm', 'y', 'brown', 'purple', 'grey']
smpall=numpy.array([d['Sample'] for d in dlist])
dinds=numpy.argsort(smpall)
plotcount=0
smpl=[]
pylab.figure()
for di in dinds:
d=dlist[di]
if plotcount==10:
s='_'.join([`smp` for smp in smpl])
pylab.title(s)
pylab.savefig(s)
plotcount=0
smpl=[]
pylab.figure()
for segd in d['segprops_dlist']:#[2:3]:
x=d['Ewe(V)'][segd['inds']]
y1=d['I(A)'][segd['inds']]
y2=d['I(A)_LinSub'][segd['inds']]
pylab.plot(x, y1, '--', color=cols[plotcount])
pylab.plot(x, y1-y2, ':', color=cols[plotcount])
pylab.plot(x, y2, '-', color=cols[plotcount])
break
smpl+=[d['Sample']]
plotcount+=1
###making 6-sample plots of linear subtraction
#cols=['k','b', 'g', 'r', 'c', 'm', 'y', 'brown', 'purple', 'grey']
#smpall=numpy.array([d['Sample'] for d in dlist])
#dinds=numpy.argsort(smpall)
#plotcount=1
#smpl=[]
#PLOT=1
#SAVE=1
#pylab.figure()
#for di in dinds:
# d=dlist[di]
# if PLOT:
# if not d['Sample'] in [1066,1662]:#[1889,1662]:#[1662, 582, 2073, 2077, 1141, 9, 1603, 1227, 1139, 610]:#[610,1382]:#[76,43,44,53,20,34,42,28,57,55]:#[ 30,67, 641,36,41,46,47,49,58,74]:#[1811, 1382, 1338]:
# continue
# if SAVE:
# fld, fn=os.path.split(p)
# savep=os.path.join(os.path.join(fld, 'echemplots'), fn[:-4]+'_%d.dat' %d['Sample'])
# f=open(savep, mode='w')
# pickle.dump(d, f)
# f.close()
## if plotcount==6:
## s='_'.join([`smp` for smp in smpl])
## pylab.title(s)
## pylab.savefig(s)
## plotcount=0
## smpl=[]
## pylab.clf()
#
# segd1, segd2=d['segprops_dlist']
# x=d['Ewe(V)'][segd1['inds']]
# y1=d['I(A)'][segd1['inds']]
# y2=d['I(A)_LinSub'][segd1['inds']]
# if PLOT:
# pylab.plot(x[5:], y1[5:], '-', color=cols[plotcount])
# pylab.plot(x[5:], (y1-y2)[5:], ':', color=cols[plotcount])
# d['ImaxLinSub']=numpy.max(y2)
# x=d['Ewe(V)'][segd2['inds']]
# y1=d['I(A)'][segd2['inds']]
# if PLOT:
# pylab.plot(x, y1, '--', color=cols[plotcount])
# d['Imin']=numpy.min(y1)
# d['IminFromEnd']=numpy.min(y1)-y1[-50:].mean()
# smpl+=[d['Sample']]
# plotcount+=1
####finding the riht samples to plot
#if PLOT:
# pylab.show()
#else:
# sample=numpy.array([dlist[di]['Sample'] for di in dinds])
# imin=numpy.array([dlist[di]['Imin'] for di in dinds])
# isort=numpy.argsort(imin)
# inds2=dinds[isort]
# #print [dlist[di]['Sample'] for di in inds2[:10]]
#
# imax=numpy.array([dlist[di]['ImaxLinSub'] for di in dinds])
# inds3=numpy.where((imin>-2.2e-5))[0]
# isort3=numpy.argsort(imax[inds3])
# #print sample[inds3[isort3[-10:]]]
# #print imin[inds3[isort3[-10:]]]
# #print imax[inds3[isort3[-10:]]]
#
# iminend=numpy.array([dlist[di]['IminFromEnd'] for di in dinds])
# inds4=numpy.where((iminend>-1.e-6))[0]
# isort4=numpy.argsort(imax[inds4])
# #
# #print sample[inds4[isort4[-10:]]]
# #print iminend[inds4[isort4[-10:]]]
# #print imax[inds4[isort4[-10:]]]
#
# didt=numpy.array([dlist[di]['dIdt_LinSub'] for di in dinds])
# inds5=numpy.where((didt<6.e-5)&(imax>8.e-5)&(imax<1.e-4))[0]
# isort5=numpy.argsort(iminend[inds5])
# print sample[inds5[isort5[:10]]]
# print iminend[inds5[isort5[:10]]]
# print imax[inds5[isort5[:10]]]
##save csv of FOM
##calculate V for critical I, etc
for d in dlist:
inds=d['segprops_dlist'][0]['inds']
#d['CV4fwdImax']=numpy.max(d['I(A)'][inds])
i=d['I(A)_LinSub'][inds]
v=d['Ewe(V)'][inds]
d['ImaxCVLinSub']=numpy.max(i)
vsh=v+vshift
# aveinds=numpy.where((vsh>.495)&(vsh<=.505))
# d['I500mVoverpotLinSub']=numpy.mean(i[aveinds])
aveinds=numpy.where((v>.645)&(v<=.655))
d['I650mVLinSub']=numpy.mean(i[aveinds])
vanl=[.3, .35, .4]
var=vsh
for van in vanl:
k='I%dmVLinSub' %(van*1000.,)
aveinds=numpy.where((var>van-.005)&(var<=van+.005))
d[k]=numpy.mean(i[aveinds])
# aveinds=numpy.where((v>.695)&(v<=.705))
# d['I700mVLinSub']=numpy.mean(i[aveinds])
# #vsh=v+vshift
# aveinds=numpy.where((v>.672)&(v<=.682))
# d['I677mVLinSub']=numpy.mean(i[aveinds])
#
# aveinds=numpy.where((v>.622)&(v<=.632))
# d['I627mVLinSub']=numpy.mean(i[aveinds])
# aveinds=numpy.where((v>.572)&(v<=.582))
# d['I577mVLinSub']=numpy.mean(i[aveinds])
# ###requirement to be above critical current for n consecutive points
# icrit=1.e-4
# b=numpy.int16(i>=icrit)
# n=10
# bconsec=[b[i:i+n].prod() for i in range(len(b)-n)]
# if True in bconsec:
# i=bconsec.index(True)
# d['V_IthreshCVLinSub']=v[i:i+n].mean()
# else:
# d['V_IthreshCVLinSub']=numpy.nan
#
###requirement to be above critical current for rest of scan with n outliers
nout=5
for icrit in [1.e-5, 3.e-5, 1.e-4, 1.9e-4, 3.e-4]:
k='V_IthreshCVLinSub_%d' %(icrit*1.e6)
b=numpy.where(i<icrit)[0]
if (len(i)-len(b))<(nout+1) or numpy.all(i[-nout:]<icrit):
d[k]=numpy.nan
else:
if len(b)==0:
ind=0
else:
ind=b[-nout]
ind+=nout-1
d[k]=var[max(0, ind-4):ind+4].mean()
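# The V_IthreshCVLinSub_* figures of merit computed above are the (vshift-corrected)
# potentials at which the linear-subtracted current first exceeds each critical current
# and stays above it for the rest of the scan, tolerating up to nout points below threshold.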
#savekeys=['SegIndStart_LinSub','LinLen_LinSub','Intercept_LinSub','dIdt_LinSub', 'ImaxCVLinSub', 'V_IthreshCVLinSub', 'I500mVoverpotLinSub']
savekeys=['SegIndStart_LinSub','LinLen_LinSub','Intercept_LinSub','dIdt_LinSub', 'ImaxCVLinSub', 'V_IthreshCVLinSub_300', 'V_IthreshCVLinSub_100', 'V_IthreshCVLinSub_30', 'V_IthreshCVLinSub_10', 'V_IthreshCVLinSub_190', 'I300mVLinSub', 'I350mVLinSub', 'I400mVLinSub']#'CV6fwdImax', 'I627mVLinSub', 'I577mVLinSub']
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False, folderpath=savefolder)
echemvis=form.echem
echemvis.techniquedictlist=dlist
def savefom(dlist, savefolder, key):
for d in dlist:
d['FOM']=d[key]
echemvis.writefile(p=savefolder, explab=key)
for skey in savekeys:
savefom(echemvis.techniquedictlist, savefolder, skey)
###dEdt analysis
#calcmeandEdt_dlist(dlist)
#SegSG_dlist(dlist, SGpts=10, order=1, k='Ewe(V)')
#dIdEcrit=.0005
#SegdtSG_dlist(dlist, SGpts=10, order=1, k='I(A)_LinSub', dxk='dE')
#for d in dlist:
# for segd in d['segprops_dlist'][:1]:
# y=d['I(A)_LinSub_dtSG'][segd['inds']]
# x=d['Ewe(V)_SG'][segd['inds']]
# starti=numpy.where(y<dIdEcrit)[0][-1]+1
# if starti<len(y):
# d['dIdE_aveabovecrit']=y[starti:].mean()
# d['E_dIdEcrit']=x[starti]
# else:
# d['dIdE_aveabovecrit']=numpy.nan
# d['E_dIdEcrit']=numpy.nan
# d['dIdEmax']=y.max()
#for key in ['dIdE_aveabovecrit','E_dIdEcrit', 'dIdEmax']:
# savefom(echemvis.techniquedictlist, savefolder, key)
###plot select CVs
#smps=[d['Sample'] for d in dlist]
#for sample in [4, 164, 459, 539]:
# d=dlist[smps.index(sample)]
# inds=d['segprops_dlist'][0]['inds']
# x=d['Ewe(V)'][inds][3:]
# y1=d['I(A)'][inds][3:]
# y2=d['I(A)_LinSub'][inds][3:]
# pylab.figure()
# pylab.plot(d['Ewe(V)'][3:], d['I(A)'][3:], '-', color='k')
# #pylab.plot(x, y1, '--', color=)
# pylab.plot(x, y1-y2, ':', color='r')
# pylab.plot(x, y2, '-', color='b')
# pylab.title(`sample`)
#pylab.show()
#print 'done'
###making select ample plots of dI/dt
#cols=['k','b', 'g', 'r', 'c', 'm', 'y', 'brown', 'purple', 'grey']
#smpall=numpy.array([d['Sample'] for d in dlist])
#dinds=numpy.argsort(smpall)
#plotcount=1
#labs=[]
#PLOT=1
#pylab.figure(figsize=(6, 4))
#ax=pylab.subplot(111)
#ax2=ax.twinx()
#for di in dinds:
# d=dlist[di]
# if PLOT:
# if not d['Sample'] in [541,548,546]:#[868,1334,365]:#[1413,1834,1356]:
# continue
#
#
# segd1, segd2=d['segprops_dlist']
# x=d['Ewe(V)'][segd1['inds']]
# y1=d['I(A)'][segd1['inds']]
# y2=d['I(A)_LinSub'][segd1['inds']]
# dy2=d['I(A)_LinSub_dtSG'][segd1['inds']]
# xc=d['E_dIdEcrit']
# if PLOT:
# ax.plot(x[5:], y1[5:], '--', color=cols[plotcount])
# ax.plot(x[5:], y2[5:], '-', color=cols[plotcount])
# ax2.plot(x[5:], dy2[5:], ':', color=cols[plotcount])
# #i=numpy.argmin((xc-x)**2)
# ax.plot([xc, xc], list(ax.get_ylim()), '-', color=cols[plotcount], alpha=.7)
#
# labs+=['%d:%d-%d-%d-%d' %tuple([d['Sample']]+list(d['compositions']*100.))]
# plotcount+=1
#
####finding the riht samples to plot
#if PLOT:
# pylab.title(', '.join(labs), fontsize=14)
# ax.set_xlabel('V (ref)', fontsize=14)
# ax.set_ylabel('I, raw=dashed, LinSub=solid', fontsize=14)
# ax2.set_ylabel('dI/dE, dotted', fontsize=14)
# pylab.subplots_adjust(left=.19, right=.83, bottom=.16)
# pylab.show()
#
#
#else:
# dinds2=numpy.array([di for di in dinds if not numpy.isnan(dlist[di]['E_dIdEcrit'])])
# sample=numpy.array([dlist[di]['Sample'] for di in dinds2])
#
# EdIdE=numpy.array([dlist[di]['E_dIdEcrit'] for di in dinds2])
# isortEdIdE=numpy.argsort(EdIdE)
# indsEdIdE=dinds2[isortEdIdE]
#
# avedIdE=numpy.array([dlist[di]['dIdE_aveabovecrit'] for di in dinds2])
# isortavedIdE=numpy.argsort(avedIdE)
# indsavedIdE=dinds2[isortavedIdE]
#
# maxdIdE=numpy.array([dlist[di]['dIdEmax'] for di in dinds2])
# isortmaxdIdE=numpy.argsort(maxdIdE)
# indsmaxdIdE=dinds2[isortmaxdIdE]
#
## print '%s, (%s,%s)' %(`[EdIdE[i] for i in isortEdIdE[:3]]`, `EdIdE.min()`, `EdIdE.max()`)
## print ', '.join(['%s:%s' %(`dlist[di]['Sample']`, `dlist[di]['compositions']`) for di in indsEdIdE[:3]])
#
## print '%s, (%s,%s)' %(`[avedIdE[i] for i in isortavedIdE[-3:]]`, `avedIdE.min()`, `avedIdE.max()`)
## print ', '.join(['%s:%s' %(`dlist[di]['Sample']`, `dlist[di]['compositions']`) for di in indsavedIdE[-3:]])
#
# print '%s, (%s,%s)' %(`[maxdIdE[i] for i in isortmaxdIdE[-3:]]`, `maxdIdE.min()`, `maxdIdE.max()`)
# print ', '.join(['%s:%s' %(`dlist[di]['Sample']`, `dlist[di]['compositions']`) for di in indsmaxdIdE[-3:]])
if 1:
f=open(p, mode='w')
pickle.dump(dlist, f)
f.close()
|
|
from scad import *
import math
import os
class SpindleBaseHD(SCAD_Object):
top_cyl_dia = 14.5 + 0.3
top_cyl_height = 3.7
mid_cyl_dia = 25 + 0.3
mid_cyl_height = 5.0
bot_cyl_dia = 30.2
bot_cyl_height = 2.1
spindle_height = top_cyl_height + mid_cyl_height - 1
screw_count = 6
screw_dia = 2.7
screw_offset = (mid_cyl_dia / 2.0) - (screw_dia / 2.0) - 1.0 - 0.2
screw_head_height = 1.5
screw_head_dia = 3.9
screw_height = spindle_height - screw_head_dia
magnet_size = (1/8.0) * 25.4 + 0.2
def base(self):
top_cyl = Cylinder(d=self.top_cyl_dia, h=self.top_cyl_height, center=True)
top_cyl = Translate(z=(self.top_cyl_height + self.mid_cyl_height) / 2.0)(top_cyl)
mid_cyl = Cylinder(d=self.mid_cyl_dia, h=self.mid_cyl_height, center=True)
radstep = (2 * math.pi) / self.screw_count
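        # Lay out screw_count screw+head cutouts evenly on a circle of radius
        # screw_offset, stepping radstep radians between neighbours.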
screws = []
for screw_idx in range(self.screw_count):
screw = Cylinder(d=self.screw_dia, h=self.screw_height, center=True)
screw_head = Cylinder(d1=self.screw_dia, d2=self.screw_head_dia, h=self.screw_head_height, center=True)
z_offset = (self.screw_head_height + self.screw_height) / 2.0
screw_head = Translate(z=z_offset)(screw_head)
screw = Union()(screw, screw_head)
x_offset = math.cos(radstep * screw_idx) * self.screw_offset
y_offset = math.sin(radstep * screw_idx) * self.screw_offset
z_offset = self.screw_height / 2.0
screw = Translate(x=x_offset, y=y_offset, z=z_offset)(screw)
screws.append(screw)
screws = Union()(*screws)
cyl = Union()(top_cyl, mid_cyl, screws)
cyl = Translate(z=self.mid_cyl_height / 2.0)(cyl)
return cyl
def neck_post(self):
self.neck_post_dia = 26
self.neck_post_len = 20
dia = self.screw_dia * 2.5
escs = []
radstep = (2 * math.pi) / self.screw_count
for screw_idx in range(self.screw_count):
esc = Cylinder(d1=dia, h=self.neck_post_len / 2.0, center=True)
x_offset = math.cos(radstep * screw_idx) * (self.screw_offset + 1)
y_offset = math.sin(radstep * screw_idx) * (self.screw_offset + 1)
z_offset = self.neck_post_len / -4.0
esc = Translate(x=x_offset, y=y_offset, z=z_offset)(esc)
escs.append(esc)
escs = Union()(*escs)
neck_post = Cylinder(d1=self.neck_post_dia + 1, d2=self.neck_post_dia - 1, h=self.neck_post_len, center=True)
neck_post = Difference()(neck_post, escs)
return neck_post
def magnet(self):
magnet = Cube(x=self.magnet_size, y=self.magnet_size, z=self.magnet_size, center=True)
return magnet
def scad(self):
base = self.base()
spindle = Cylinder(d=self.mid_cyl_dia + 10, h=self.spindle_height)
magnet = self.magnet()
x_offset = (self.mid_cyl_dia + 10) / 2.0 - (self.magnet_size / 2.0) + 0.2
z_offset = (self.magnet_size + self.mid_cyl_height) / 2.0
magnet = Translate(x=x_offset, z=z_offset)(magnet)
spindle = Difference()(spindle, base, magnet)
neck_post = self.neck_post()
z_offset = (self.neck_post_len / 2.0) + self.spindle_height
neck_post = Translate(z=z_offset)(neck_post)
spindle = Union()(spindle, neck_post)
#spindle = Union()(spindle, base)
spindle = SCAD_Globals(fn=50)(spindle)
return spindle
class SpindleBase(SCAD_Object):
top_cyl_dia = 14.5 + 0.3
top_cyl_height = 3.7
mid_cyl_dia = 25 + 0.3
mid_cyl_height = 5.0
bot_cyl_dia = 30.2
bot_cyl_height = 2.1
spindle_height = top_cyl_height + mid_cyl_height - 1
screw_count = 6
screw_dia = 2.7
screw_offset = (mid_cyl_dia / 2.0) - (screw_dia / 2.0) - 1.0 - 0.2
screw_head_height = 1.5
screw_head_dia = 3.9
screw_height = spindle_height - screw_head_dia
magnet_size = (1/8.0) * 25.4 + 0.2
def base(self):
top_cyl = Cylinder(d=self.top_cyl_dia, h=self.top_cyl_height, center=True)
top_cyl = Translate(z=(self.top_cyl_height + self.mid_cyl_height) / 2.0)(top_cyl)
mid_cyl = Cylinder(d=self.mid_cyl_dia, h=self.mid_cyl_height, center=True)
radstep = (2 * math.pi) / self.screw_count
screws = []
for screw_idx in range(self.screw_count):
screw = Cylinder(d=self.screw_dia, h=self.screw_height, center=True)
screw_head = Cylinder(d1=self.screw_dia, d2=self.screw_head_dia, h=self.screw_head_height, center=True)
z_offset = (self.screw_head_height + self.screw_height) / 2.0
screw_head = Translate(z=z_offset)(screw_head)
screw = Union()(screw, screw_head)
x_offset = math.cos(radstep * screw_idx) * self.screw_offset
y_offset = math.sin(radstep * screw_idx) * self.screw_offset
z_offset = self.screw_height / 2.0
screw = Translate(x=x_offset, y=y_offset, z=z_offset)(screw)
screws.append(screw)
screws = Union()(*screws)
cyl = Union()(top_cyl, mid_cyl, screws)
cyl = Translate(z=self.mid_cyl_height / 2.0)(cyl)
return cyl
def neck_post(self):
self.neck_post_dia = 26
self.neck_post_len = 20
dia = self.screw_dia * 2.5
escs = []
radstep = (2 * math.pi) / self.screw_count
for screw_idx in range(self.screw_count):
esc = Cylinder(d1=dia, h=self.neck_post_len / 2.0, center=True)
x_offset = math.cos(radstep * screw_idx) * (self.screw_offset + 1)
y_offset = math.sin(radstep * screw_idx) * (self.screw_offset + 1)
z_offset = self.neck_post_len / -4.0
esc = Translate(x=x_offset, y=y_offset, z=z_offset)(esc)
escs.append(esc)
escs = Union()(*escs)
neck_post = Cylinder(d=self.neck_post_dia + 1.0, h=self.neck_post_len, center=True)
return neck_post
def magnet(self):
magnet = Cube(x=self.magnet_size, y=self.magnet_size, z=self.magnet_size, center=True)
return magnet
def scad(self):
base = self.base()
spindle = Cylinder(d=self.mid_cyl_dia + 10, h=self.spindle_height)
magnet = self.magnet()
x_offset = (self.mid_cyl_dia + 10) / 2.0 - (self.magnet_size / 2.0) + 0.2
z_offset = (self.magnet_size + self.mid_cyl_height) / 2.0
magnet = Translate(x=x_offset, z=z_offset)(magnet)
spinhole = Cylinder(d=8.8, h=4)
spinhole2 = Cylinder(d2=6.5, d1=7.8, h=6)
spinhole2 = Translate(z=4)(spinhole2)
spinhole = Union()(spinhole, spinhole2)
neck_post = self.neck_post()
z_offset = (self.neck_post_len / 2.0) + self.spindle_height
neck_post = Translate(z=z_offset)(neck_post)
#nuthole = Cylinder(d=12, h=40, fn=6)
nuthole = Cylinder(d=20, h=40)
nuthole = Translate(z=8)(nuthole)
spindle = Union()(spindle, neck_post)
spindle = Difference()(spindle, magnet, spinhole, nuthole)
key = Cube(y=8, x=2, z=4, center=True)
key1 = Translate(x=4.5, z=2)(key)
key2 = Translate(x=-4.5, z=2)(key)
spindle = Union()(spindle, key1, key2)
spindle = SCAD_Globals(fn=50)(spindle)
debug = Cylinder(h=8, d=12)()
#spindle = intersection()(debug, spindle)
return spindle
def balance_spindle(self):
base = self.base()
spindle = Cylinder(d=self.mid_cyl_dia + 10, h=self.spindle_height)
magnet = self.magnet()
x_offset = (self.mid_cyl_dia + 10) / 2.0 - (self.magnet_size / 2.0) + 0.2
z_offset = (self.magnet_size + self.mid_cyl_height) / 2.0
magnet = Translate(x=x_offset, z=z_offset)(magnet)
spinhole = Cylinder(d=8.8, h=60, center=True)
neck_post = self.neck_post()
z_offset = (self.neck_post_len / 2.0) + self.spindle_height
neck_post = Translate(z=z_offset)(neck_post)
nuthole = Cylinder(d=22.4, h=7)
nuthole2 = Translate(z=20.8)(nuthole)
spindle = Union()(spindle, neck_post)
spindle = Difference()(spindle, magnet, spinhole, nuthole, nuthole2)
#spindle = Union()(spindle, base)
spindle = SCAD_Globals(fn=50)(spindle)
return spindle
def spindle_brace(self):
dim = 20
shdim = 3
offset = (dim - 2) - shdim * 0.5
base = Cube(x=dim * 2, y=dim * 2, z=7, center=True)
bearing = Cylinder(d=22.4, h=7, center=True)
shole = Cylinder(d=3, h=7, center=True)
shole1 = Translate(x=offset, y=offset)(shole)
shole2 = Translate(x=-offset, y=offset)(shole)
shole3 = Translate(x=-offset, y=-offset)(shole)
shole4 = Translate(x=offset, y=-offset)(shole)
sholes = Union()(shole1, shole2, shole3, shole4)
brace = Difference()(base, bearing, sholes)
brace = SCAD_Globals(fn=50)(brace)
return brace
sb = SpindleBase()
sb.render("spindle.scad")
if not os.path.exists("spindle.stl"):
sb.render("spindle.stl")
sb.balance_spindle().render("balance_spindle.scad")
if not os.path.exists("balance_spindle.stl"):
sb.balance_spindle().render("balance_spindle.stl")
sb.spindle_brace().render("spindle_brace.scad")
if not os.path.exists("spindle_brace.stl"):
sb.spindle_brace().render("spindle_brace.stl")
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to define a phonon band structure.
"""
import collections
import numpy as np
from monty.json import MSONable
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.bandstructure import Kpoint
def get_reasonable_repetitions(natoms):
"""
Choose the number of repetitions
according to the number of atoms in the system
"""
if natoms < 4:
return [3, 3, 3]
if 4 <= natoms < 15:
return [2, 2, 2]
if 15 <= natoms < 50:
return [2, 2, 1]
return [1, 1, 1]
def eigenvectors_from_displacements(disp, masses):
"""
Calculate the eigenvectors from the atomic displacements
"""
nphonons, natoms, ndirections = disp.shape
sqrt_masses = np.sqrt(masses)
return np.einsum("nax,a->nax", disp, sqrt_masses)
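# Sketch of the scaling above with made-up numbers: for disp = np.ones((1, 2, 3)) and
# masses = [1.0, 4.0], eigenvectors_from_displacements(disp, masses)[0, 1] gives
# array([2., 2., 2.]), i.e. each displacement is multiplied by sqrt(mass) of its atom.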
def estimate_band_connection(prev_eigvecs, eigvecs, prev_band_order):
"""
A function to order the phonon eigenvectors taken from phonopy
"""
metric = np.abs(np.dot(prev_eigvecs.conjugate().T, eigvecs))
connection_order = []
for overlaps in metric:
maxval = 0
for i in reversed(range(len(metric))):
val = overlaps[i]
if i in connection_order:
continue
if val > maxval:
maxval = val
maxindex = i
connection_order.append(maxindex)
band_order = [connection_order[x] for x in prev_band_order]
return band_order
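# Sketch of the greedy overlap matching above: if two bands swap character between
# neighbouring q-points,
#   prev = np.eye(2); curr = np.array([[0.0, 1.0], [1.0, 0.0]])
#   estimate_band_connection(prev, curr, [0, 1])  ->  [1, 0]
# i.e. band 0 at the previous q-point connects to band 1 at the current one.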
class PhononBandStructure(MSONable):
"""
    This is the most generic phonon band structure data possible.
    It is defined by a list of qpoints plus frequencies for each of them.
Additional information may be given for frequencies at Gamma, where
non-analytical contribution may be taken into account.
"""
def __init__(
self,
qpoints,
frequencies,
lattice,
nac_frequencies=None,
eigendisplacements=None,
nac_eigendisplacements=None,
labels_dict=None,
coords_are_cartesian=False,
structure=None,
):
"""
Args:
qpoints: list of qpoint as numpy arrays, in frac_coords of the
given lattice by default
frequencies: list of phonon frequencies in THz as a numpy array with shape
                (3*len(structure), len(qpoints)). The first index of the array
refers to the band and the second to the index of the qpoint.
lattice: The reciprocal lattice as a pymatgen Lattice object.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient.
nac_frequencies: Frequencies with non-analytical contributions at Gamma in THz.
A list of tuples. The first element of each tuple should be a list
defining the direction (not necessarily a versor, will be normalized
                internally). The second element contains the 3*len(structure)
                phonon frequencies with the non-analytical correction for that direction.
eigendisplacements: the phonon eigendisplacements associated to the
frequencies in cartesian coordinates. A numpy array of complex
numbers with shape (3*len(structure), len(qpoints), len(structure), 3).
                The first index of the array refers to the band, the second to the index
of the qpoint, the third to the atom in the structure and the fourth
to the cartesian coordinates.
nac_eigendisplacements: the phonon eigendisplacements associated to the
non-analytical frequencies in nac_frequencies in cartesian coordinates.
A list of tuples. The first element of each tuple should be a list
                defining the direction. The second element contains a numpy array of
complex numbers with shape (3*len(structure), len(structure), 3).
            labels_dict: (dict) linking a qpoint (in frac coords or
                cartesian coordinates, depending on coords_are_cartesian) to a label.
coords_are_cartesian: Whether the qpoint coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure
"""
self.lattice_rec = lattice
self.qpoints = []
self.labels_dict = {}
self.structure = structure
if eigendisplacements is None:
eigendisplacements = np.array([])
self.eigendisplacements = eigendisplacements
if labels_dict is None:
labels_dict = {}
for q in qpoints:
            # let's see whether this qpoint has been assigned a label
label = None
for c in labels_dict:
if np.linalg.norm(q - np.array(labels_dict[c])) < 0.0001:
label = c
self.labels_dict[label] = Kpoint(
q,
lattice,
label=label,
coords_are_cartesian=coords_are_cartesian,
)
self.qpoints.append(Kpoint(q, lattice, label=label, coords_are_cartesian=coords_are_cartesian))
self.bands = frequencies
self.nb_bands = len(self.bands)
self.nb_qpoints = len(self.qpoints)
# normalize directions for nac_frequencies and nac_eigendisplacements
self.nac_frequencies = []
self.nac_eigendisplacements = []
if nac_frequencies is not None:
for t in nac_frequencies:
self.nac_frequencies.append(([i / np.linalg.norm(t[0]) for i in t[0]], t[1]))
if nac_eigendisplacements is not None:
for t in nac_eigendisplacements:
self.nac_eigendisplacements.append(([i / np.linalg.norm(t[0]) for i in t[0]], t[1]))
def min_freq(self):
"""
Returns the point where the minimum frequency is reached and its value
"""
i = np.unravel_index(np.argmin(self.bands), self.bands.shape)
return self.qpoints[i[1]], self.bands[i]
def has_imaginary_freq(self, tol=1e-5):
"""
True if imaginary frequencies are present in the BS.
"""
return self.min_freq()[1] + tol < 0
@property
def has_nac(self):
"""
True if nac_frequencies are present.
"""
return len(self.nac_frequencies) > 0
@property
def has_eigendisplacements(self):
"""
True if eigendisplacements are present.
"""
return len(self.eigendisplacements) > 0
def get_nac_frequencies_along_dir(self, direction):
"""
Returns the nac_frequencies for the given direction (not necessarily a versor).
None if the direction is not present or nac_frequencies has not been calculated.
Args:
direction: the direction as a list of 3 elements
Returns:
            the frequencies as a 1D numpy array of length 3*len(structure).
            None if not found.
"""
versor = [i / np.linalg.norm(direction) for i in direction]
for d, f in self.nac_frequencies:
if np.allclose(versor, d):
return f
return None
def get_nac_eigendisplacements_along_dir(self, direction):
"""
Returns the nac_eigendisplacements for the given direction (not necessarily a versor).
None if the direction is not present or nac_eigendisplacements has not been calculated.
Args:
direction: the direction as a list of 3 elements
Returns:
the eigendisplacements as a numpy array of complex numbers with shape
(3*len(structure), len(structure), 3). None if not found.
"""
versor = [i / np.linalg.norm(direction) for i in direction]
for d, e in self.nac_eigendisplacements:
if np.allclose(versor, d):
return e
return None
def asr_breaking(self, tol_eigendisplacements=1e-5):
"""
Returns the breaking of the acoustic sum rule for the three acoustic modes,
if Gamma is present. None otherwise.
If eigendisplacements are available they are used to determine the acoustic
        modes: it selects the bands corresponding to eigendisplacements that
        represent a translation within tol_eigendisplacements. If these are not
        identified, or eigendisplacements are missing, the first 3 modes will be
        used (indices [0:3]).
"""
for i in range(self.nb_qpoints):
if np.allclose(self.qpoints[i].frac_coords, (0, 0, 0)):
if self.has_eigendisplacements:
acoustic_modes_index = []
for j in range(self.nb_bands):
eig = self.eigendisplacements[j][i]
if np.max(np.abs(eig[1:] - eig[:1])) < tol_eigendisplacements:
acoustic_modes_index.append(j)
                    # if the acoustic modes are not correctly identified, fall back
                    # to the first three modes
if len(acoustic_modes_index) != 3:
acoustic_modes_index = [0, 1, 2]
return self.bands[acoustic_modes_index, i]
return self.bands[:3, i]
return None
def as_dict(self):
"""
:return: MSONable dict
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(),
"qpoints": [],
}
        # qpoints are stored as frac coords rather than full Kpoint dicts. This makes
        # the dict smaller and avoids repeating the lattice
for q in self.qpoints:
d["qpoints"].append(q.as_dict()["fcoords"])
d["bands"] = self.bands.tolist()
d["labels_dict"] = {}
for kpoint_letter, kpoint_object in self.labels_dict.items():
d["labels_dict"][kpoint_letter] = kpoint_object.as_dict()["fcoords"]
# split the eigendisplacements to real and imaginary part for serialization
d["eigendisplacements"] = dict(
real=np.real(self.eigendisplacements).tolist(),
imag=np.imag(self.eigendisplacements).tolist(),
)
d["nac_eigendisplacements"] = [
(direction, dict(real=np.real(e).tolist(), imag=np.imag(e).tolist()))
for direction, e in self.nac_eigendisplacements
]
d["nac_frequencies"] = [(direction, f.tolist()) for direction, f in self.nac_frequencies]
if self.structure:
d["structure"] = self.structure.as_dict()
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PhononBandStructure
"""
lattice_rec = Lattice(d["lattice_rec"]["matrix"])
eigendisplacements = np.array(d["eigendisplacements"]["real"]) + np.array(d["eigendisplacements"]["imag"]) * 1j
nac_eigendisplacements = [
(direction, np.array(e["real"]) + np.array(e["imag"]) * 1j) for direction, e in d["nac_eigendisplacements"]
]
nac_frequencies = [(direction, np.array(f)) for direction, f in d["nac_frequencies"]]
structure = Structure.from_dict(d["structure"]) if "structure" in d else None
return cls(
d["qpoints"],
np.array(d["bands"]),
lattice_rec,
nac_frequencies,
eigendisplacements,
nac_eigendisplacements,
d["labels_dict"],
structure=structure,
)
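# Illustrative sketch, not part of the original module: building a minimal
# PhononBandStructure by hand and querying it. The reciprocal lattice, q-points and
# frequencies are arbitrary toy numbers, not physical data.
def _example_phonon_band_structure():
    rec_lattice = Lattice(np.eye(3) * 2 * np.pi)
    qpoints = [np.array([0.0, 0.0, 0.0]), np.array([0.5, 0.0, 0.0])]
    # pretend we have 3 atoms -> 9 bands; shape is (3 * natoms, nqpoints)
    frequencies = np.abs(np.random.rand(9, 2))
    bs = PhononBandStructure(qpoints, frequencies, rec_lattice,
                             labels_dict={"Gamma": [0.0, 0.0, 0.0]})
    # round-trip through the MSONable dict representation
    bs2 = PhononBandStructure.from_dict(bs.as_dict())
    return bs.min_freq(), bs.has_imaginary_freq(), bs2.nb_bands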
class PhononBandStructureSymmLine(PhononBandStructure):
r"""
This object stores phonon band structures along selected (symmetry) lines in the
Brillouin zone. We call the different symmetry lines (ex: \\Gamma to Z)
"branches".
"""
def __init__(
self,
qpoints,
frequencies,
lattice,
has_nac=False,
eigendisplacements=None,
labels_dict=None,
coords_are_cartesian=False,
structure=None,
):
"""
Args:
qpoints: list of qpoints as numpy arrays, in frac_coords of the
given lattice by default
            frequencies: list of phonon frequencies in THz as a numpy array with shape
(3*len(structure), len(qpoints))
lattice: The reciprocal lattice as a pymatgen Lattice object.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient
has_nac: specify if the band structure has been produced taking into account
                non-analytical corrections at Gamma. If True, frequencies at Gamma from
                different directions will be stored in nac_frequencies. Default False.
eigendisplacements: the phonon eigendisplacements associated to the
frequencies in cartesian coordinates. A numpy array of complex
numbers with shape (3*len(structure), len(qpoints), len(structure), 3).
                The first index of the array refers to the band, the second to the index
of the qpoint, the third to the atom in the structure and the fourth
to the cartesian coordinates.
            labels_dict: (dict) mapping a qpoint (in frac coords or
                cartesian coordinates, depending on coords_are_cartesian) to a label.
coords_are_cartesian: Whether the qpoint coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure
"""
super().__init__(
qpoints=qpoints,
frequencies=frequencies,
lattice=lattice,
nac_frequencies=None,
eigendisplacements=eigendisplacements,
nac_eigendisplacements=None,
labels_dict=labels_dict,
coords_are_cartesian=coords_are_cartesian,
structure=structure,
)
self._reuse_init(eigendisplacements, frequencies, has_nac, qpoints)
def _reuse_init(self, eigendisplacements, frequencies, has_nac, qpoints):
self.distance = []
self.branches = []
one_group = []
branches_tmp = []
# get labels and distance for each qpoint
previous_qpoint = self.qpoints[0]
previous_distance = 0.0
previous_label = self.qpoints[0].label
for i in range(self.nb_qpoints):
label = self.qpoints[i].label
if label is not None and previous_label is not None:
self.distance.append(previous_distance)
else:
self.distance.append(
np.linalg.norm(self.qpoints[i].cart_coords - previous_qpoint.cart_coords) + previous_distance
)
previous_qpoint = self.qpoints[i]
previous_distance = self.distance[i]
if label:
if previous_label:
if len(one_group) != 0:
branches_tmp.append(one_group)
one_group = []
previous_label = label
one_group.append(i)
if len(one_group) != 0:
branches_tmp.append(one_group)
for b in branches_tmp:
self.branches.append(
{
"start_index": b[0],
"end_index": b[-1],
"name": str(self.qpoints[b[0]].label) + "-" + str(self.qpoints[b[-1]].label),
}
)
# extract the frequencies with non-analytical contribution at gamma
if has_nac:
naf = []
nac_eigendisplacements = []
for i in range(self.nb_qpoints):
                # get directions with nac irrespective of the labels_dict. NB: with labels
                # the gamma point is expected to appear twice consecutively.
if np.allclose(qpoints[i], (0, 0, 0)):
if i > 0 and not np.allclose(qpoints[i - 1], (0, 0, 0)):
q_dir = self.qpoints[i - 1]
direction = q_dir.frac_coords / np.linalg.norm(q_dir.frac_coords)
naf.append((direction, frequencies[:, i]))
if self.has_eigendisplacements:
nac_eigendisplacements.append((direction, eigendisplacements[:, i]))
if i < len(qpoints) - 1 and not np.allclose(qpoints[i + 1], (0, 0, 0)):
q_dir = self.qpoints[i + 1]
direction = q_dir.frac_coords / np.linalg.norm(q_dir.frac_coords)
naf.append((direction, frequencies[:, i]))
if self.has_eigendisplacements:
nac_eigendisplacements.append((direction, eigendisplacements[:, i]))
self.nac_frequencies = np.array(naf)
self.nac_eigendisplacements = np.array(nac_eigendisplacements)
def get_equivalent_qpoints(self, index):
"""
        Returns the list of qpoint indices equivalent to the given one
        (i.e., they have the same frac coords).
Args:
index: the qpoint index
Returns:
a list of equivalent indices
        TODO: currently this uses the label; we might want to use coordinates
        instead (in case there was a mislabel)
"""
        # if the qpoint has no label it can't have a repetition along the band
        # structure line object
if self.qpoints[index].label is None:
return [index]
list_index_qpoints = []
for i in range(self.nb_qpoints):
if self.qpoints[i].label == self.qpoints[index].label:
list_index_qpoints.append(i)
return list_index_qpoints
def get_branch(self, index):
r"""
        Returns the branch(es) to which the qpoint belongs. There can be several
        branches.
Args:
index: the qpoint index
Returns:
A list of dictionaries [{"name","start_index","end_index","index"}]
indicating all branches in which the qpoint is. It takes into
account the fact that one qpoint (e.g., \\Gamma) can be in several
branches
"""
to_return = []
for i in self.get_equivalent_qpoints(index):
for b in self.branches:
if b["start_index"] <= i <= b["end_index"]:
to_return.append(
{
"name": b["name"],
"start_index": b["start_index"],
"end_index": b["end_index"],
"index": i,
}
)
return to_return
def write_phononwebsite(self, filename):
"""
Write a json file for the phononwebsite:
http://henriquemiranda.github.io/phononwebsite
"""
import json
with open(filename, "w") as f:
json.dump(self.as_phononwebsite(), f)
def as_phononwebsite(self):
"""
Return a dictionary with the phononwebsite format:
http://henriquemiranda.github.io/phononwebsite
"""
d = {}
# define the lattice
d["lattice"] = self.structure.lattice._matrix.tolist()
# define atoms
atom_pos_car = []
atom_pos_red = []
atom_types = []
for site in self.structure.sites:
atom_pos_car.append(site.coords.tolist())
atom_pos_red.append(site.frac_coords.tolist())
atom_types.append(site.species_string)
# default for now
d["repetitions"] = get_reasonable_repetitions(len(atom_pos_car))
d["natoms"] = len(atom_pos_car)
d["atom_pos_car"] = atom_pos_car
d["atom_pos_red"] = atom_pos_red
d["atom_types"] = atom_types
d["atom_numbers"] = self.structure.atomic_numbers
d["formula"] = self.structure.formula
d["name"] = self.structure.formula
# get qpoints
qpoints = []
for q in self.qpoints:
qpoints.append(list(q.frac_coords))
d["qpoints"] = qpoints
# get labels
hsq_dict = collections.OrderedDict()
for nq, q in enumerate(self.qpoints):
if q.label is not None:
hsq_dict[nq] = q.label
# get distances
dist = 0
nqstart = 0
distances = [dist]
line_breaks = []
for nq in range(1, len(qpoints)):
q1 = np.array(qpoints[nq])
q2 = np.array(qpoints[nq - 1])
# detect jumps
if (nq in hsq_dict) and (nq - 1 in hsq_dict):
if hsq_dict[nq] != hsq_dict[nq - 1]:
hsq_dict[nq - 1] += "|" + hsq_dict[nq]
del hsq_dict[nq]
line_breaks.append((nqstart, nq))
nqstart = nq
else:
dist += np.linalg.norm(q1 - q2)
distances.append(dist)
line_breaks.append((nqstart, len(qpoints)))
d["distances"] = distances
d["line_breaks"] = line_breaks
d["highsym_qpts"] = list(hsq_dict.items())
# eigenvalues
thz2cm1 = 33.35641
bands = self.bands.copy() * thz2cm1
d["eigenvalues"] = bands.T.tolist()
# eigenvectors
eigenvectors = self.eigendisplacements.copy()
eigenvectors /= np.linalg.norm(eigenvectors[0, 0])
eigenvectors = eigenvectors.swapaxes(0, 1)
eigenvectors = np.array([eigenvectors.real, eigenvectors.imag])
eigenvectors = np.rollaxis(eigenvectors, 0, 5)
d["vectors"] = eigenvectors.tolist()
return d
def band_reorder(self):
"""
Re-order the eigenvalues according to the similarity of the eigenvectors
"""
eiv = self.eigendisplacements
eig = self.bands
nphonons, nqpoints = self.bands.shape
order = np.zeros([nqpoints, nphonons], dtype=int)
order[0] = np.array(range(nphonons))
# get the atomic masses
atomic_masses = [site.specie.atomic_mass for site in self.structure.sites]
# get order
for nq in range(1, nqpoints):
old_eiv = eigenvectors_from_displacements(eiv[:, nq - 1], atomic_masses)
new_eiv = eigenvectors_from_displacements(eiv[:, nq], atomic_masses)
order[nq] = estimate_band_connection(
old_eiv.reshape([nphonons, nphonons]).T,
new_eiv.reshape([nphonons, nphonons]).T,
order[nq - 1],
)
# reorder
for nq in range(1, nqpoints):
eivq = eiv[:, nq]
eigq = eig[:, nq]
eiv[:, nq] = eivq[order[nq]]
eig[:, nq] = eigq[order[nq]]
def as_dict(self):
"""
Returns: MSONable dict
"""
d = super().as_dict()
# remove nac_frequencies and nac_eigendisplacements as they are reconstructed
# in the __init__ when the dict is deserialized
nac_frequencies = d.pop("nac_frequencies")
d.pop("nac_eigendisplacements")
d["has_nac"] = len(nac_frequencies) > 0
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d: Dict representation
        Returns: PhononBandStructureSymmLine
"""
lattice_rec = Lattice(d["lattice_rec"]["matrix"])
eigendisplacements = np.array(d["eigendisplacements"]["real"]) + np.array(d["eigendisplacements"]["imag"]) * 1j
structure = Structure.from_dict(d["structure"]) if "structure" in d else None
return cls(
d["qpoints"],
np.array(d["bands"]),
lattice_rec,
d["has_nac"],
eigendisplacements,
d["labels_dict"],
structure=structure,
)
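# Illustrative sketch, not part of the original module: typical downstream use of
# PhononBandStructureSymmLine, assuming `bs` was obtained elsewhere (e.g. from a
# phonon workflow) and carries a structure plus eigendisplacements. The output file
# name is a hypothetical placeholder.
def _example_export_phononwebsite(bs):
    bs.band_reorder()  # reorder bands by eigenvector similarity
    bs.write_phononwebsite("phonon_band_structure.json")  # JSON for the phononwebsite viewer
    return bs.as_phononwebsite()["repetitions"]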
|
|
#!/usr/bin/env python
import os
import sys
import subprocess
import cPickle
def run_uniquify_fasta(fasta_file, output_file, pickle_file):
"""
Returns a subprocess from command-line script "uniquify_fasta.py" <subprocess.Popen>
Input:
fasta_file <str> -- path to fasta file
output_file <str> -- path to output (simplified) fasta file
pickle_file <str> -- path to pickle file containing name-mapping of new->old
"""
uniquify_cmd = gen_uniquify_fasta_cmd(fasta_file, output_file, pickle_file = pickle_file)
return subprocess.Popen(uniquify_cmd)
def gen_uniquify_fasta_cmd(fasta_file, output_file, pickle_file = None):
"""
Returns a "uniquify_fasta.py" command <list>
Input:
fasta_file <str> -- path to fasta file
output_file <str> -- path to output (simplified) fasta file
pickle_file <str> -- path to pickle file containing name-mapping of new->old
"""
if pickle_file == None:
pickle_file = os.path.splitext(fasta_file)[0]
pickle_file += '.pkl'
return ['uniquify_fasta.py', '-f', fasta_file, '-o', output_file, '-p', pickle_file]
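# Illustrative sketch, not part of the original script: the command list built when
# no pickle path is supplied. The FASTA path is a made-up example.
def _example_uniquify_cmd():
    return gen_uniquify_fasta_cmd('proteins.fasta', 'proteins.simple.fasta')
    # -> ['uniquify_fasta.py', '-f', 'proteins.fasta',
    #     '-o', 'proteins.simple.fasta', '-p', 'proteins.pkl']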
def run_fasta2phy(fasta_file, phylip_file):
"""
Returns a subprocess from command-line script "fasta2phy" <subprocess.Popen>
Input:
fasta_file <str> -- path to fasta file
phylip_file <str> -- path to output phylip file
"""
fasta2phy_cmd = gen_fasta2phy_cmd(fasta_file, phylip_file)
return subprocess.Popen(fasta2phy_cmd)
def gen_fasta2phy_cmd(fasta_file, phylip_file):
"""
Returns a "fasta2phy" command <list>
Input:
fasta_file <str> -- path to fasta file
phylip_file <str> -- path to output phylip file
"""
return ['fasta2phy', '-i', fasta_file, '-o', phylip_file]
def run_seqboot(phylip_file, output_file, args):
"""
Returns a subprocess from Joe Felsenstein's "seqboot" utility <subprocess.Popen>
Input:
phylip_file <str> -- path to phylip alignment
output_file <str> -- path to bootstrapped alignments
args <Namespace> -- keyword arguments for utility
TO-DO:
-> Refactor the args parameter to make use of *args
"""
if os.path.isfile('outfile') is False:
subprocess.Popen(['touch', 'outfile']).wait()
if os.path.isfile(output_file) is True:
subprocess.Popen(['rm', output_file]).wait()
seq_boot_str = '\n'.join(gen_seqboot_args(phylip_file, output_file, args))
print_cmd = ['printf', seq_boot_str]
print_process = subprocess.Popen(print_cmd, stdout=subprocess.PIPE)
if args.verbose:
stderr = sys.stderr
else:
stderr = open(args.log, 'a')
seq_boot_process = subprocess.Popen(['seqboot'], stdin=print_process.stdout, stderr=stderr, stdout=stderr)
return seq_boot_process
def gen_seqboot_args(phylip_file, output_file, args):
"""
Returns a command list for Joe Felsenstein's "seqboot" utility <list>
Input:
phylip_file <str> -- path to phylip alignment
output_file <str> -- path to bootstrapped alignments
args <Namespace> -- keyword arguments for utility
TO-DO:
-> Refactor the args parameter to make use of *args
"""
params = {}
params['J'] = ('Jackknife' if args.jackknife else 'Bootstrap')
params['R'] = str(args.n)
seq_boot_args = [phylip_file]
for key, val in params.iteritems():
seq_boot_args.extend([str(key), str(val)])
seq_boot_args.extend(['Y', str(args.s), 'F', output_file])
return seq_boot_args
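# Illustrative sketch, not part of the original script: the keystroke list fed to
# seqboot for a hypothetical bootstrap run. The Namespace and file names are made-up
# examples; the order of the 'J'/'R' pairs depends on dict iteration order.
def _example_seqboot_args():
    from argparse import Namespace
    fake_args = Namespace(jackknife=False, n=100, s=12345)
    return gen_seqboot_args('aln.phylip', 'aln.boot.phylip', fake_args)
    # e.g. ['aln.phylip', 'J', 'Bootstrap', 'R', '100',
    #       'Y', '12345', 'F', 'aln.boot.phylip']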
def run_FastTree(args, aln_file, tree_file, bootstrap = True):
"""
Returns a subprocess of the FastTree program <subprocess.Popen>
Input:
args <Namespace> -- keyword arguments for utility
aln_file <str> -- path to alignment (FASTA or PHYLIP format)
tree_file <str> -- path to output tree (Newick format)
        bootstrap <boolean> -- whether the tree is built from a bootstrapped alignment
TO-DO:
-> Refactor the args parameter to make use of *args
"""
    fasttree_cmd = gen_fasttree_cmd(args, aln_file, tree_file, bootstrap = bootstrap)
if args.verbose:
stderr = sys.stderr
else:
stderr = open(args.log, 'a')
return subprocess.Popen(fasttree_cmd, stderr=stderr)
def gen_fasttree_cmd(args, aln_file, tree_file, bootstrap = True):
"""
Returns a command list for the FastTree program <list>
Input:
args <Namespace> -- keyword arguments for utility
aln_file <str> -- path to alignment (FASTA or PHYLIP format)
tree_file <str> -- path to output tree (Newick format)
        bootstrap <boolean> -- whether the tree is built from a bootstrapped alignment
TO-DO:
-> Refactor the args parameter to make use of *args
"""
params = {}
params['-cat'] = str(args.cat)
if bootstrap:
params['-n'] = str(args.n)
if args.gamma: params['-gamma'] = ''
if args.wag: params['-wag'] = ''
    if args.nt: params['-nt'] = ''
    if args.gtr: params['-gtr'] = ''
if not args.verbose:
params['-quiet'] = ''
params['-nopr'] = ''
params['-log'] = args.log
fastree_args = [('FastTreeMP' if args.mp else 'FastTree')]
for key, val in params.iteritems():
if val:
fastree_args.extend([key, val])
else:
fastree_args.append(key)
fastree_args.extend(['-out', tree_file, aln_file])
return fastree_args
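# Illustrative sketch, not part of the original script: the FastTree command produced
# for a hypothetical bootstrapped protein alignment. All Namespace values are made-up
# examples.
def _example_fasttree_cmd():
    from argparse import Namespace
    fake_args = Namespace(cat=20, n=100, gamma=True, wag=False, nt=False, gtr=False,
                          verbose=False, log='log.txt', mp=True)
    return gen_fasttree_cmd(fake_args, 'aln.boot.phylip', 'aln.boots.tree',
                            bootstrap=True)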
def run_compare_bootstraps(single_tree, bootstrapped_trees, output_file, args):
    """
    Returns a subprocess of Morgan Price's "CompareToBootstrap" perl script <subprocess.Popen>
    Input:
        single_tree <str> -- path to reference tree file (Newick)
        bootstrapped_trees <str> -- path to bootstrapped trees file (Newick)
        output_file <str> -- path to write tree annotated with bootstrap support
        args <Namespace> -- keyword arguments for utility (verbose/log handling)
    """
    if not check_perl_module('MOTree'):
        raise OSError("Check to make sure your PERL5LIB path includes the MOTree.pm module")
output_handle = open(output_file, 'wb')
compare_cmd = gen_compare_cmd(single_tree, bootstrapped_trees)
if args.verbose:
stderr = sys.stderr
else:
stderr = open(args.log, 'a')
return subprocess.Popen(compare_cmd, stdout=output_handle, stderr=stderr)
def gen_compare_cmd(single_tree, bootstrapped_trees):
"""
Returns a command list for Morgan Price's "CompareToBootstrap" perl script <list>
Input:
single_tree <str> -- path to reference tree file (Newick)
bootstrapped_trees <str> -- path to bootstrapped trees file (Newick)
"""
cmp_prog_path = '/home/alexh/bin/MOTreeComparison/CompareToBootstrap.pl'
compare_cmd = ['perl', cmp_prog_path, '-tree', single_tree, '-boot', bootstrapped_trees]
return compare_cmd
def check_perl_module(perl_module):
"""
    Returns True if perl_module is importable (i.e., accessible via PERL5LIB), False otherwise
Input:
perl_module <str> -- name of perl module
"""
cmd = ['perl', '-M{0}'.format(perl_module), '-e1']
try:
exit_status = subprocess.Popen(cmd).wait()
return int(exit_status) == 0
except OSError:
sys.exit("Checking for perl module {0} failed!".format(perl_module))
def relabel_nodes(tree_file, pickle_file, output_file):
"""
Relabel nodes on tree according to mapping saved in pickle file
Input:
tree_file <str> -- path to input tree file (Newick)
pickle_file <str> -- path to pickle containing new->old mapping
output_file <str> -- path to write renamed tree (Newick)
"""
import ete3
pickle_handle = open(pickle_file, 'rb')
name_map = cPickle.load(pickle_handle)
pickle_handle.close()
tree = ete3.Tree(tree_file)
for node in tree.iter_leaves():
if node.name in name_map:
node.name = name_map[node.name]
else:
sys.stderr.write("{0} from tree ({1}) not found in pickle ({2})".format(node.name, tree_file, pickle_file))
tree.write(outfile = output_file)
def cleanup(files):
"""
Deletes files from input
Input:
files <list[str]> -- paths to files that should be removed
"""
for f_path in files:
if os.path.isfile(f_path):
try:
os.remove(f_path)
except OSError:
continue
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description = \
'generate bootstrapped trees using FastTree')
parser.add_argument(\
'-f', type = str, required = True, \
help = 'aligned fasta (required)')
parser.add_argument(\
'-n', type = int, default = 100, \
help = 'number of bootstrap replicates')
parser.add_argument(\
'-s', type = int, default = 12345, \
help = 'seed for bootstrapping')
parser.add_argument(\
'--mp', action = 'store_true', \
help = 'Use MP version of FastTree for parallel computation')
parser.add_argument(\
'--jackknife', action = 'store_true', \
help = 'generate jackknife rather than bootstrap replicates')
parser.add_argument(\
'--cat', type = int, default = 20, \
help = 'number of rate categories for sites')
parser.add_argument(\
'--gamma', action = 'store_true', \
help = 'rescale branch lengths to optimize gamma likelihood')
parser.add_argument(\
'--wag', action = 'store_true', \
        help = 'Whelan-And-Goldman 2001 model instead of (default) Jones-Taylor-Thornton 1992 model (aa only)')
parser.add_argument(\
'--nt', action = 'store_true', \
help = 'nucleotide alignment (default:False)')
parser.add_argument(\
'--gtr', action = 'store_true', \
help = 'generalized time-reversible model (default:False) (nt only)')
parser.add_argument(\
'--verbose', action = 'store_true', \
help = 'print progress metrics to stderr')
parser.add_argument(\
'--log', type = str, default = 'log.txt', \
help = 'log file for stderr logging when not run with verbose')
parser.add_argument(\
'--clean', action = 'store_true', \
help = 'clean up temporary files after generation of final tree')
args = parser.parse_args()
if args.wag and any([args.nt, args.gtr]):
sys.exit('WAG model incompatible with nt alignments')
if args.gtr and not args.nt:
sys.exit('GTR model incompatible with aa alignments')
if args.s % 2 == 0:
sys.exit('Seed must be odd for seqboot')
base_name, ext = os.path.splitext(args.f)
simple_name = base_name + '.simple' + ext
pickle_name = base_name + '.pkl'
phylip_name = base_name + '.phylip'
bootstrap_name = base_name + '.boot.phylip'
tree_name = base_name + '.tree'
boottree_name = base_name + '.boots.tree'
compared_name = base_name + '.bootvals.tree'
relabeled_name = base_name + '.final.tree'
run_uniquify_fasta(args.f, simple_name, pickle_name).wait()
run_fasta2phy(simple_name, phylip_name).wait()
run_seqboot(phylip_name, bootstrap_name, args).wait()
run_FastTree(args, simple_name, tree_name, bootstrap = False).wait()
run_FastTree(args, bootstrap_name, boottree_name, bootstrap = True).wait()
    run_compare_bootstraps(tree_name, boottree_name, compared_name, args).wait()
relabel_nodes(compared_name, pickle_name, relabeled_name)
if args.clean:
cleanup([simple_name, pickle_name, phylip_name, bootstrap_name, tree_name, boottree_name, compared_name])
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.i18n import _LE
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups',
'pci_devices', 'tags']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'flavor', 'old_flavor',
'new_flavor', 'ec2_ids']
# These are fields that are optional and in instance_extra
_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests',
'flavor', 'vcpu_model']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS +
_INSTANCE_EXTRA_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups']
def _expected_cols(expected_attrs):
"""Return expected_attrs that are columns needing joining.
NB: This function may modify expected_attrs if one
requested attribute requires another.
"""
if not expected_attrs:
return expected_attrs
if ('system_metadata' in expected_attrs and
'flavor' not in expected_attrs):
# NOTE(danms): If the client asked for sysmeta, we have to
# pull flavor so we can potentially provide compatibility
expected_attrs.append('flavor')
simple_cols = [attr for attr in expected_attrs
if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
complex_cols = ['extra.%s' % field
for field in _INSTANCE_EXTRA_FIELDS
if field in expected_attrs]
if complex_cols:
simple_cols.append('extra')
    simple_cols = [col for col in simple_cols
                   if col not in _INSTANCE_EXTRA_FIELDS]
if (any([flavor in expected_attrs
for flavor in ['flavor', 'old_flavor', 'new_flavor']]) and
'system_metadata' not in simple_cols):
# NOTE(danms): While we're maintaining compatibility with
# flavor data being stored in system_metadata, we need to
# ask for it any time flavors are requested.
simple_cols.append('system_metadata')
expected_attrs.append('system_metadata')
return simple_cols + complex_cols
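# Illustrative sketch, not part of the original module: how the helper above maps
# requested attributes onto DB join targets. The attribute list is a made-up example.
def _example_expected_cols():
    return _expected_cols(['metadata', 'numa_topology'])
    # -> ['metadata', 'extra', 'extra.numa_topology']; 'numa_topology' lives in
    #    instance_extra, so the 'extra' table is joined and the specific extra
    #    column is requested alongside the plain 'metadata' join.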
def compat_instance(instance):
"""Create a dict-like instance structure from an objects.Instance.
This is basically the same as nova.objects.base.obj_to_primitive(),
except that it includes some instance-specific details, like stashing
flavor information in system_metadata.
If you have a function (or RPC client) that needs to see the instance
as a dict that has flavor information in system_metadata, use this
to appease it (while you fix said thing).
:param instance: a nova.objects.Instance instance
:returns: a dict-based instance structure
"""
if not isinstance(instance, objects.Instance):
return instance
db_instance = copy.deepcopy(base.obj_to_primitive(instance))
flavor_attrs = [('', 'flavor'), ('old_', 'old_flavor'),
('new_', 'new_flavor')]
for prefix, attr in flavor_attrs:
flavor = (instance.obj_attr_is_set(attr) and
getattr(instance, attr) or None)
if flavor:
# NOTE(danms): If flavor is unset or None, don't
# copy it into the primitive's system_metadata
db_instance['system_metadata'] = \
flavors.save_flavor_info(
db_instance.get('system_metadata', {}),
flavor, prefix)
if attr in db_instance:
del db_instance[attr]
return db_instance
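# Illustrative sketch, not part of the original module: handing an object-based
# instance to legacy code that still expects flavor data stashed in system_metadata.
# `instance` is assumed to be an objects.Instance loaded elsewhere and
# `legacy_driver_call` is a hypothetical consumer.
def _example_compat_instance(instance, legacy_driver_call):
    db_style_instance = compat_instance(instance)
    return legacy_driver_call(db_style_instance)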
# TODO(berrange): Remove NovaObjectDictCompat
class Instance(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added info_cache
# Version 1.2: Added security_groups
# Version 1.3: Added expected_vm_state and admin_state_reset to
# save()
# Version 1.4: Added locked_by and deprecated locked
# Version 1.5: Added cleaned
# Version 1.6: Added pci_devices
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
# Version 1.9: Make uuid a non-None real string
# Version 1.10: Added use_slave to refresh and get_by_uuid
# Version 1.11: Update instance from database during destroy
# Version 1.12: Added ephemeral_key_uuid
# Version 1.13: Added delete_metadata_key()
# Version 1.14: Added numa_topology
# Version 1.15: PciDeviceList 1.1
# Version 1.16: Added pci_requests
# Version 1.17: Added tags
# Version 1.18: Added flavor, old_flavor, new_flavor
# Version 1.19: Added vcpu_model
# Version 1.20: Added ec2_ids
VERSION = '1.20'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'image_ref': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'hostname': fields.StringField(nullable=True),
'launch_index': fields.IntegerField(nullable=True),
'key_name': fields.StringField(nullable=True),
'key_data': fields.StringField(nullable=True),
'power_state': fields.IntegerField(nullable=True),
'vm_state': fields.StringField(nullable=True),
'task_state': fields.StringField(nullable=True),
'memory_mb': fields.IntegerField(nullable=True),
'vcpus': fields.IntegerField(nullable=True),
'root_gb': fields.IntegerField(nullable=True),
'ephemeral_gb': fields.IntegerField(nullable=True),
'ephemeral_key_uuid': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'launched_on': fields.StringField(nullable=True),
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
'locked': fields.BooleanField(default=False),
'locked_by': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'vm_mode': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'root_device_name': fields.StringField(nullable=True),
'default_ephemeral_device': fields.StringField(nullable=True),
'default_swap_device': fields.StringField(nullable=True),
'config_drive': fields.StringField(nullable=True),
'access_ip_v4': fields.IPV4AddressField(nullable=True),
'access_ip_v6': fields.IPV6AddressField(nullable=True),
'auto_disk_config': fields.BooleanField(default=False),
'progress': fields.IntegerField(nullable=True),
'shutdown_terminate': fields.BooleanField(default=False),
'disable_terminate': fields.BooleanField(default=False),
'cell_name': fields.StringField(nullable=True),
'metadata': fields.DictOfStringsField(),
'system_metadata': fields.DictOfNullableStringsField(),
'info_cache': fields.ObjectField('InstanceInfoCache',
nullable=True),
'security_groups': fields.ObjectField('SecurityGroupList'),
'fault': fields.ObjectField('InstanceFault', nullable=True),
'cleaned': fields.BooleanField(default=False),
'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
'numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
'tags': fields.ObjectField('TagList'),
'flavor': fields.ObjectField('Flavor'),
'old_flavor': fields.ObjectField('Flavor', nullable=True),
'new_flavor': fields.ObjectField('Flavor', nullable=True),
'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True),
'ec2_ids': fields.ObjectField('EC2Ids'),
}
obj_extra_fields = ['name']
obj_relationships = {
'fault': [('1.0', '1.0'), ('1.13', '1.2')],
'info_cache': [('1.1', '1.0'), ('1.9', '1.4'), ('1.10', '1.5')],
'security_groups': [('1.2', '1.0')],
'pci_devices': [('1.6', '1.0'), ('1.15', '1.1')],
'numa_topology': [('1.14', '1.0'), ('1.16', '1.1')],
'pci_requests': [('1.16', '1.1')],
'tags': [('1.17', '1.0')],
'flavor': [('1.18', '1.1')],
'old_flavor': [('1.18', '1.1')],
'new_flavor': [('1.18', '1.1')],
'vcpu_model': [('1.19', '1.0')],
'ec2_ids': [('1.20', '1.0')],
}
def __init__(self, *args, **kwargs):
super(Instance, self).__init__(*args, **kwargs)
self._reset_metadata_tracking()
def _reset_metadata_tracking(self, fields=None):
if fields is None or 'system_metadata' in fields:
self._orig_system_metadata = (dict(self.system_metadata) if
'system_metadata' in self else {})
if fields is None or 'metadata' in fields:
self._orig_metadata = (dict(self.metadata) if
'metadata' in self else {})
def obj_reset_changes(self, fields=None):
super(Instance, self).obj_reset_changes(fields)
self._reset_metadata_tracking(fields=fields)
def obj_what_changed(self):
changes = super(Instance, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if 'system_metadata' in self and (self.system_metadata !=
self._orig_system_metadata):
changes.add('system_metadata')
return changes
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = super(Instance, cls)._obj_from_primitive(context, objver,
primitive)
self._reset_metadata_tracking()
return self
def obj_make_compatible(self, primitive, target_version):
super(Instance, self).obj_make_compatible(primitive, target_version)
target_version = utils.convert_version_to_tuple(target_version)
unicode_attributes = ['user_id', 'project_id', 'image_ref',
'kernel_id', 'ramdisk_id', 'hostname',
'key_name', 'key_data', 'host', 'node',
'user_data', 'availability_zone',
'display_name', 'display_description',
'launched_on', 'locked_by', 'os_type',
'architecture', 'vm_mode', 'root_device_name',
'default_ephemeral_device',
'default_swap_device', 'config_drive',
'cell_name']
if target_version < (1, 7):
# NOTE(danms): Before 1.7, we couldn't handle unicode in
# string fields, so squash it here
for field in [x for x in unicode_attributes if x in primitive
and primitive[x] is not None]:
primitive[field] = primitive[field].encode('ascii', 'replace')
if target_version < (1, 18):
if 'system_metadata' in primitive:
for ftype in ('', 'old_', 'new_'):
attrname = '%sflavor' % ftype
primitive.pop(attrname, None)
if self[attrname] is not None:
flavors.save_flavor_info(
primitive['system_metadata'],
getattr(self, attrname), ftype)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for key in self.fields:
if key == 'name':
# NOTE(danms): prevent recursion
continue
elif not self.obj_attr_is_set(key):
# NOTE(danms): Don't trigger lazy-loads
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@staticmethod
def _migrate_flavor(instance):
"""Migrate a fractional flavor to a full one stored in extra.
This method migrates flavor information stored in an instance's
system_metadata to instance_extra. Since the information in the
former is not complete, we must attempt to fetch the original
flavor by id to merge its extra_specs with what we store.
This is a transitional tool and can be removed in a later release
once we can ensure that everyone has migrated their instances
(likely the L release).
"""
# NOTE(danms): Always use admin context and read_deleted=yes here
# because we need to make sure we can look up our original flavor
# and try to reconstruct extra_specs, even if it has been deleted
ctxt = context.get_admin_context(read_deleted='yes')
instance.flavor = flavors.extract_flavor(instance)
flavors.delete_flavor_info(instance.system_metadata, '')
for ftype in ('old', 'new'):
attrname = '%s_flavor' % ftype
prefix = '%s_' % ftype
try:
flavor = flavors.extract_flavor(instance, prefix)
setattr(instance, attrname, flavor)
flavors.delete_flavor_info(instance.system_metadata, prefix)
except KeyError:
setattr(instance, attrname, None)
# NOTE(danms): Merge in the extra_specs from the original flavor
# since they weren't stored with the instance.
for flv in (instance.flavor, instance.new_flavor, instance.old_flavor):
if flv is not None:
try:
db_flavor = objects.Flavor.get_by_flavor_id(ctxt,
flv.flavorid)
except exception.FlavorNotFound:
continue
extra_specs = dict(db_flavor.extra_specs)
extra_specs.update(flv.get('extra_specs', {}))
flv.extra_specs = extra_specs
def _flavor_from_db(self, db_flavor):
"""Load instance flavor information from instance_extra."""
flavor_info = jsonutils.loads(db_flavor)
self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur'])
if flavor_info['old']:
self.old_flavor = objects.Flavor.obj_from_primitive(
flavor_info['old'])
else:
self.old_flavor = None
if flavor_info['new']:
self.new_flavor = objects.Flavor.obj_from_primitive(
flavor_info['new'])
else:
self.new_flavor = None
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
def _maybe_migrate_flavor(self, db_inst, expected_attrs):
"""Determine the proper place and format for flavor loading.
This method loads the flavor information into the instance. If
the information is already migrated to instance_extra, then we
load that. If it is in system_metadata, we migrate it to extra.
If, however, we're loading an instance for an older client and
the flavor has already been migrated, we need to stash it back
into system metadata, which we do here.
This is transitional and can be removed when we remove
_migrate_flavor().
"""
version = utils.convert_version_to_tuple(self.VERSION)
flavor_requested = any(
[flavor in expected_attrs
for flavor in ('flavor', 'old_flavor', 'new_flavor')])
flavor_implied = (version < (1, 18) and
'system_metadata' in expected_attrs)
# NOTE(danms): This is compatibility logic. If the flavor
# attributes were requested, then we do this load/migrate
# logic. However, if the instance is old, we might need to
# do it anyway in order to satisfy our sysmeta-based contract.
if not (flavor_requested or flavor_implied):
return False
migrated_flavor = False
if flavor_implied:
# This instance is from before flavors were migrated out of
# system_metadata. Make sure that we honor that.
instance_extra = db_inst.get('extra') or {}
if instance_extra.get('flavor') is not None:
self._flavor_from_db(instance_extra['flavor'])
sysmeta = self.system_metadata
flavors.save_flavor_info(sysmeta, self.flavor)
del self.flavor
if self.old_flavor:
flavors.save_flavor_info(sysmeta, self.old_flavor, 'old_')
del self.old_flavor
if self.new_flavor:
flavors.save_flavor_info(sysmeta, self.new_flavor, 'new_')
del self.new_flavor
self.system_metadata = sysmeta
else:
# Migrate the flavor from system_metadata to extra,
# if needed
instance_extra = db_inst.get('extra') or {}
if instance_extra.get('flavor') is not None:
self._flavor_from_db(db_inst['extra']['flavor'])
elif 'instance_type_id' in self.system_metadata:
self._migrate_flavor(self)
migrated_flavor = True
return migrated_flavor
@staticmethod
def _from_db_object(context, instance, db_inst, expected_attrs=None):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
instance._context = context
if expected_attrs is None:
expected_attrs = []
# Most of the field names match right now, so be quick
for field in instance.fields:
if field in INSTANCE_OPTIONAL_ATTRS:
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
elif field == 'cleaned':
instance.cleaned = db_inst['cleaned'] == 1
else:
instance[field] = db_inst[field]
# NOTE(danms): We can be called with a dict instead of a
# SQLAlchemy object, so we have to be careful here
if hasattr(db_inst, '__dict__'):
have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
else:
have_extra = 'extra' in db_inst and db_inst['extra']
if 'metadata' in expected_attrs:
instance['metadata'] = utils.instance_meta(db_inst)
if 'system_metadata' in expected_attrs:
instance['system_metadata'] = utils.instance_sys_meta(db_inst)
if 'fault' in expected_attrs:
instance['fault'] = (
objects.InstanceFault.get_latest_for_instance(
context, instance.uuid))
if 'numa_topology' in expected_attrs:
if have_extra:
instance._load_numa_topology(
db_inst['extra'].get('numa_topology'))
else:
instance.numa_topology = None
if 'pci_requests' in expected_attrs:
if have_extra:
instance._load_pci_requests(
db_inst['extra'].get('pci_requests'))
else:
instance.pci_requests = None
if 'vcpu_model' in expected_attrs:
if have_extra:
instance._load_vcpu_model(
db_inst['extra'].get('vcpu_model'))
else:
instance.vcpu_model = None
if 'ec2_ids' in expected_attrs:
instance._load_ec2_ids()
if 'info_cache' in expected_attrs:
if db_inst['info_cache'] is None:
instance.info_cache = None
elif not instance.obj_attr_is_set('info_cache'):
# TODO(danms): If this ever happens on a backlevel instance
# passed to us by a backlevel service, things will break
instance.info_cache = objects.InstanceInfoCache(context)
if instance.info_cache is not None:
instance.info_cache._from_db_object(context,
instance.info_cache,
db_inst['info_cache'])
migrated_flavor = instance._maybe_migrate_flavor(db_inst,
expected_attrs)
# TODO(danms): If we are updating these on a backlevel instance,
# we'll end up sending back new versions of these objects (see
        # above note for new info_caches)
if 'pci_devices' in expected_attrs:
pci_devices = base.obj_make_list(
context, objects.PciDeviceList(context),
objects.PciDevice, db_inst['pci_devices'])
instance['pci_devices'] = pci_devices
if 'security_groups' in expected_attrs:
sec_groups = base.obj_make_list(
context, objects.SecurityGroupList(context),
objects.SecurityGroup, db_inst['security_groups'])
instance['security_groups'] = sec_groups
if 'tags' in expected_attrs:
tags = base.obj_make_list(
context, objects.TagList(context),
objects.Tag, db_inst['tags'])
instance['tags'] = tags
instance.obj_reset_changes()
if migrated_flavor:
# NOTE(danms): If we migrated the flavor above, we need to make
# sure we know that flavor and system_metadata have been
# touched so that the next save will update them. We can remove
# this when we remove _migrate_flavor().
instance._changed_fields.add('system_metadata')
instance._changed_fields.add('flavor')
return instance
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable_classmethod
def get_by_id(cls, context, inst_id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get(context, inst_id,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
if attr in updates]
if 'security_groups' in updates:
updates['security_groups'] = [x.name for x in
updates['security_groups']]
if 'info_cache' in updates:
updates['info_cache'] = {
'network_info': updates['info_cache'].network_info.json()
}
updates['extra'] = {}
numa_topology = updates.pop('numa_topology', None)
if numa_topology:
expected_attrs.append('numa_topology')
updates['extra']['numa_topology'] = numa_topology._to_json()
pci_requests = updates.pop('pci_requests', None)
if pci_requests:
expected_attrs.append('pci_requests')
updates['extra']['pci_requests'] = (
pci_requests.to_json())
flavor = updates.pop('flavor', None)
if flavor:
expected_attrs.append('flavor')
old = ((self.obj_attr_is_set('old_flavor') and
self.old_flavor) and
self.old_flavor.obj_to_primitive() or None)
new = ((self.obj_attr_is_set('new_flavor') and
self.new_flavor) and
self.new_flavor.obj_to_primitive() or None)
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': old,
'new': new,
}
updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
vcpu_model = updates.pop('vcpu_model', None)
if vcpu_model:
expected_attrs.append('vcpu_model')
updates['extra']['vcpu_model'] = (
jsonutils.dumps(vcpu_model.obj_to_primitive()))
db_inst = db.instance_create(self._context, updates)
self._from_db_object(self._context, self, db_inst, expected_attrs)
@base.remotable
def destroy(self):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
if not self.obj_attr_is_set('uuid'):
raise exception.ObjectActionError(action='destroy',
reason='no uuid')
if not self.obj_attr_is_set('host') or not self.host:
# NOTE(danms): If our host is not set, avoid a race
constraint = db.constraint(host=db.equal_any(None))
else:
constraint = None
try:
db_inst = db.instance_destroy(self._context, self.uuid,
constraint=constraint)
self._from_db_object(self._context, self, db_inst)
except exception.ConstraintNotMet:
raise exception.ObjectActionError(action='destroy',
reason='host changed')
delattr(self, base.get_attrname('id'))
def _save_info_cache(self, context):
if self.info_cache:
with self.info_cache.obj_alternate_context(context):
self.info_cache.save()
def _save_security_groups(self, context):
security_groups = self.security_groups or []
for secgroup in security_groups:
with secgroup.obj_alternate_context(context):
secgroup.save()
self.security_groups.obj_reset_changes()
def _save_fault(self, context):
# NOTE(danms): I don't think we need to worry about this, do we?
pass
def _save_numa_topology(self, context):
if self.numa_topology:
self.numa_topology.instance_uuid = self.uuid
with self.numa_topology.obj_alternate_context(context):
self.numa_topology._save()
else:
objects.InstanceNUMATopology.delete_by_instance_uuid(
context, self.uuid)
def _save_pci_requests(self, context):
# NOTE(danms): No need for this yet.
pass
def _save_pci_devices(self, context):
# NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
# permitted to update the DB. all change to devices from here will
# be dropped.
pass
def _save_flavor(self, context):
# FIXME(danms): We can do this smarterly by updating this
# with all the other extra things at the same time
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': (self.old_flavor and
self.old_flavor.obj_to_primitive() or None),
'new': (self.new_flavor and
self.new_flavor.obj_to_primitive() or None),
}
db.instance_extra_update_by_uuid(
context, self.uuid,
{'flavor': jsonutils.dumps(flavor_info)})
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
def _save_old_flavor(self, context):
if 'old_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_new_flavor(self, context):
if 'new_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_vcpu_model(self, context):
# TODO(yjiang5): should merge the db accesses for all the extra
# fields
if 'vcpu_model' in self.obj_what_changed():
if self.vcpu_model:
update = jsonutils.dumps(self.vcpu_model.obj_to_primitive())
else:
update = None
db.instance_extra_update_by_uuid(
context, self.uuid,
{'vcpu_model': update})
def _save_ec2_ids(self, context):
# NOTE(hanlind): Read-only so no need to save this.
pass
def _maybe_upgrade_flavor(self):
# NOTE(danms): We may have regressed to flavors stored in sysmeta,
# so we have to merge back in here. That could happen if we pass
# a converted instance to an older node, which still stores the
# flavor in sysmeta, which then calls save(). We need to not
# store that flavor info back into sysmeta after we've already
# converted it.
if (not self.obj_attr_is_set('system_metadata') or
'instance_type_id' not in self.system_metadata):
return
LOG.debug('Transforming legacy flavors on save', instance=self)
for ftype in ('', 'old_', 'new_'):
attr = '%sflavor' % ftype
try:
flavor = flavors.extract_flavor(self, prefix=ftype)
flavors.delete_flavor_info(self.system_metadata, ftype)
# NOTE(danms): This may trigger a lazy-load of the flavor
# information, but only once and it avoids re-fetching and
# re-migrating the original flavor.
getattr(self, attr).update(flavor)
except AttributeError:
setattr(self, attr, flavor)
except KeyError:
setattr(self, attr, None)
@base.remotable
def save(self, expected_vm_state=None,
expected_task_state=None, admin_state_reset=False):
"""Save updates to this instance
Column-wise updates will be made based on the result of
self.what_changed(). If expected_task_state is provided,
it will be checked against the in-database copy of the
instance before updates are made.
:param:context: Security context
:param:expected_task_state: Optional tuple of valid task states
for the instance to be in
:param:expected_vm_state: Optional tuple of valid vm states
for the instance to be in
:param admin_state_reset: True if admin API is forcing setting
of task_state/vm_state
"""
# Store this on the class because _cell_name_blocks_sync is useless
# after the db update call below.
self._sync_cells = not self._cell_name_blocks_sync()
context = self._context
cell_type = cells_opts.get_cell_type()
if cell_type == 'api' and self.cell_name:
# NOTE(comstud): We need to stash a copy of ourselves
# before any updates are applied. When we call the save
# methods on nested objects, we will lose any changes to
# them. But we need to make sure child cells can tell
# what is changed.
#
# We also need to nuke any updates to vm_state and task_state
# unless admin_state_reset is True. compute cells are
# authoritative for their view of vm_state and task_state.
stale_instance = self.obj_clone()
def _handle_cell_update_from_api():
if self._sync_cells:
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_from_api(context, stale_instance,
expected_vm_state,
expected_task_state,
admin_state_reset)
else:
stale_instance = None
self._maybe_upgrade_flavor()
updates = {}
changes = self.obj_what_changed()
for field in self.fields:
# NOTE(danms): For object fields, we construct and call a
# helper method like self._save_$attrname()
if (self.obj_attr_is_set(field) and
isinstance(self.fields[field], fields.ObjectField)):
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
LOG.exception(_LE('No save handler for %s'), field,
instance=self)
elif field in changes:
if (field == 'cell_name' and self[field] is not None and
self[field].startswith(cells_utils.BLOCK_SYNC_FLAG)):
updates[field] = self[field].replace(
cells_utils.BLOCK_SYNC_FLAG, '', 1)
else:
updates[field] = self[field]
if not updates:
if stale_instance:
_handle_cell_update_from_api()
return
# Cleaned needs to be turned back into an int here
if 'cleaned' in updates:
if updates['cleaned']:
updates['cleaned'] = 1
else:
updates['cleaned'] = 0
if expected_task_state is not None:
if (self.VERSION == '1.9' and
expected_task_state == 'image_snapshot'):
# NOTE(danms): Icehouse introduced a pending state which
# Havana doesn't know about. If we're an old instance,
# tolerate the pending state as well
expected_task_state = [
expected_task_state, 'image_snapshot_pending']
updates['expected_task_state'] = expected_task_state
if expected_vm_state is not None:
updates['expected_vm_state'] = expected_vm_state
expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
if self.obj_attr_is_set(attr)]
if 'pci_devices' in expected_attrs:
# NOTE(danms): We don't refresh pci_devices on save right now
expected_attrs.remove('pci_devices')
# NOTE(alaski): We need to pull system_metadata for the
# notification.send_update() below. If we don't there's a KeyError
# when it tries to extract the flavor.
# NOTE(danms): If we have sysmeta, we need flavor since the caller
# might be expecting flavor information as a result
if 'system_metadata' not in expected_attrs:
expected_attrs.append('system_metadata')
expected_attrs.append('flavor')
old_ref, inst_ref = db.instance_update_and_get_original(
context, self.uuid, updates, update_cells=False,
columns_to_join=_expected_cols(expected_attrs))
self._from_db_object(context, self, inst_ref,
expected_attrs=expected_attrs)
# NOTE(danms): We have to be super careful here not to trigger
# any lazy-loads that will unmigrate or unbackport something. So,
# make a copy of the instance for notifications first.
new_ref = self.obj_clone()
if stale_instance:
_handle_cell_update_from_api()
elif cell_type == 'compute':
if self._sync_cells:
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_at_top(context,
base.obj_to_primitive(new_ref))
notifications.send_update(context, old_ref, new_ref)
self.obj_reset_changes()
@base.remotable
def refresh(self, use_slave=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(self._context, uuid=self.uuid,
expected_attrs=extra,
use_slave=use_slave)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
current._context = None
for field in self.fields:
if self.obj_attr_is_set(field):
if field == 'info_cache':
self.info_cache.refresh()
elif self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def _load_generic(self, attrname):
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
# NOTE(danms): Never allow us to recursively-load
if instance.obj_attr_is_set(attrname):
self[attrname] = instance[attrname]
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='loading %s requires recursion' % attrname)
def _load_fault(self):
self.fault = objects.InstanceFault.get_latest_for_instance(
self._context, self.uuid)
def _load_numa_topology(self, db_topology=None):
if db_topology is not None:
self.numa_topology = \
objects.InstanceNUMATopology.obj_from_db_obj(self.uuid,
db_topology)
else:
try:
self.numa_topology = \
objects.InstanceNUMATopology.get_by_instance_uuid(
self._context, self.uuid)
except exception.NumaTopologyNotFound:
self.numa_topology = None
def _load_pci_requests(self, db_requests=None):
# FIXME: also do this if none!
if db_requests is not None:
self.pci_requests = objects.InstancePCIRequests.obj_from_db(
self._context, self.uuid, db_requests)
else:
self.pci_requests = \
objects.InstancePCIRequests.get_by_instance_uuid(
self._context, self.uuid)
def _load_flavor(self):
try:
instance = self.__class__.get_by_uuid(
self._context, uuid=self.uuid,
expected_attrs=['flavor', 'system_metadata'])
except exception.InstanceNotFound:
            # NOTE(danms): Before we had instance types in system_metadata,
            # we just looked up the instance_type_id. Since we could still
            # have an instance in the database that has neither of the newer
            # setups, mirror the original behavior here if the instance is
            # deleted.
if not self.deleted:
raise
self.flavor = objects.Flavor.get_by_id(self._context,
self.instance_type_id)
self.old_flavor = None
self.new_flavor = None
return
# NOTE(danms): Orphan the instance to make sure we don't lazy-load
# anything below
instance._context = None
self.flavor = instance.flavor
self.old_flavor = instance.old_flavor
self.new_flavor = instance.new_flavor
# NOTE(danms): The query above may have migrated the flavor from
# system_metadata. Since we have it anyway, go ahead and refresh
# our system_metadata from it so that a save will be accurate.
instance.system_metadata.update(self.get('system_metadata', {}))
self.system_metadata = instance.system_metadata
def _load_vcpu_model(self, db_vcpu_model=None):
if db_vcpu_model is None:
self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
self._context, self.uuid)
else:
db_vcpu_model = jsonutils.loads(db_vcpu_model)
self.vcpu_model = objects.VirtCPUModel.obj_from_primitive(
db_vcpu_model)
def _load_ec2_ids(self):
self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)
def obj_load_attr(self, attrname):
if attrname not in INSTANCE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if ('flavor' in attrname and
self.obj_attr_is_set('system_metadata') and
'instance_type_id' in self.system_metadata):
# NOTE(danms): Looks like we're loading a flavor, and that
# should be doable without a context, so do this before the
# orphan check below.
self._migrate_flavor(self)
if self.obj_attr_is_set(attrname):
return
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
# NOTE(danms): We handle some fields differently here so that we
# can be more efficient
if attrname == 'fault':
self._load_fault()
elif attrname == 'numa_topology':
self._load_numa_topology()
elif attrname == 'pci_requests':
self._load_pci_requests()
elif attrname == 'vcpu_model':
self._load_vcpu_model()
elif attrname == 'ec2_ids':
self._load_ec2_ids()
elif 'flavor' in attrname:
self._load_flavor()
else:
# FIXME(comstud): This should be optimized to only load the attr.
self._load_generic(attrname)
self.obj_reset_changes([attrname])
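    # Illustrative sketch (not part of the original module): lazy-loading in
    # practice. Touching an optional attribute that was not requested via
    # expected_attrs routes through obj_load_attr() above, e.g.:
    #
    #     inst = objects.Instance.get_by_uuid(ctxt, some_uuid)
    #     fault = inst.fault   # triggers obj_load_attr('fault') -> _load_fault()
    #
    # ``ctxt`` and ``some_uuid`` are assumed to be an existing request context
    # and instance UUID; passing expected_attrs=['fault'] up front avoids the
    # extra database round-trip.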
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
try:
return getattr(self, attr)
except exception.FlavorNotFound:
# NOTE(danms): This only happens in the case where we don't
# have flavor information in sysmeta or extra, and doing
# this triggers a lookup based on our instance_type_id for
# (very) legacy instances. That legacy code expects a None here,
# so emulate it for this helper, even though the actual attribute
# is not nullable.
return None
def set_flavor(self, flavor, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
setattr(self, attr, flavor)
self.save()
def delete_flavor(self, namespace):
prefix = ('%s_' % namespace) if namespace else ''
attr = '%sflavor' % prefix
setattr(self, attr, None)
self.save()
@base.remotable
def delete_metadata_key(self, key):
"""Optimized metadata delete method.
This provides a more efficient way to delete a single metadata
key, instead of just calling instance.save(). This should be called
with the key still present in self.metadata, which it will update
after completion.
"""
db.instance_metadata_delete(self._context, self.uuid, key)
md_was_changed = 'metadata' in self.obj_what_changed()
del self.metadata[key]
self._orig_metadata.pop(key, None)
notifications.send_update(self._context, self, self)
if not md_was_changed:
self.obj_reset_changes(['metadata'])
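    # Illustrative sketch (not part of the original module): per the docstring
    # above, the key must still be present locally when this is called, e.g.:
    #
    #     inst.metadata                     # e.g. {'foo': 'bar'}, already saved
    #     inst.delete_metadata_key('foo')   # one targeted DB delete; the local
    #                                       # self.metadata copy is updated too
    #
    # ``inst`` is assumed to be an Instance with 'metadata' already loaded.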
def _cell_name_blocks_sync(self):
if (self.obj_attr_is_set('cell_name') and
self.cell_name is not None and
self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG)):
return True
return False
def _normalize_cell_name(self):
"""Undo skip_cell_sync()'s cell_name modification if applied"""
if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
return
cn_changed = 'cell_name' in self.obj_what_changed()
if self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG):
self.cell_name = self.cell_name.replace(
cells_utils.BLOCK_SYNC_FLAG, '', 1)
            # cell_name is not normally an empty string; if it is empty here,
            # it was None or unset before cells_utils.BLOCK_SYNC_FLAG was
            # applied.
if len(self.cell_name) == 0:
self.cell_name = None
if not cn_changed:
self.obj_reset_changes(['cell_name'])
@contextlib.contextmanager
def skip_cells_sync(self):
"""Context manager to save an instance without syncing cells.
Temporarily disables the cells syncing logic, if enabled. This should
only be used when saving an instance that has been passed down/up from
another cell in order to avoid passing it back to the originator to be
re-saved.
"""
cn_changed = 'cell_name' in self.obj_what_changed()
if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
self.cell_name = ''
self.cell_name = '%s%s' % (cells_utils.BLOCK_SYNC_FLAG, self.cell_name)
if not cn_changed:
self.obj_reset_changes(['cell_name'])
try:
yield
finally:
self._normalize_cell_name()
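# Illustrative sketch (not part of the original module): Instance.skip_cells_sync()
# is meant to wrap a save() of an instance that was handed down/up from another
# cell, e.g.:
#
#     with inst.skip_cells_sync():
#         inst.save()   # cell_name carries BLOCK_SYNC_FLAG for the duration
#
# ``inst`` is assumed to be an Instance object; _normalize_cell_name() in the
# finally block above strips the flag again afterwards.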
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
inst_faults = {}
if get_fault:
# Build an instance_uuid:latest-fault mapping
expected_attrs.remove('fault')
instance_uuids = [inst['uuid'] for inst in db_inst_list]
faults = objects.InstanceFaultList.get_by_instance_uuids(
context, instance_uuids)
for fault in faults:
if fault.instance_uuid not in inst_faults:
inst_faults[fault.instance_uuid] = fault
inst_list.objects = []
for db_inst in db_inst_list:
inst_obj = objects.Instance._from_db_object(
context, objects.Instance(context), db_inst,
expected_attrs=expected_attrs)
if get_fault:
inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
inst_list.objects.append(inst_obj)
inst_list.obj_reset_changes()
return inst_list
class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added use_slave to get_by_host
# Instance <= version 1.9
# Version 1.2: Instance <= version 1.11
# Version 1.3: Added use_slave to get_by_filters
# Version 1.4: Instance <= version 1.12
# Version 1.5: Added method get_active_by_window_joined.
# Version 1.6: Instance <= version 1.13
# Version 1.7: Added use_slave to get_active_by_window_joined
# Version 1.8: Instance <= version 1.14
# Version 1.9: Instance <= version 1.15
# Version 1.10: Instance <= version 1.16
# Version 1.11: Added sort_keys and sort_dirs to get_by_filters
# Version 1.12: Pass expected_attrs to instance_get_active_by_window_joined
# Version 1.13: Instance <= version 1.17
# Version 1.14: Instance <= version 1.18
# Version 1.15: Instance <= version 1.19
# Version 1.16: Added get_all() method
# Version 1.17: Instance <= version 1.20
VERSION = '1.17'
fields = {
'objects': fields.ListOfObjectsField('Instance'),
}
child_versions = {
'1.1': '1.9',
# NOTE(danms): Instance was at 1.9 before we added this
'1.2': '1.11',
'1.3': '1.11',
'1.4': '1.12',
'1.5': '1.12',
'1.6': '1.13',
'1.7': '1.13',
'1.8': '1.14',
'1.9': '1.15',
'1.10': '1.16',
'1.11': '1.16',
'1.12': '1.16',
'1.13': '1.17',
'1.14': '1.18',
'1.15': '1.19',
'1.16': '1.19',
'1.17': '1.20',
}
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None, use_slave=False,
sort_keys=None, sort_dirs=None):
if sort_keys or sort_dirs:
db_inst_list = db.instance_get_all_by_filters_sort(
context, filters, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
else:
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit,
marker=marker, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
db_inst_list = db.instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_node(
context, host, node,
columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_not_type(cls, context, host, type_id=None,
expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_not_type(
context, host, type_id=type_id)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_all(cls, context, expected_attrs=None):
"""Returns all instances on all nodes."""
db_instances = db.instance_get_all(
context, columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_instances,
expected_attrs)
@base.remotable_classmethod
def get_hung_in_rebooting(cls, context, reboot_window,
expected_attrs=None):
db_inst_list = db.instance_get_all_hung_in_rebooting(context,
reboot_window)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def _get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None,
use_slave=False):
# NOTE(mriedem): We need to convert the begin/end timestamp strings
# to timezone-aware datetime objects for the DB API call.
begin = timeutils.parse_isotime(begin)
end = timeutils.parse_isotime(end) if end else None
db_inst_list = db.instance_get_active_by_window_joined(
context, begin, end, project_id, host,
columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@classmethod
def get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None,
use_slave=False):
"""Get instances and joins active during a certain time window.
:param:context: nova request context
:param:begin: datetime for the start of the time window
:param:end: datetime for the end of the time window
:param:project_id: used to filter instances by project
:param:host: used to filter instances on a given compute host
:param:expected_attrs: list of related fields that can be joined
in the database layer when querying for instances
        :param:use_slave: if True, ship this query off to a DB slave
:returns: InstanceList
"""
# NOTE(mriedem): We have to convert the datetime objects to string
# primitives for the remote call.
begin = timeutils.isotime(begin)
end = timeutils.isotime(end) if end else None
return cls._get_active_by_window_joined(context, begin, end,
project_id, host,
expected_attrs,
use_slave=use_slave)
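    # Illustrative sketch (not part of the original module): callers pass
    # timezone-aware datetimes and this classmethod serializes them to ISO 8601
    # strings before the remotable call, e.g.:
    #
    #     begin = timeutils.utcnow() - datetime.timedelta(hours=1)
    #     insts = objects.InstanceList.get_active_by_window_joined(
    #         ctxt, begin, expected_attrs=['info_cache'])
    #
    # ``ctxt`` is assumed to be an existing request context; ``datetime`` and
    # ``timeutils`` are assumed to be imported at module level as usual.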
@base.remotable_classmethod
def get_by_security_group_id(cls, context, security_group_id):
db_secgroup = db.security_group_get(
context, security_group_id,
columns_to_join=['instances.info_cache',
'instances.system_metadata'])
return _make_instance_list(context, cls(), db_secgroup['instances'],
['info_cache', 'system_metadata'])
@classmethod
def get_by_security_group(cls, context, security_group):
return cls.get_by_security_group_id(context, security_group.id)
def fill_faults(self):
"""Batch query the database for our instances' faults.
:returns: A list of instance uuids for which faults were found.
"""
uuids = [inst.uuid for inst in self]
faults = objects.InstanceFaultList.get_by_instance_uuids(
self._context, uuids)
faults_by_uuid = {}
for fault in faults:
if fault.instance_uuid not in faults_by_uuid:
faults_by_uuid[fault.instance_uuid] = fault
for instance in self:
if instance.uuid in faults_by_uuid:
instance.fault = faults_by_uuid[instance.uuid]
else:
# NOTE(danms): Otherwise the caller will cause a lazy-load
# when checking it, and we know there are none
instance.fault = None
instance.obj_reset_changes(['fault'])
return faults_by_uuid.keys()
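    # Illustrative sketch (not part of the original module): fill_faults()
    # batches the fault lookup for a whole list instead of lazy-loading one
    # instance at a time, e.g.:
    #
    #     insts = objects.InstanceList.get_by_host(ctxt, 'compute-1')
    #     faulted_uuids = insts.fill_faults()
    #     for inst in insts:
    #         print(inst.uuid, inst.fault)   # fault is None when none recorded
    #
    # ``ctxt`` and the host name are assumed; get_by_host() is defined above.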
|
|
from __future__ import unicode_literals
import datetime
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models, transaction
from django.db.models import TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
F, Case, Col, Date, DateTime, ExpressionWrapper, Func, OrderBy, Random,
RawSQL, Ref, Value, When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from django.utils.timezone import utc
from .models import UUID, Company, Employee, Experiment, Number, Time
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann", salary=30)
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
def test_annotate_values_aggregate(self):
companies = Company.objects.annotate(
salaries=F('ceo__salary'),
).values('num_employees', 'salaries').aggregate(
result=Sum(F('salaries') + F('num_employees'),
output_field=models.IntegerField()),
)
self.assertEqual(companies['result'], 2395)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
self.company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_order_of_operations(self):
        # The standard order of operations is followed
        self.company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_parenthesis_priority(self):
        # The order of operations can be overridden by parentheses
self.company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [
None,
None,
],
lambda n: n.float,
ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F('ceo'))
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
with self.assertRaises(FieldError):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(name="Test GmbH")
def test():
test_gmbh.point_of_contact = F("ceo")
self.assertRaises(ValueError, test)
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
self.assertRaises(FieldError, test_gmbh.save)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
test_gmbh = Company.objects.get(name="Test GmbH")
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
self.assertRaises(TypeError, acme.save)
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
@skipIfDBFeature('has_case_insensitive_like')
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertQuerysetEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2], lambda x: x)
self.assertQuerysetEqual(
Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk'),
[e2, e3], lambda x: x)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent: if the "gte=1"
        # lookup was seen first, the F() reused the join generated by the
        # gte lookup; if the F() was seen first, it generated a join the
        # other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
        # Another case similar to the one above. Now we have the same join
        # in two filter kwargs, one in the lhs lookup, one in F(). Before
        # #18375 the number of joins generated was random if dict
        # randomization was enabled, i.e. the generated query depended on
        # which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# Test that F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
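# Illustrative sketch (not part of the original test module): the tests above
# exercise the same F() patterns application code would use, e.g.:
#
#     # bump a column atomically in the database
#     Company.objects.filter(pk=some_pk).update(num_employees=F('num_employees') + 1)
#     # compare two columns of the same row
#     Company.objects.filter(num_employees__gt=F('num_chairs'))
#
# ``some_pk`` is assumed to be an existing primary key; both statements rely
# only on the Company model imported from .models above.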
class ExpressionsTests(TestCase):
def test_F_object_deepcopy(self):
"""
Make sure F objects can be deepcopied (#23492)
"""
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_f_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
"""
        Test that special characters (e.g. %, _ and \) stored in the database
        are properly escaped when using a pattern lookup with an expression --
        refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="John"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__contains=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__startswith=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__endswith=F('lastname')),
["<Employee: Jean-Claude Claude>"],
ordered=False)
def test_insensitive_patterns_escape(self):
"""
        Test that special characters (e.g. %, _ and \) stored in the database
        are properly escaped when using a case-insensitive pattern lookup with
        an expression -- refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="john"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__icontains=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__istartswith=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__iendswith=F('lastname')),
["<Employee: Jean-Claude claude>"],
ordered=False)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
        We can fill a value in all objects with another value of the
        same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
        We can filter for objects where a value does not equal the value
        of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword u?'nope' into field.*"):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
        # LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2,
float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
        # RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'),
float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
self.sday = sday = datetime.date(2010, 6, 25)
self.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date(), estimated_time=delta0)
self.deltas.append(delta0)
self.delays.append(e0.start -
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime + delay, end=end, completed=end.date(), estimated_time=delta1)
self.deltas.append(delta1)
self.delays.append(e1.start -
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday - datetime.timedelta(3), start=stime, end=end,
completed=end.date(), estimated_time=datetime.timedelta(hours=1))
self.deltas.append(delta2)
self.delays.append(e2.start -
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3)
self.deltas.append(delta3)
self.delays.append(e3.start -
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday - datetime.timedelta(10), start=stime, end=end,
completed=end.date(), estimated_time=delta4 - datetime.timedelta(1))
self.deltas.append(delta4)
self.delays.append(e4.start -
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed - e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned') + delay +
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [e.name for e in
Experiment.objects.filter(start=F('start') + F('estimated_time'))]
self.assertEqual(zeros, ['e0'])
end_less = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
self.assertEqual(end_less, ['e2'])
delta_math = [e.name for e in
Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))]
self.assertEqual(delta_math, ['e4'])
@skipUnlessDBFeature("has_native_duration_field")
def test_date_subtraction(self):
under_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))]
self.assertEqual(under_estimate, ['e2'])
over_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))]
self.assertEqual(over_estimate, ['e4'])
def test_duration_with_datetime(self):
        # Exclude e1, which has very high precision, so we can test this on
        # all backends regardless of whether or not they support
        # microsecond_precision.
over_estimate = Experiment.objects.exclude(name='e1').filter(
completed__gt=self.stime + F('estimated_time'),
).order_by('name')
self.assertQuerysetEqual(over_estimate, ['e3', 'e4'], lambda e: e.name)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
class ReprTests(TestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
)
self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
self.assertEqual(repr(Date('published', 'exact')), "Date(published, exact)")
self.assertEqual(repr(DateTime('published', 'exact', utc)), "DateTime(published, exact, %s)" % utc)
self.assertEqual(repr(F('published')), "F(published)")
self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>")
self.assertEqual(
repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))"
)
self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
self.assertEqual(repr(Random()), "Random()")
self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
self.assertEqual(repr(Value(1)), "Value(1)")
def test_functions(self):
self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length('a')), "Length(F(a))")
self.assertEqual(repr(Lower('a')), "Lower(F(a))")
self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper('a')), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg('a')), "Avg(F(a))")
self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
self.assertEqual(repr(Max('a')), "Max(F(a))")
self.assertEqual(repr(Min('a')), "Min(F(a))")
self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum('a')), "Sum(F(a))")
self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
|
|
import logging
import asyncio
import typing
import binascii
from lbry.utils import resolve_host
from lbry.dht import constants
from lbry.dht.protocol.distance import Distance
from lbry.dht.protocol.iterative_find import IterativeNodeFinder, IterativeValueFinder
from lbry.dht.protocol.protocol import KademliaProtocol
from lbry.dht.peer import KademliaPeer
if typing.TYPE_CHECKING:
from lbry.dht.peer import PeerManager
log = logging.getLogger(__name__)
class Node:
def __init__(self, loop: asyncio.BaseEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.rpc_timeout,
split_buckets_under_index: int = constants.split_buckets_under_index):
self.loop = loop
self.internal_udp_port = internal_udp_port
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
split_buckets_under_index)
self.listening_port: asyncio.DatagramTransport = None
self.joined = asyncio.Event(loop=self.loop)
self._join_task: asyncio.Task = None
self._refresh_task: asyncio.Task = None
async def refresh_node(self, force_once=False):
while True:
# remove peers with expired blob announcements from the datastore
self.protocol.data_store.removed_expired_peers()
total_peers: typing.List['KademliaPeer'] = []
# add all peers in the routing table
total_peers.extend(self.protocol.routing_table.get_peers())
            # add all the peers who have announced blobs to us
total_peers.extend(self.protocol.data_store.get_storing_contacts())
# get ids falling in the midpoint of each bucket that hasn't been recently updated
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
# populate/split the buckets further
buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
if buckets_with_contacts <= 3:
for i in range(buckets_with_contacts):
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
if self.protocol.routing_table.get_peers():
# if we have node ids to look up, perform the iterative search until we have k results
while node_ids:
peers = await self.peer_search(node_ids.pop())
total_peers.extend(peers)
else:
if force_once:
break
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(constants.refresh_interval // 4, fut.set_result, None)
await fut
continue
            # ping the set of peers; upon success/failure the routing table and last replied/failed time will be updated
to_ping = [peer for peer in set(total_peers) if self.protocol.peer_manager.peer_is_good(peer) is not True]
if to_ping:
self.protocol.ping_queue.enqueue_maybe_ping(*to_ping, delay=0)
if force_once:
break
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(constants.refresh_interval, fut.set_result, None)
await fut
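    # Illustrative sketch (not part of the original module): refresh_node()
    # normally runs forever as the background _refresh_task, but one
    # maintenance pass can be awaited directly, e.g.:
    #
    #     await node.refresh_node(force_once=True)
    #
    # ``node`` is assumed to be a started Node; with force_once=True the loop
    # above breaks after a single ping/search round instead of sleeping.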
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
hash_value = binascii.unhexlify(blob_hash.encode())
assert len(hash_value) == constants.hash_length
peers = await self.peer_search(hash_value)
if not self.protocol.external_ip:
raise Exception("Cannot determine external IP")
log.debug("Store to %i peers", len(peers))
for peer in peers:
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
stored_to_tup = await asyncio.gather(
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
)
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
if stored_to:
log.info("Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
len(stored_to), len(peers))
else:
log.warning("Failed announcing %s, stored to 0 peers", blob_hash[:8])
return stored_to
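    # Illustrative sketch (not part of the original module): announce_blob()
    # takes the blob hash as a hex string and returns the node ids that
    # accepted the store request, e.g.:
    #
    #     stored_to = await node.announce_blob(blob_hash_hex)
    #     if not stored_to:
    #         log.warning("announce failed for %s", blob_hash_hex[:8])
    #
    # ``node`` and ``blob_hash_hex`` (a blob hash in hex) are assumed to exist.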
def stop(self) -> None:
if self.joined.is_set():
self.joined.clear()
if self._join_task:
self._join_task.cancel()
if self._refresh_task and not (self._refresh_task.done() or self._refresh_task.cancelled()):
self._refresh_task.cancel()
if self.protocol and self.protocol.ping_queue.running:
self.protocol.ping_queue.stop()
self.protocol.stop()
if self.listening_port is not None:
self.listening_port.close()
self._join_task = None
self.listening_port = None
log.info("Stopped DHT node")
async def start_listening(self, interface: str = '') -> None:
if not self.listening_port:
self.listening_port, _ = await self.loop.create_datagram_endpoint(
lambda: self.protocol, (interface, self.internal_udp_port)
)
log.info("DHT node listening on UDP %s:%i", interface, self.internal_udp_port)
self.protocol.start()
else:
log.warning("Already bound to port %s", self.listening_port)
async def join_network(self, interface: typing.Optional[str] = '',
known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
if not self.listening_port:
await self.start_listening(interface)
self.protocol.ping_queue.start()
self._refresh_task = self.loop.create_task(self.refresh_node())
# resolve the known node urls
known_node_addresses = []
url_to_addr = {}
if known_node_urls:
for host, port in known_node_urls:
address = await resolve_host(host, port, proto='udp')
if (address, port) not in known_node_addresses and\
(address, port) != (self.protocol.external_ip, self.protocol.udp_port):
known_node_addresses.append((address, port))
url_to_addr[address] = host
if known_node_addresses:
peers = [
KademliaPeer(self.loop, address, udp_port=port)
for (address, port) in known_node_addresses
]
while True:
if not self.protocol.routing_table.get_peers():
if self.joined.is_set():
self.joined.clear()
self.protocol.peer_manager.reset()
self.protocol.ping_queue.enqueue_maybe_ping(*peers, delay=0.0)
peers.extend(await self.peer_search(self.protocol.node_id, shortlist=peers, count=32))
if self.protocol.routing_table.get_peers():
self.joined.set()
log.info(
"Joined DHT, %i peers known in %i buckets", len(self.protocol.routing_table.get_peers()),
self.protocol.routing_table.buckets_with_contacts())
else:
continue
await asyncio.sleep(1, loop=self.loop)
log.info("Joined DHT, %i peers known in %i buckets", len(self.protocol.routing_table.get_peers()),
self.protocol.routing_table.buckets_with_contacts())
self.joined.set()
def start(self, interface: str, known_node_urls: typing.List[typing.Tuple[str, int]]):
self._join_task = self.loop.create_task(
self.join_network(
interface=interface, known_node_urls=known_node_urls
)
)
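    # Illustrative sketch (not part of the original module): typical startup
    # schedules the join task and then waits on the joined event, e.g.:
    #
    #     node = Node(loop, peer_manager, node_id, 4444, 4444, 3333, '1.2.3.4')
    #     node.start('0.0.0.0', [('dht.example.org', 4444)])
    #     await node.joined.wait()
    #
    # The ports, external IP and bootstrap hostname are placeholders; real
    # values come from the daemon configuration.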
def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = constants.bottom_out_limit,
max_results: int = constants.k) -> IterativeNodeFinder:
return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = 40,
max_results: int = -1) -> IterativeValueFinder:
return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
async def peer_search(self, node_id: bytes, count=constants.k, max_results=constants.k*2,
bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
) -> typing.List['KademliaPeer']:
peers = []
async for iteration_peers in self.get_iterative_node_finder(
node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
peers.extend(iteration_peers)
distance = Distance(node_id)
peers.sort(key=lambda peer: distance(peer.node_id))
return peers[:count]
async def _accumulate_search_junction(self, search_queue: asyncio.Queue,
result_queue: asyncio.Queue):
tasks = []
try:
while True:
blob_hash = await search_queue.get()
tasks.append(self.loop.create_task(self._value_producer(blob_hash, result_queue)))
finally:
for task in tasks:
task.cancel()
async def _value_producer(self, blob_hash: str, result_queue: asyncio.Queue):
async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
result_queue.put_nowait(results)
def accumulate_peers(self, search_queue: asyncio.Queue,
peer_queue: typing.Optional[asyncio.Queue] = None) -> typing.Tuple[
asyncio.Queue, asyncio.Task]:
q = peer_queue or asyncio.Queue(loop=self.loop)
return q, self.loop.create_task(self._accumulate_search_junction(search_queue, q))
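    # Illustrative sketch (not part of the original module): accumulate_peers()
    # wires a queue of blob hashes to a queue of peers found for them, e.g.:
    #
    #     search_queue = asyncio.Queue(loop=node.loop)
    #     peer_queue, task = node.accumulate_peers(search_queue)
    #     search_queue.put_nowait(blob_hash_hex)
    #     peers = await peer_queue.get()
    #     task.cancel()   # stops _accumulate_search_junction and its producers
    #
    # ``node`` and ``blob_hash_hex`` are assumed to exist as in the sketches above.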
|
|
'''
Action Bar
==========
.. versionadded:: 1.8.0
.. image:: images/actionbar.png
:align: right
The ActionBar widget is like Android's ActionBar, where items are stacked
horizontally.
The :class:`ActionBar` will contain one :class:`ActionView` and many
:class:`ContextualActionView`\s.
An :class:`ActionView` will contain an :class:`ActionPrevious` having title,
app_icon and previous_icon properties. An :class:`ActionView` will contain
subclasses of :class:`ActionItem`\s. Some predefined ones include an
:class:`ActionButton`, an :class:`ActionToggleButton`, an :class:`ActionCheck`,
an :class:`ActionSeparator` and an :class:`ActionGroup`.
An :class:`ActionGroup` is used to display :class:`ActionItem`\s in a group.
An :class:`ActionView` will always display an :class:`ActionGroup` after other
:class:`ActionItem`\s.
An :class:`ActionView` will contain an :class:`ActionOverflow`.
A :class:`ContextualActionView` is a subclass of an :class:`ActionView`.
'''
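# Illustrative sketch (not part of the original docstring): building the widget
# tree described above directly in Python. The titles and button texts are
# placeholders.
#
#     from kivy.base import runTouchApp
#     from kivy.uix.actionbar import (ActionBar, ActionView, ActionPrevious,
#                                     ActionButton, ActionGroup)
#
#     bar = ActionBar()
#     view = ActionView()
#     view.add_widget(ActionPrevious(title='My App', with_previous=False))
#     view.add_widget(ActionButton(text='Btn0'))
#     group = ActionGroup(text='Group 1')
#     group.add_widget(ActionButton(text='Btn1'))
#     group.add_widget(ActionButton(text='Btn2'))
#     view.add_widget(group)
#     bar.add_widget(view)
#     runTouchApp(bar)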
__all__ = ('ActionBarException', 'ActionItem', 'ActionButton',
'ActionToggleButton', 'ActionCheck', 'ActionSeparator',
'ActionDropDown', 'ActionGroup', 'ActionOverflow',
'ActionView', 'ContextualActionView', 'ActionPrevious',
'ActionBar')
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.dropdown import DropDown
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.checkbox import CheckBox
from kivy.uix.spinner import Spinner
from kivy.config import Config
from kivy.properties import ObjectProperty, NumericProperty, \
BooleanProperty, StringProperty, ListProperty, OptionProperty
from kivy.metrics import sp
from kivy.lang import Builder
from functools import partial
window_icon = ''
if Config:
window_icon = Config.get('kivy', 'window_icon')
class ActionBarException(Exception):
'''ActionBarException class
'''
pass
class ActionItem(object):
'''ActionItem class, an abstract class for all ActionBar widgets. To create
a custom widget for an ActionBar, inherit from this
class. See module documentation for more information.
'''
minimum_width = NumericProperty('90sp')
'''Minimum Width required by an ActionItem.
:data:`minimum_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to '90sp'.
'''
important = BooleanProperty(False)
'''Determines if an ActionItem is important or not.
:data:`important` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
inside_group = BooleanProperty(False)
'''(internal) Determines if an ActionItem is displayed inside an
ActionGroup or not.
:data:`inside_group` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/action_item')
'''Background image of the ActionItem used for the default graphical
representation when the ActionItem is not pressed.
:data:`background_normal` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/action_item'.
'''
background_down = StringProperty(
'atlas://data/images/defaulttheme/action_item_down')
'''Background image of the ActionItem used for default graphical
representation when an ActionItem is pressed.
:data:`background_down` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/action_item_down'.
'''
mipmap = BooleanProperty(True)
    '''Defines whether the image/icon displayed on top of the button uses a
mipmap or not.
:data:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
to `True`.
'''
class ActionButton(Button, ActionItem):
'''ActionButton class, see module documentation for more information.
The text color, width and size_hint_x are set manually via the Kv language
file. It covers a lot of cases: with/without an icon, with/without a group
and takes care of the padding between elements.
    You don't have much control over these properties, so if you want to
    customize its appearance, we suggest you create your own button
    representation. You can do this by creating a class that subclasses an
existing widget and an :class:`ActionItem`::
class MyOwnActionButton(Button, ActionItem):
pass
You can then create your own style using the Kv language.
'''
icon = StringProperty(None, allownone=True)
'''Source image to use when the Button is part of the ActionBar. If the
Button is in a group, the text will be preferred.
'''
class ActionPrevious(ActionButton):
'''ActionPrevious class, see module documentation for more information.
'''
with_previous = BooleanProperty(True)
'''Specifies whether clicking on ActionPrevious will load the previous
screen or not. If True, the previous_icon will be shown otherwise it
will not.
:data:`with_previous` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
app_icon = StringProperty(window_icon)
'''Application icon for the ActionView.
:data:`app_icon` is a :class:`~kivy.properties.StringProperty`
and defaults to the window icon if set, otherwise
'data/logo/kivy-icon-32.png'.
'''
previous_image = StringProperty(
'atlas://data/images/defaulttheme/previous_normal')
    '''Image for the 'previous' ActionButton's default graphical representation.
:data:`previous_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/previous_normal'.
'''
title = StringProperty('')
'''Title for ActionView.
:data:`title` is a :class:`~kivy.properties.StringProperty` and
defaults to ''.
'''
def __init__(self, **kwargs):
super(ActionPrevious, self).__init__(**kwargs)
if not self.app_icon:
self.app_icon = 'data/logo/kivy-icon-32.png'
class ActionToggleButton(ActionItem, ToggleButton):
'''ActionToggleButton class, see module documentation for more information.
'''
icon = StringProperty(None, allownone=True)
'''Source image to use when the Button is part of the ActionBar. If the
Button is in a group, the text will be preferred.
'''
class ActionCheck(ActionItem, CheckBox):
'''ActionCheck class, see module documentation for more information.
'''
pass
class ActionSeparator(ActionItem, Widget):
'''ActionSeparator class, see module documentation for more information.
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/separator')
    '''Background image for the separator's default graphical representation.
:data:`background_image` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/separator'.
'''
class ActionDropDown(DropDown):
'''ActionDropDown class, see module documentation for more information.
'''
pass
class ActionGroup(ActionItem, Spinner):
'''ActionGroup class, see module documentation for more information.
'''
use_separator = BooleanProperty(False)
'''Specifies whether to use a separator after/before this group or not.
:data:`use_separator` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
separator_image = StringProperty(
'atlas://data/images/defaulttheme/separator')
'''Background Image for an ActionSeparator in an ActionView.
:data:`separator_image` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/separator'.
'''
separator_width = NumericProperty(0)
'''Width of the ActionSeparator in an ActionView.
:data:`separator_width` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
mode = OptionProperty('normal', options=('normal', 'spinner'))
'''Sets the current mode of an ActionGroup. If mode is 'normal', the
    ActionGroup's children will be displayed normally if there is enough
space, otherwise they will be displayed in a spinner. If mode is
'spinner', then the children will always be displayed in a spinner.
:data:`mode` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'normal'.
'''
def __init__(self, **kwargs):
self.list_action_item = []
self._list_overflow_items = []
super(ActionGroup, self).__init__(**kwargs)
self.dropdown_cls = ActionDropDown
def add_widget(self, item):
if isinstance(item, ActionSeparator):
super(ActionGroup, self).add_widget(item)
return
if not isinstance(item, ActionItem):
raise ActionBarException('ActionGroup only accepts ActionItem')
self.list_action_item.append(item)
def show_group(self):
self.clear_widgets()
for item in self._list_overflow_items + self.list_action_item:
item.inside_group = True
self._dropdown.add_widget(item)
def _build_dropdown(self, *largs):
if self._dropdown:
self._dropdown.unbind(on_dismiss=self._toggle_dropdown)
self._dropdown.dismiss()
self._dropdown = None
self._dropdown = self.dropdown_cls()
self._dropdown.bind(on_dismiss=self._toggle_dropdown)
def _update_dropdown(self, *largs):
pass
def _toggle_dropdown(self, *largs):
self.is_open = not self.is_open
ddn = self._dropdown
ddn.size_hint_x = None
if not ddn.container:
return
children = ddn.container.children
ddn.width = max([self.width,
children[0].minimum_width])
for item in children:
item.size_hint_y = None
item.height = max([self.height, sp(48)])
def clear_widgets(self):
self._dropdown.clear_widgets()
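# Illustrative sketch (not part of the original module): ActionGroup does not
# keep its ActionItems as direct children. add_widget() above only records them
# in ``list_action_item`` and show_group() later moves them into the dropdown.
# Assuming a running Kivy application, usage would look roughly like this:
#
#     group = ActionGroup(text='File')
#     group.add_widget(ActionButton(text='Open'))
#     group.add_widget(ActionButton(text='Save'))
#     assert len(group.list_action_item) == 2   # stored, not yet displayed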
class ActionOverflow(ActionGroup):
'''ActionOverflow class, see module documentation for more information.
'''
overflow_image = StringProperty(
'atlas://data/images/defaulttheme/overflow')
'''Image to be used as an Overflow Image.
:data:`overflow_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/overflow'.
'''
def add_widget(self, action_item, index=0):
if action_item is None:
return
if isinstance(action_item, ActionSeparator):
return
if not isinstance(action_item, ActionItem):
raise ActionBarException('ActionView only accepts ActionItem'
' (got {!r})'.format(action_item))
else:
if index == 0:
index = len(self._list_overflow_items)
self._list_overflow_items.insert(index, action_item)
def show_default_items(self, parent):
# display the overflow and its items if widgets are directly added to it
if self._list_overflow_items == []:
return
self.show_group()
super(ActionView, parent).add_widget(self)
class ActionView(BoxLayout):
'''ActionView class, see module documentation for more information.
'''
action_previous = ObjectProperty(None)
'''Previous button for an ActionView.
:data:`action_previous` is an :class:`~kivy.properties.ObjectProperty`
and defaults to None.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color in the format (r, g, b, a).
:data:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/action_view')
'''Background image of an ActionView's default graphical representation.
:data:`background_image` is an :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/action_view'.
'''
use_separator = BooleanProperty(False)
'''Specify whether to use a separator before every ActionGroup or not.
:data:`use_separator` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
overflow_group = ObjectProperty(None)
'''Widget to be used for the overflow.
:data:`overflow_group` is an :class:`~kivy.properties.ObjectProperty`
and defaults to an instance of :class:`ActionOverflow`.
'''
def __init__(self, **kwargs):
self._list_action_items = []
self._list_action_group = []
super(ActionView, self).__init__(**kwargs)
self._state = ''
if not self.overflow_group:
self.overflow_group = ActionOverflow(
use_separator=self.use_separator)
def on_action_previous(self, instance, value):
self._list_action_items.insert(0, value)
def add_widget(self, action_item, index=0):
if action_item is None:
return
if not isinstance(action_item, ActionItem):
raise ActionBarException('ActionView only accepts ActionItem'
' (got {!r})'.format(action_item))
elif isinstance(action_item, ActionOverflow):
self.overflow_group = action_item
action_item.use_separator = self.use_separator
elif isinstance(action_item, ActionGroup):
self._list_action_group.append(action_item)
action_item.use_separator = self.use_separator
elif isinstance(action_item, ActionPrevious):
self.action_previous = action_item
else:
super(ActionView, self).add_widget(action_item, index)
if index == 0:
index = len(self._list_action_items)
self._list_action_items.insert(index, action_item)
def on_use_separator(self, instance, value):
for group in self._list_action_group:
group.use_separator = value
self.overflow_group.use_separator = value
def _clear_all(self):
self.clear_widgets()
for group in self._list_action_group:
group.clear_widgets()
self.overflow_group.clear_widgets()
self.overflow_group.list_action_item = []
def _layout_all(self):
# all the items can fit to the view, so expand everything
super_add = super(ActionView, self).add_widget
self._state = 'all'
self._clear_all()
super_add(self.action_previous)
if len(self._list_action_items) > 1:
for child in self._list_action_items[1:]:
child.inside_group = False
super_add(child)
for group in self._list_action_group:
if group.mode == 'spinner':
super_add(group)
group.show_group()
else:
if group.list_action_item != []:
super_add(ActionSeparator())
for child in group.list_action_item:
child.inside_group = False
super_add(child)
self.overflow_group.show_default_items(self)
def _layout_group(self):
# layout all the items in order to pack them per group
super_add = super(ActionView, self).add_widget
self._state = 'group'
self._clear_all()
super_add(self.action_previous)
if len(self._list_action_items) > 1:
for child in self._list_action_items[1:]:
super_add(child)
child.inside_group = False
for group in self._list_action_group:
super_add(group)
group.show_group()
self.overflow_group.show_default_items(self)
def _layout_random(self):
# layout the items in order to pack all of them grouped, and display
# only the action items having 'important'
super_add = super(ActionView, self).add_widget
self._state = 'random'
self._clear_all()
hidden_items = []
hidden_groups = []
total_width = 0
super_add(self.action_previous)
width = (self.width - self.overflow_group.minimum_width -
self.action_previous.minimum_width)
if len(self._list_action_items):
for child in self._list_action_items[1:]:
if child.important:
if child.minimum_width + total_width < width:
super_add(child)
child.inside_group = False
total_width += child.minimum_width
else:
hidden_items.append(child)
else:
hidden_items.append(child)
# if space is left then display ActionItem inside their
# ActionGroup
if total_width < self.width:
for group in self._list_action_group:
if group.minimum_width + total_width +\
group.separator_width < width:
super_add(group)
group.show_group()
total_width += (group.minimum_width +
group.separator_width)
else:
hidden_groups.append(group)
group_index = len(self.children) - 1
# if space is left then display other ActionItems
if total_width < self.width:
for child in hidden_items[:]:
if child.minimum_width + total_width < width:
super_add(child, group_index)
total_width += child.minimum_width
child.inside_group = False
hidden_items.remove(child)
# for all the remaining ActionItems and the ActionItems within
# ActionGroups, display them inside overflow_group
extend_hidden = hidden_items.extend
for group in hidden_groups:
extend_hidden(group.list_action_item)
overflow_group = self.overflow_group
if hidden_items != []:
over_add = super(overflow_group.__class__,
overflow_group).add_widget
for child in hidden_items:
over_add(child)
overflow_group.show_group()
super_add(overflow_group)
def on_width(self, width, *args):
# determine the layout to use
# can we display all of them?
total_width = 0
for child in self._list_action_items:
total_width += child.minimum_width
for group in self._list_action_group:
for child in group.list_action_item:
total_width += child.minimum_width
if total_width <= self.width:
if self._state != 'all':
self._layout_all()
return
# can we display them per group?
total_width = 0
for child in self._list_action_items:
total_width += child.minimum_width
for group in self._list_action_group:
total_width += group.minimum_width
if total_width < self.width:
# ok, we can display all the items grouped
if self._state != 'group':
self._layout_group()
return
# none of the solutions worked, display them in pack mode
self._layout_random()
class ContextualActionView(ActionView):
'''ContextualActionView class, see the module documentation
for more information.
'''
pass
class ActionBar(BoxLayout):
'''ActionBar, see the module documentation for more information.
:Events:
`on_previous`
Fired when action_previous of action_view is pressed.
'''
action_view = ObjectProperty(None)
'''action_view of ActionBar.
:data:`action_view` is an :class:`~kivy.properties.ObjectProperty` and
defaults to an instance of ActionView.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
:data:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/action_bar')
'''Background image of the ActionBar's default graphical representation.
:data:`background_image` is an :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/action_bar'.
'''
border = ListProperty([2, 2, 2, 2])
''':data:`border` to be applied to the :data:`background_image`.
'''
__events__ = ('on_previous',)
def __init__(self, **kwargs):
super(ActionBar, self).__init__(**kwargs)
self._stack_cont_action_view = []
self._emit_previous = partial(self.dispatch, 'on_previous')
def add_widget(self, view):
if isinstance(view, ContextualActionView):
self._stack_cont_action_view.append(view)
if view.action_previous is not None:
view.action_previous.unbind(on_release=self._emit_previous)
view.action_previous.bind(on_release=self._emit_previous)
self.clear_widgets()
super(ActionBar, self).add_widget(view)
elif isinstance(view, ActionView):
self.action_view = view
super(ActionBar, self).add_widget(view)
else:
raise ActionBarException(
'ActionBar can only add ContextualActionView or ActionView')
def on_previous(self, *args):
self._pop_contextual_action_view()
def _pop_contextual_action_view(self):
'''Remove the current ContextualActionView and display either the
previous one or the ActionView.
'''
self._stack_cont_action_view.pop()
self.clear_widgets()
if self._stack_cont_action_view == []:
super(ActionBar, self).add_widget(self.action_view)
else:
super(ActionBar, self).add_widget(self._stack_cont_action_view[-1])
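# Illustrative sketch (not part of the original module): ActionBar keeps a
# stack of ContextualActionView instances on top of its regular ActionView.
# Assuming a running Kivy application, pushing and popping would look roughly
# like this:
#
#     bar = ActionBar()
#     bar.add_widget(ActionView(action_previous=ActionPrevious(title='Main')))
#     bar.add_widget(ContextualActionView(
#         action_previous=ActionPrevious(title='Edit')))  # pushed and displayed
#     bar.dispatch('on_previous')  # pops back to the plain ActionView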
if __name__ == "__main__":
from kivy.base import runTouchApp
from kivy.uix.floatlayout import FloatLayout
from kivy.factory import Factory
# XXX clean the first registration done from '__main__' here.
# otherwise kivy.uix.actionbar.ActionPrevious != __main__.ActionPrevious
Factory.unregister('ActionPrevious')
Builder.load_string('''
<MainWindow>:
ActionBar:
pos_hint: {'top':1}
ActionView:
use_separator: True
ActionPrevious:
title: 'Action Bar'
with_previous: False
ActionOverflow:
ActionButton:
text: 'Btn0'
icon: 'atlas://data/images/defaulttheme/audio-volume-high'
ActionButton:
text: 'Btn1'
ActionButton:
text: 'Btn2'
ActionGroup:
text: 'Group 2'
ActionButton:
text: 'Btn3'
ActionButton:
text: 'Btn4'
ActionGroup:
text: 'Group1'
ActionButton:
text: 'Btn5'
ActionButton:
text: 'Btn6'
ActionButton:
text: 'Btn7'
''')
class MainWindow(FloatLayout):
pass
float_layout = MainWindow()
runTouchApp(float_layout)
|
|
from ctypes import (
c_char_p, c_char, c_int, c_uint, c_ubyte, c_void_p, c_double, c_size_t,
Structure, Union, POINTER
)
# Data types (artypes.h).
ARLong32 = c_int
ARULong32 = c_uint
ARUIntPtr = c_size_t
# General limits (ar.h line 67).
# max actions for 1 filter/active link
AR_MAX_ACTIONS = 25
# max size of an active link message
AR_MAX_AL_MESSAGE_SIZE = 4095
# max size of auth string
AR_MAX_AUTH_SIZE = 2047
# max size of an auto item string
AR_MAX_AUTOMATION_SIZE = 2000
# max size of a temporary buffer
AR_MAX_BUFFER_SIZE = 352
# max size of a character menu value
AR_MAX_CMENU_SIZE = 255
# max size of a user command string
AR_MAX_COMMAND_SIZE = 255
# max size of a user commandLong string
AR_MAX_COMMAND_SIZE_LONG = 4096
# max size of a custom date time format
AR_MAX_FORMAT_SIZE = 32
# max size of a dde item string
AR_MAX_DDE_ITEM = 32767
# max size of a dde service/topic name
AR_MAX_DDE_NAME = 64
# max size of a COM name
AR_MAX_COM_NAME = 1024
# max size of a method string
AR_MAX_COM_METHOD_NAME = 128
# max size of a COM clsId/methodIId
AR_MAX_COM_ID_SIZE = 128
# max size of a character type default
AR_MAX_DEFAULT_SIZE = 255
# max size of a notify email address
AR_MAX_EMAIL_ADDR = 255
# max size of an entry id in the system
AR_MAX_ENTRYID_SIZE = 15
# max size of goto label string
AR_MAX_GOTOGUIDE_LABEL_SIZE = 128
# max size of the group list field
AR_MAX_GROUPLIST_SIZE = 255
# max size for a GUID string
AR_MAX_GUID_SIZE = 30
# max size for a GUID prefix
AR_MAX_GUID_PREFIX_SIZE = 2
# max bytes in ALL columns in an index
AR_MAX_INDEX_BYTES = 255
# max fields in an index
AR_MAX_INDEX_FIELDS = 16
# max size of a language name
AR_MAX_LANG_SIZE = 15
# max size of a license name
AR_MAX_LICENSE_NAME_SIZE = 50
# max size of a license key
AR_MAX_LICENSE_KEY_SIZE = 30
# max size of a locale
AR_MAX_LOCALE_SIZE = 64
# max size of a macro value
AR_MAX_MACRO_VALUE = 255
# max menu items in any single menu for char field default sets
AR_MAX_MENU_ITEMS = 199
# max menu levels for char field default sets
AR_MAX_MENU_LEVELS = 15
# max levels for Dynamic Query and SQL menu
AR_MAX_LEVELS_DYNAMIC_MENU = 5
# max size of a status message
AR_MAX_MESSAGE_SIZE = 255
# max entries that can be handled by a multiple entries call
AR_MAX_MULT_ENTRIES = 100
# max size of a name in the system
AR_MAX_NAME_SIZE = 254
# max size of a user name type (user, group)
AR_MAX_ACCESS_NAME_SIZE = 254
# max size of an access name type
AR_MAX_ACCESS_NAME_SIZE_63 = 30
# max size of a password type
AR_MAX_PASSWORD_SIZE = 30
# max size of hashed string
AR_MAX_HASH_SIZE = 28
# max size of an encrypted password type
AR_MAX_ENCRYPTED_PASSWORD_SIZE = 120
# max number of chars in an object name
AR_MAX_NAME_CHARACTERS = 80
# max size of a notify user line
AR_MAX_NOTIFY_USER = 255
# max size of a character limit pattern
AR_MAX_PATTERN_SIZE = 255
# max size of related to field
AR_MAX_RELATED_SIZE = 128
# max size schemaid in flat file schema cache
AR_MAX_SCHEMAID_SIZE = 5
# max size of a server name
AR_MAX_SERVER_SIZE = 64
# max length of a line
AR_MAX_LINE_LENGTH = 2048
# max size of a short description
AR_MAX_SDESC_SIZE = 254
# max size of a notify subject line
AR_MAX_SUBJECT_SIZE = 255
# max size of a host id
AR_MAX_HOSTID_SIZE = 100
# max size of targetLocation string in activeLink OpenDlg struct
AR_MAX_TARGET_STRING_SIZE = 255
# max size of user identifier
AR_MAX_USER_GUID_SIZE = 128
# max size of wait continue label
AR_MAX_WAIT_CONT_TITLE_SIZE = 64
# Filename limits (ar.h line 126). These are restrictive so that the names will
# be legal on any target system; they match the smallest limits among those systems.
AR_MAX_FILENAME_SIZE = 12
AR_MAX_FILENAME_BASE = 8
AR_MAX_FULL_FILENAME = 255
# Table and column field size limits (ar.h line 180).
# max data size displayed in a column
AR_MAX_COLFLD_COLLENGTH = 255
# max len of details in svr event form
AR_MAX_SVR_EVENT_DETAILS = 255
# max len of a server event list string
AR_MAX_SVR_EVENT_LIST = 255
# maximum external table name size
AR_MAX_TABLENAME_SIZE = 2047
# max columns in a table field
AR_MAX_TBLFLD_NUMCOLS = 255
# max rows returned in a refresh
AR_MAX_TBLFLD_RETROWS = 9999
# Decimal and currency limits (ar.h line 235).
AR_MAX_DECIMAL_SIZE = 64
AR_MAX_CURRENCY_CODE_SIZE = 3
AR_MAX_CURRENCY_RATIO_SIZE = 64
AR_CURRENT_CURRENCY_RATIOS = 0
# Name for the system constants relating to the ARBoolean type (ar.h line 249).
FALSE = 0
TRUE = 1
# Codes for return values from API routines (ar.h line 264).
# successful; status may contain notes
AR_RETURN_OK = 0
# successful with warnings; status contains details
AR_RETURN_WARNING = 1
# failure; status contains details
AR_RETURN_ERROR = 2
# failure; status may or may not contain any details
AR_RETURN_FATAL = 3
# status structure is invalid
AR_RETURN_BAD_STATUS = 4
# status for the active link action
AR_RETURN_PROMPT = 5
# status message for client accessibility
AR_RETURN_ACCESSIBLE = 6
# message type for ToolTips message action
AR_RETURN_TOOLTIP = 7
# Remedy data types (ar.h line 534).
# code for a NULL value
AR_DATA_TYPE_NULL = 0
# code indicating a keyword setting
AR_DATA_TYPE_KEYWORD = 1
# codes for the data type of a value
AR_DATA_TYPE_INTEGER = 2
AR_DATA_TYPE_REAL = 3
AR_DATA_TYPE_CHAR = 4
AR_DATA_TYPE_DIARY = 5
AR_DATA_TYPE_ENUM = 6
AR_DATA_TYPE_TIME = 7
AR_DATA_TYPE_BITMASK = 8
AR_DATA_TYPE_BYTES = 9
AR_DATA_TYPE_DECIMAL = 10
AR_DATA_TYPE_ATTACH = 11
AR_DATA_TYPE_CURRENCY = 12
AR_DATA_TYPE_DATE = 13
AR_DATA_TYPE_TIME_OF_DAY = 14
# Field data types (ar.h line 568).
# per record stored data field type
AR_FIELD_TYPE_DATA = 1
# visual trim field type
AR_FIELD_TYPE_TRIM = 2
# GUI control field type
AR_FIELD_TYPE_CONTROL = 4
# page field type
AR_FIELD_TYPE_PAGE = 8
# page holder field type
AR_FIELD_TYPE_PAGE_HOLDER = 16
# table field type
AR_FIELD_TYPE_TABLE = 32
# column field type
AR_FIELD_TYPE_COLUMN = 64
# attachment field type
AR_FIELD_TYPE_ATTACH = 128
# attachment pool type
AR_FIELD_TYPE_ATTACH_POOL = 256
# Entry retrieval limits (ar.h line 841).
# code to indicate retrieval from the result set should start with the first entry
AR_START_WITH_FIRST_ENTRY = 0
# code to indicate no maximum limit for number of entries retrieved in list
AR_NO_MAX_LIST_RETRIEVE = 0
# retrieve all entries even if there is a limit on the number of entries that
# the server will return
AR_RETRIEVE_ALL_ENTRIES = 999999999
# Enum styles (ar.h line 3845).
# list auto-indexed starting at 0
AR_ENUM_STYLE_REGULAR = 1
# list indexed manually, gaps in numbers OK
AR_ENUM_STYLE_CUSTOM = 2
# search performed to find name/number pairs
AR_ENUM_STYLE_QUERY = 3
# Schema types (ar.h line 5525).
# get list of all schemas
AR_LIST_SCHEMA_ALL = 0
# get list of all regular schemas
AR_LIST_SCHEMA_REGULAR = 1
# get list of all join schemas
AR_LIST_SCHEMA_JOIN = 2
# get list of all view schemas
AR_LIST_SCHEMA_VIEW = 3
# get list of all schemas depending on given schema
AR_LIST_SCHEMA_UPLINK = 4
# get list of all schemas the given schema bases on
AR_LIST_SCHEMA_DOWNLINK = 5
# get list of all dialog schemas
AR_LIST_SCHEMA_DIALOG = 6
# get list of all schemas with database fields
AR_LIST_SCHEMA_ALL_WITH_DATA = 7
# get list of all vendor schemas
AR_LIST_SCHEMA_VENDOR = 8
# get list of all schemas allowed in multi-form searches
AR_LIST_SCHEMA_ALLOWED_IN_MFSEARCH = 9
# code added to above to "include hidden schemas" in the list returned
AR_HIDDEN_INCREMENT = 1024
# SetEntry options (ar.h line 5555).
# don't enforce join referential integrity
AR_JOIN_SETOPTION_NONE = 0
# enforce join referential integrity; for internal API workflow
AR_JOIN_SETOPTION_REF = 1
# DeleteEntry options (ar.h line 5566).
# individual entries will be deleted only when the entry can be retrieved
# through the join schema
AR_JOIN_DELOPTION_NONE = 0
# delete individual entries even when the entry cannot be retrieved from the
# join schema. Errors will be ignored for entry pieces that no longer exist.
AR_JOIN_DELOPTION_FORCE = 1
# Type definitions (ar.h line 275).
# boolean flag set to TRUE or FALSE
ARBoolean = c_ubyte
# structure to hold an entry id value
AREntryIdType = c_char * (AR_MAX_ENTRYID_SIZE + 1)
# structure to hold an internal id
ARInternalId = ARULong32
# structure to hold an object name
ARNameType = c_char * (AR_MAX_NAME_SIZE + 1)
# structure to hold password
ARPasswordType = c_char * (AR_MAX_PASSWORD_SIZE + 1)
# structure to hold an auth string
ARAuthType = c_char * (AR_MAX_AUTH_SIZE + 1)
# structure to hold a file name
ARFileNameType = c_char * (AR_MAX_FULL_FILENAME + 1)
# structure to hold an access name
ARAccessNameType = c_char * (AR_MAX_ACCESS_NAME_SIZE + 1)
# structure to hold an encrypted password
AREncryptedPasswordType = c_char * (AR_MAX_ENCRYPTED_PASSWORD_SIZE + 1)
# structure to hold a server name
ARServerNameType = c_char * (AR_MAX_SERVER_SIZE + 1)
# timestamp; Unix style timestamp (seconds since Jan. 1, 1970)
ARTimestamp = ARLong32
# structure to hold a license name
ARLicenseNameType = c_char * (AR_MAX_LICENSE_NAME_SIZE + 1)
# structure to hold a license key
ARLicenseKeyType = c_char * (AR_MAX_LICENSE_KEY_SIZE + 1)
# used to hold host id string
ARHostIDType = c_char * (AR_MAX_HOSTID_SIZE + 1)
# structure to hold a locale string
ARLocaleType = c_char * (AR_MAX_LOCALE_SIZE + 1)
# structure to hold a menu entry
ARCMenuType = c_char * (AR_MAX_CMENU_SIZE + 1)
# structure to hold a table name
ARTableNameType = c_char * (AR_MAX_TABLENAME_SIZE + 1)
# (seconds since midnight 00.00.00)
ARTime = ARLong32
ARCurrencyCodeType = c_char * (AR_MAX_CURRENCY_CODE_SIZE + 1)
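# Illustrative sketch (not part of the original module): the *Type aliases
# above are fixed-size ctypes byte buffers, so values are NUL-terminated and
# must fit within the declared maximum length.
def _example_name_buffer():
    name = ARNameType()            # zero-filled buffer of AR_MAX_NAME_SIZE + 1 bytes
    name.value = b'Example Form'   # raises ValueError if longer than the buffer
    return name.value              # -> b'Example Form'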
class ARNameList(Structure):
"""List of 0 or more object names (ar.h line 354)."""
_fields_ = [
('numItems', c_uint),
('nameList', POINTER(ARNameType))
]
class ARInternalIdList(Structure):
"""List of 0 or more internal ids (ar.h line 330)."""
_fields_ = [
('numItems', c_uint),
('internalIdList', POINTER(ARInternalId))
]
class AREntryIdList(Structure):
"""List of 0 or more entry ids (ar.h line 322)."""
_fields_ = [
('numItems', c_uint),
('entryIdList', POINTER(AREntryIdType))
]
class ARAccessNameList(Structure):
"""List of 0 or more access names (ar.h line 374)."""
_fields_ = [
('numItems', c_uint),
('nameList', POINTER(ARAccessNameType))
]
class ARTextStringList(Structure):
"""List of 0 or more character strings (ar.h line 403)."""
_fields_ = [
('numItems', c_uint),
('stringList', POINTER(c_char_p))
]
class ARTimestampList(Structure):
"""List of 0 or more timestamps (ar.h line 411)."""
_fields_ = [
('numItems', c_uint),
('timestampList', POINTER(ARTimestamp))
]
class ARUnsignedIntList(Structure):
"""List of 0 or more unsigned integers (ar.h line 419)."""
_fields_ = [
('numItems', c_uint),
('intList', POINTER(c_uint))
]
class ARByteList(Structure):
"""Byte stream (ar.h line 447)."""
_fields_ = [
# type of list
('type', ARULong32),
('noval_', ARULong32),
# length of bytes
('numItems', c_uint),
# not NULL terminated
('bytes', POINTER(c_ubyte))
]
class ARLocalizationInfo(Structure):
"""Localisation information (ar.h line 456)."""
_fields_ = [
('locale', c_char * (AR_MAX_LOCALE_SIZE + 1)),
('charSet', c_char * (AR_MAX_LANG_SIZE + 1)),
('timeZone', c_char * (AR_MAX_LOCALE_SIZE + 1)),
('customDateFormat', c_char * (AR_MAX_FORMAT_SIZE + 1)),
('customTimeFormat', c_char * (AR_MAX_FORMAT_SIZE + 1)),
('separators', c_char * (AR_MAX_LANG_SIZE + 1))
]
class ARControlStruct(Structure):
"""
Control record containing information about the user and the
environment (ar.h line 467). An instance of this structure will be the
first parameter of all the calls supported by the AR system.
.. note:: Server is listed last in the structure below as it is not passed
in the RPC call. It is not needed on the server (which already knows
who it is). By placing it last, there can still be a "clean" mapping
of the first part of the record onto the RPC structure.
"""
_fields_ = [
# id assigned and used by the system for efficient cache access
('cacheId', ARLong32),
# time at which the operation was performed
('operationTime', ARTimestamp),
# username and password for access control
('user', ARAccessNameType),
('password', ARPasswordType),
# locale information
('localeInfo', ARLocalizationInfo),
# API session identifier
('sessionId', ARUIntPtr),
# Windows domain
('authString', ARAuthType),
# server to access
('server', c_char * (AR_MAX_SERVER_SIZE + 1))
]
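# Illustrative sketch (not part of the original module): filling in the control
# record that the AR API expects as the first parameter of every call. All
# credential values below are placeholders.
def _example_control_record():
    ctrl = ARControlStruct()
    ctrl.user = b'Demo'
    ctrl.password = b'secret'
    ctrl.server = b'remedy.example.com'
    ctrl.localeInfo.locale = b'en_US'
    return ctrl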
class ARStatusStruct(Structure):
"""Type of error (ar.h line 498)."""
_fields_ = [
('messageType', c_uint),
('messageNum', ARLong32),
('messageText', c_char_p),
('appendedText', c_char_p)
]
class ARStatusList(Structure):
"""List of 0 or more status messages (ar.h line 508)."""
_fields_ = [
('numItems', c_uint),
('statusList', POINTER(ARStatusStruct))
]
class ARCoordStruct(Structure):
"""Coordinates in typographic points (i.e. pixels) (ar.h line 694)."""
_fields_ = [
('x', ARLong32),
('y', ARLong32)
]
class ARCoordList(Structure):
"""Ordered list of 0 or more coordinates (ar.h line 701)."""
_fields_ = [
('numItems', c_uint),
('coords', POINTER(ARCoordStruct))
]
class ARBufStruct(Structure):
"""A generic buffer (ar.h line 714)."""
_fields_ = [
('bufSize', ARULong32),
('buffer', POINTER(c_ubyte)),
]
class ARLocUnion(Union):
"""Union relating to locating an attachment (ar.h line 722)."""
_fields_ = [
# filename to open
('filename', c_char_p),
# memory buffer
('buf', ARBufStruct)
]
class ARLocStruct(Structure):
"""Structure relating to locating an attachment (ar.h line 722)."""
_fields_ = [
# AR_LOC_FILENAME | AR_LOC_BUFFER
('locType', ARULong32),
('u', ARLocUnion)
]
class ARAttachStruct(Structure):
"""An attachment (ar.h line 734)."""
_fields_ = [
# name of attachment
('name', c_char_p),
# pre-compression number of bytes
('origSize', ARLong32),
# post-compression number of bytes
('compSize', ARLong32),
# how to locate attachment content
('loc', ARLocStruct)
]
class ARFuncCurrencyStruct(Structure):
"""A functional currency (ar.h line 744)."""
_fields_ = [
# numeric currency value
('value', c_char_p),
# ISO currency code
('currencyCode', ARCurrencyCodeType)
]
class ARFuncCurrencyList(Structure):
"""List of 0 or more functional currencies (ar.h line 752)."""
_fields_ = [
('numItems', c_uint),
('funcCurrencyList', POINTER(ARFuncCurrencyStruct))
]
class ARCurrencyStruct(Structure):
"""A currency value (ar.h line 760)."""
_fields_ = [
# numeric value of currency
('value', c_char_p),
# ISO currency code
('currencyCode', ARCurrencyCodeType),
# timestamp of conversion
('conversionDate', ARTimestamp),
# list of functional currencies
('funcList', ARFuncCurrencyList)
]
class ARValueUnion(Union):
"""Union used to hold a value (ar.h line 777)."""
_fields_ = [
# noval_ is big enough to initialize both integer and pointer
# union members in declarations like ARValueStruct val = { 0, {0}}
('noval_', c_size_t),
('keyNum', c_uint),
('intVal', ARLong32),
('realVal', c_double),
('charVal', c_char_p),
('diaryVal', c_char_p),
('enumVal', ARULong32),
('timeVal', ARTimestamp),
('maskVal', ARULong32),
('timeOfDayVal', ARTime),
('byteListVal', POINTER(ARByteList)),
('decimalVal', c_char_p),
('attachVal', POINTER(ARAttachStruct)),
('ulongVal', ARULong32),
('coordListVal', POINTER(ARCoordList)),
('dateVal', c_int),
('currencyVal', POINTER(ARCurrencyStruct)),
# Placeholder for passing pointers through this data structure.
# Can only be used locally - you can't XDR a pointer unless
# you know the type of object being referenced.
('ptrVal', c_void_p)
]
class ARValueStruct(Structure):
"""
Structure used to hold a value (ar.h line 777). There is one branch
for each datatype/property that is supported by the system.
"""
_fields_ = [
# AR_DATA_TYPE_xxx
('dataType', c_uint),
('u', ARValueUnion)
]
class ARValueList(Structure):
"""List of values (ar.h line 817)."""
_fields_ = [
('numItems', c_uint),
('valueList', POINTER(ARValueStruct))
]
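# Illustrative sketch (not part of the original module): ARValueStruct is a
# tagged union -- ``dataType`` selects which member of ARValueUnion is valid.
def _example_integer_value(number):
    val = ARValueStruct()
    val.dataType = AR_DATA_TYPE_INTEGER
    val.u.intVal = number
    return val

def _example_char_value(text):
    val = ARValueStruct()
    val.dataType = AR_DATA_TYPE_CHAR
    val.u.charVal = text    # expects bytes, e.g. b'hello'
    return val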
class AREntryListFieldStruct(Structure):
"""Definition for a field in the entry list (ar.h line 850)."""
_fields_ = [
('fieldId', ARInternalId),
('columnWidth', c_uint),
('separator', c_char * 10)
]
class AREntryListFieldList(Structure):
"""List of 0 or more fields in entrylist (ar.h line 858)."""
_fields_ = [
('numItems', c_uint),
('fieldsList', POINTER(AREntryListFieldStruct))
]
class ARFieldValueStruct(Structure):
"""An id and value for a single field (ar.h line 917)."""
_fields_ = [
('fieldId', ARInternalId),
('value', ARValueStruct)
]
class ARFieldValueList(Structure):
"""List of 0 or more field/value pairs (ar.h line 925)."""
_fields_ = [
('numItems', c_uint),
('fieldValueList', POINTER(ARFieldValueStruct))
]
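# Illustrative sketch (not part of the original module): every *List structure
# pairs an item count with a pointer to a C array; in ctypes the array type is
# created on the fly and cast to the pointer type. The caller must keep the
# returned array alive for as long as the list is used.
def _example_field_value_list(pairs):
    """Build an ARFieldValueList from a sequence of (field_id, ARValueStruct)."""
    from ctypes import cast   # cast is not imported at the top of this module
    items = (ARFieldValueStruct * len(pairs))()
    for i, (field_id, value) in enumerate(pairs):
        items[i].fieldId = field_id
        items[i].value = value
    fv_list = ARFieldValueList()
    fv_list.numItems = len(pairs)
    fv_list.fieldValueList = cast(items, POINTER(ARFieldValueStruct))
    return fv_list, items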
class AREntryListFieldValueStruct(Structure):
"""
Parallel entry list structures used to return the entry list as a list of
entry ids together with each entry's field/value pairs (ar.h line 933).
"""
_fields_ = [
('entryId', AREntryIdList),
('entryValues', POINTER(ARFieldValueList))
]
class AREntryListFieldValueList(Structure):
"""List of 0 or more entries (ar.h line 944)."""
_fields_ = [
('numItems', c_uint),
('entryList', POINTER(AREntryListFieldValueStruct))
]
class ARBooleanList(Structure):
"""List of 0 or more ARBoolean (ar.h line 982)."""
_fields_ = [
('numItems', c_uint),
('booleanList', POINTER(ARBoolean))
]
class ARStatHistoryValue(Structure):
"""
Special selection field that stores user and time stamp information for
each of the defined status values (ar.h line 1036).
"""
_fields_ = [
('enumVal', ARULong32),
('userOrTime', c_uint)
]
class ARCurrencyPartStruct(Structure):
"""
Part of a currency field that combine to represent a complete currency
value (ar.h line 1067).
"""
_fields_ = [
('fieldId', ARInternalId),
('partTag', c_uint),
('currencyCode', ARCurrencyCodeType)
]
class ARQualifierStruct(Structure):
"""
Structure used to hold a qualification which entries to retrieve when
creating a query result list (ARGetListEntry) or computing entry statistics
(ARGetEntryStatistics) (ar.h line 1029 and 1189).
"""
pass
class ARQueryValueStruct(Structure):
"""Query value used in relational qualifications (ar.h line 1049)."""
_fields_ = [
('schema', ARNameType),
('server', c_char * (AR_MAX_SERVER_SIZE + 1)),
('qualifier', POINTER(ARQualifierStruct)),
('valueField', ARInternalId),
('multiMatchCode', c_uint)
]
class ARArithOpStruct(Structure):
"""Result value from an arithmetic operation (ar.h line 1146)."""
pass
class ARFieldValueOrArithUnion(Union):
"""
Union used to hold values to compare in a relational qualification
operation (ar.h line 1116).
"""
_fields_ = [
# noval_ is big enough to initialize both integer and pointer
# union members in declarations like
# ARFieldValueOrArithStruct val = { 0, {0}};
('noval_', c_size_t),
('fieldId', ARInternalId),
('value', ARValueStruct),
('arithOp', POINTER(ARArithOpStruct)),
('statHistory', ARStatHistoryValue),
('valueSet', ARValueList),
('variable', c_uint),
('queryValue', POINTER(ARQueryValueStruct)),
('currencyField', POINTER(ARCurrencyPartStruct))
]
class ARFieldValueOrArithStruct(Structure):
"""
Structure used to hold values to compare in a relational qualification
operation (ar.h line 1116).
"""
_fields_ = [
('tag', c_uint),
('u', ARFieldValueOrArithUnion)
]
ARArithOpStruct._fields_ = [
('operation', c_uint),
('operandLeft', ARFieldValueOrArithStruct),
('operandRight', ARFieldValueOrArithStruct)
]
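# Note: ARArithOpStruct is declared empty above and its _fields_ are assigned
# only here, after ARFieldValueOrArithStruct exists, because the two structures
# reference each other. Deferring the _fields_ assignment is the standard
# ctypes idiom for mutually recursive C structures.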
class ARRelOpStruct(Structure):
"""Relational qualification operator (ar.h line 1164)."""
_fields_ = [
('operation', c_uint),
('operandLeft', ARFieldValueOrArithStruct),
('operandRight', ARFieldValueOrArithStruct),
]
class ARAndOrStruct(Structure):
"""Logical qualification operator (ar.h line 1179)."""
_fields_ = [
('operandLeft', POINTER(ARQualifierStruct)),
('operandRight', POINTER(ARQualifierStruct))
]
class ARQualifierUnion(Union):
"""Union used to hold a qualification (ar.h line 1189)."""
_fields_ = [
('andor', ARAndOrStruct),
('notQual', POINTER(ARQualifierStruct)),
('relOp', POINTER(ARRelOpStruct)),
('fieldId', ARInternalId)
]
ARQualifierStruct._fields_ = [
('operation', c_uint),
('u', ARQualifierUnion)
]
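# ARQualifierStruct is likewise self-referential (its AND/OR/NOT nodes point
# back at qualifiers), so its _fields_ are only assigned once ARQualifierUnion
# exists. Illustrative sketch (not part of the original module) of combining
# two qualifiers; the numeric AR_COND_OP_* operation codes live elsewhere in
# ar.h and are passed in rather than assumed here.
def _example_and_qualifier(and_op_code, left_qual, right_qual):
    from ctypes import pointer   # pointer is not imported at the top of this module
    qual = ARQualifierStruct()
    qual.operation = and_op_code
    qual.u.andor.operandLeft = pointer(left_qual)
    qual.u.andor.operandRight = pointer(right_qual)
    return qual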
class ARSortStruct(Structure):
"""Sort criteria (ar.h line 1216)."""
_fields_ = [
('fieldId', ARInternalId),
('sortOrder', c_uint)
]
class ARSortList(Structure):
"""List of 0 or more sort criteria (ar.h line 1224)."""
_fields_ = [
('numItems', c_uint),
('sortList', POINTER(ARSortStruct))
]
class ARPropStruct(Structure):
"""A display/object property (ar.h line 1388)."""
_fields_ = [
# AR_*PROP_*; property tag
('prop', ARULong32),
('value', ARValueStruct)
]
class ARPropList(Structure):
"""List of 0 or more display/object properties (ar.h line 1396)."""
_fields_ = [
('numItems', c_uint),
('props', POINTER(ARPropStruct))
]
class ARPropListList(Structure):
"""List of 0 or more display/object properties lists (ar.h line 1404)."""
_fields_ = [
('numItems', c_uint),
('propsList', POINTER(ARPropList))
]
class ARDisplayInstanceStruct(Structure):
"""A display instance (ar.h line 1412)."""
_fields_ = [
# VUI to which display belongs
('vui', ARInternalId),
# properties specific to the vui
('props', ARPropList)
]
class ARDisplayInstanceList(Structure):
"""List of 0 or more display instances (ar.h line 1420)."""
_fields_ = [
# properties common across displays
('commonProps', ARPropList),
# properties specific to one display
# ASSERT ALIGN(this.numItems) >= ALIGN_NEEDED_BY(this.dInstanceList)
('numItems', c_uint),
('dInstanceList', POINTER(ARDisplayInstanceStruct))
]
class ARDisplayInstanceListList(Structure):
"""List of 0 or more display instance lists (ar.h line 1431)."""
_fields_ = [
('numItems', c_uint),
('dInstanceList', POINTER(ARDisplayInstanceList))
]
class ARPermissionStruct(Structure):
"""A group and the permissions defined (ar.h line 3564)."""
_fields_ = [
('groupId', ARInternalId),
('permissions', c_uint)
]
class ARPermissionList(Structure):
"""List of 0 or more permission entries (ar.h line 3571)."""
_fields_ = [
('numItems', c_uint),
('permissionList', POINTER(ARPermissionStruct))
]
class ARPermissionListList(Structure):
"""List of 0 or more permission lists (ar.h line 3578)."""
_fields_ = [
('numItems', c_uint),
('permissionList', POINTER(ARPermissionList))
]
class ARIntegerLimitsStruct(Structure):
"""Integer limits (ar.h line 3780)."""
_fields_ = [
('rangeLow', ARLong32),
('rangeHigh', ARLong32)
]
class ARRealLimitsStruct(Structure):
"""Real number limits (ar.h line 3789)."""
_fields_ = [
('rangeLow', c_double),
('rangeHigh', c_double),
('precision', c_int)
]
class ARCharLimitsStruct(Structure):
"""Character limits (ar.h line 3820)."""
_fields_ = [
('maxLength', c_uint),
# append or overwrite with new menu selections
('menuStyle', c_uint),
# operation to use from QBE type operation
('qbeMatchOperation', c_uint),
# name of character menu associated to field
('charMenu', ARNameType),
# pattern, incl wildcards, value must match
('pattern', c_char_p),
# Full Text options
('fullTextOptions', c_uint),
# 0 for in-byte, 1 for in-char
('lengthUnits', c_uint),
# 0 for Default, 1 for In-Row and 2 for Out-of-Row
('storageOptionForCLOB', c_uint)
]
class ARDiaryLimitsStruct(Structure):
"""Diary limits (ar.h line 3839)."""
_fields_ = [
('fullTextOptions', c_uint)
]
class AREnumItemStruct(Structure):
"""Custom enum item (ar.h line 3849)."""
_fields_ = [
('itemName', ARNameType),
('itemNumber', ARULong32)
]
class AREnumItemList(Structure):
"""Custom enum limits (ar.h line 3856)."""
_fields_ = [
('numItems', c_uint),
('enumItemList', POINTER(AREnumItemStruct))
]
class AREnumQueryStruct(Structure):
"""Query definition for query enum limits (ar.h line 3863)."""
_fields_ = [
('schema', ARNameType),
('server', c_char * (AR_MAX_SERVER_SIZE + 1)),
('qualifier', ARQualifierStruct),
('nameField', ARInternalId),
('numberField', ARInternalId)
]
class AREnumLimitsUnion(Union):
"""Union used to hold enum limits (ar.h line 3873)."""
_fields_ = [
('regularList', ARNameList),
('customList', AREnumItemList),
('queryList', AREnumQueryStruct)
]
class AREnumLimitsStruct(Structure):
"""Structure used to hold enum limits (ar.h line 3873)."""
_fields_ = [
('listStyle', c_uint),
('u', AREnumLimitsUnion)
]
class ARAttachLimitsStruct(Structure):
"""Attachment limits (ar.h line 3888)."""
_fields_ = [
# 0 means unlimited
('maxSize', ARULong32),
('attachType', c_uint),
('fullTextOptions', c_uint)
]
class ARTableLimitsStruct(Structure):
"""Table limits (ar.h line 3896)."""
_fields_ = [
# number of columns in table field
('numColumns', c_uint),
# qualification for table field
('qualifier', ARQualifierStruct),
# max rows to retrieve
('maxRetrieve', c_uint),
# data fields belong to this schema
('schema', ARNameType),
# that schema is in this server
('server', ARServerNameType),
('sampleSchema', ARNameType),
('sampleServer', ARServerNameType)
]
class ARColumnLimitsStruct(Structure):
"""Column limits (ar.h line 3921)."""
_fields_ = [
# parent field column field belongs to
('parent', ARInternalId),
# remote fieldId form which data comes
('dataField', ARInternalId),
# data source for the above dataField
('dataSource', c_uint),
# column length to display - char fields
('colLength', c_uint)
]
class ARDecimalLimitsStruct(Structure):
"""Decimal limits (ar.h line 3933)."""
_fields_ = [
('rangeLow', c_char_p),
('rangeHigh', c_char_p),
# number of places to right of dec point
('precision', c_int),
]
class ARViewLimits(Structure):
"""View limits (ar.h line 3941)."""
_fields_ = [
# 0 means unlimited length
('maxLength', c_uint)
]
class ARDisplayLimits(Structure):
"""Display limits (ar.h line 3947)."""
_fields_ = [
# 0 means unlimited length
('maxLength', c_uint),
# 0 for in-byte, 1 for in-char
('lengthUnits', c_uint),
]
class ARDateLimitsStruct(Structure):
"""Date limits (ar.h line 3953)."""
_fields_ = [
# minimum date value, in julian days
('minDate', c_int),
# maximum date value, in julian days
('maxDate', c_int)
]
class ARCurrencyDetailStruct(Structure):
"""Details of a currency limit (ar.h line 3963)."""
_fields_ = [
# currency type
('currencyCode', ARCurrencyCodeType),
# number of places to right of dec point
('precision', c_int)
]
class ARCurrencyDetailList(Structure):
"""List of currency limit details (ar.h line 3971)."""
_fields_ = [
('numItems', c_uint),
('currencyDetailList', POINTER(ARCurrencyDetailStruct))
]
class ARCurrencyLimitsStruct(Structure):
"""Currency limits (ar.h line 3978)."""
_fields_ = [
('rangeLow', c_char_p),
('rangeHigh', c_char_p),
# number of places to right of dec point
('precision', c_int),
('functionalCurrencies', ARCurrencyDetailList),
('allowableCurrencies', ARCurrencyDetailList),
]
class ARFieldLimitUnion(Union):
"""Union used to hold field limits (ar.h line 3991)."""
_fields_ = [
('intLimits', ARIntegerLimitsStruct),
('realLimits', ARRealLimitsStruct),
('charLimits', ARCharLimitsStruct),
('diaryLimits', ARDiaryLimitsStruct),
('enumLimits', AREnumLimitsStruct),
# time has no external limits
('maskLimits', AREnumLimitsStruct),
# bytes has no external limits
('attachLimits', ARAttachLimitsStruct),
('tableLimits', ARTableLimitsStruct),
('columnLimits', ARColumnLimitsStruct),
('decimalLimits', ARDecimalLimitsStruct),
('viewLimits', ARViewLimits),
('displayLimits', ARDisplayLimits),
('dateLimits', ARDateLimitsStruct),
('currencyLimits', ARCurrencyLimitsStruct)
]
class ARFieldLimitStruct(Structure):
"""Structure used to hold field limits (ar.h line 3991)."""
_fields_ = [
('dataType', c_uint),
('u', ARFieldLimitUnion)
]
class ARFieldLimitList(Structure):
"""List of 0 or more FieldLimitStructs (ar.h line 4015)."""
_fields_ = [
('numItems', c_uint),
('fieldLimitList', POINTER(ARFieldLimitStruct))
]
class ARJoinMappingStruct(Structure):
"""Join field mapping (ar.h line 5453)."""
_fields_ = [
# 0 - primary, 1 - secondary
('schemaIndex', c_uint),
# field id of member schema
('realId', ARInternalId)
]
class ARViewMappingStruct(Structure):
"""View field mapping (ar.h line 5460)."""
_fields_ = [
# field name of external table
('fieldName', ARNameType)
]
class ARVendorMappingStruct(Structure):
"""Vendor field mapping (ar.h line 5466)."""
_fields_ = [
# field name in external table
('fieldName', ARNameType)
]
class ARInheritanceMappingStruct(Structure):
"""Inheritance field mapping (ar.h line 5472)."""
_fields_ = [
# NULL means this is not a reference field
('srcSchema', ARNameType),
# a bitmask indicates which field characteristics are inherited. For
# each bit, 1 means it is inherited, 0 means it is overwritten. This
# only has meaning if srcSchema is not an empty string
('referenceMask', c_uint),
# 0 means field doesn't reference DATA
('dataMappingId', c_uint)
]
class ARFieldMappingUnion(Union):
"""Union relating to a field mapping (ar.h line 5489)."""
_fields_ = [
('join', ARJoinMappingStruct),
('view', ARViewMappingStruct),
('vendor', ARVendorMappingStruct),
('inheritance', ARInheritanceMappingStruct)
]
class ARFieldMappingStruct(Structure):
"""
Structure relating to a field mapping from each field in a schema to a
field in an underlying base schema (ar.h line 5489).
"""
_fields_ = [
('fieldType', c_uint),
('u', ARFieldMappingUnion)
]
class ARFieldMappingList(Structure):
""""List of 0 or more field mappings (ar.h line 5502)."""
_fields_ = [
('numItems', c_uint),
('mappingList', POINTER(ARFieldMappingStruct))
]
|
|
import random
from collections import OrderedDict
from copy import deepcopy
import gym
from gym_minigrid.roomgrid import RoomGrid
from .verifier import *
class RejectSampling(Exception):
"""
Exception used for rejection sampling
"""
pass
class RoomGridLevel(RoomGrid):
"""
Base for levels based on RoomGrid
A level, given a random seed, generates missions based on one or more
patterns. Levels should produce a family of missions of approximately
similar difficulty.
"""
def __init__(
self,
room_size=8,
**kwargs
):
super().__init__(
room_size=room_size,
**kwargs
)
def reset(self, **kwargs):
obs = super().reset(**kwargs)
# Recreate the verifier
self.instrs.reset_verifier(self)
# Compute the time step limit based on the maze size and instructions
nav_time_room = self.room_size ** 2
nav_time_maze = nav_time_room * self.num_rows * self.num_cols
num_navs = self.num_navs_needed(self.instrs)
self.max_steps = num_navs * nav_time_maze
return obs
def step(self, action):
obs, reward, done, info = super().step(action)
# If we drop an object, we need to update its position in the environment
if action == self.actions.drop:
self.update_objs_poss()
# Check whether the mission has been completed (successfully or not)
status = self.instrs.verify(action)
if status == 'success':
done = True
reward = self._reward()
elif status == 'failure':
done = True
reward = 0
return obs, reward, done, info
def update_objs_poss(self, instr=None):
if instr is None:
instr = self.instrs
if isinstance(instr, BeforeInstr) or isinstance(instr, AndInstr) or isinstance(instr, AfterInstr):
self.update_objs_poss(instr.instr_a)
self.update_objs_poss(instr.instr_b)
else:
instr.update_objs_poss()
def _gen_grid(self, width, height):
# We catch RecursionError to deal with rare cases where
# rejection sampling gets stuck in an infinite loop
while True:
try:
super()._gen_grid(width, height)
# Generate the mission
self.gen_mission()
# Validate the instructions
self.validate_instrs(self.instrs)
except RecursionError as error:
print('Timeout during mission generation:', error)
continue
except RejectSampling as error:
#print('Sampling rejected:', error)
continue
break
# Generate the surface form for the instructions
self.surface = self.instrs.surface(self)
self.mission = self.surface
def validate_instrs(self, instr):
"""
Perform some validation on the generated instructions
"""
# Gather the colors of locked doors
if hasattr(self, 'unblocking') and self.unblocking:
colors_of_locked_doors = []
for i in range(self.num_cols):
for j in range(self.num_rows):
room = self.get_room(i, j)
for door in room.doors:
if door and door.is_locked:
colors_of_locked_doors.append(door.color)
if isinstance(instr, PutNextInstr):
# Resolve the objects referenced by the instruction
instr.reset_verifier(self)
# Check that the objects are not already next to each other
if set(instr.desc_move.obj_set).intersection(
set(instr.desc_fixed.obj_set)):
raise RejectSampling(
"there are objects that match both lhs and rhs of PutNext")
if instr.objs_next():
raise RejectSampling('objs already next to each other')
# Check that we are not asking to move an object next to itself
move = instr.desc_move
fixed = instr.desc_fixed
if len(move.obj_set) == 1 and len(fixed.obj_set) == 1:
if move.obj_set[0] is fixed.obj_set[0]:
raise RejectSampling('cannot move an object next to itself')
if isinstance(instr, ActionInstr):
if not hasattr(self, 'unblocking') or not self.unblocking:
return
# TODO: either relax this a bit or make the bot handle these super corner-y scenarios
# Check that the instruction doesn't involve a key that matches the color of a locked door
potential_objects = ('desc', 'desc_move', 'desc_fixed')
for attr in potential_objects:
if hasattr(instr, attr):
obj = getattr(instr, attr)
if obj.type == 'key' and obj.color in colors_of_locked_doors:
raise RejectSampling('cannot do anything with/to a key that can be used to open a door')
return
if isinstance(instr, SeqInstr):
self.validate_instrs(instr.instr_a)
self.validate_instrs(instr.instr_b)
return
assert False, "unhandled instruction type"
def gen_mission(self):
"""
Generate a mission (instructions and matching environment).
Derived level classes should implement this method.
"""
raise NotImplementedError
@property
def level_name(self):
return self.__class__.level_name
@property
def gym_id(self):
return self.__class__.gym_id
def num_navs_needed(self, instr):
"""
Compute the maximum number of navigations needed to perform
a simple or complex instruction
"""
if isinstance(instr, PutNextInstr):
return 2
if isinstance(instr, ActionInstr):
return 1
if isinstance(instr, SeqInstr):
na = self.num_navs_needed(instr.instr_a)
nb = self.num_navs_needed(instr.instr_b)
return na + nb
def open_all_doors(self):
"""
Open all the doors in the maze
"""
for i in range(self.num_cols):
for j in range(self.num_rows):
room = self.get_room(i, j)
for door in room.doors:
if door:
door.is_open = True
def check_objs_reachable(self, raise_exc=True):
"""
Check that all objects are reachable from the agent's starting
position without requiring any other object to be moved
(without unblocking)
"""
# Reachable positions
reachable = set()
# Work list
stack = [self.agent_pos]
while len(stack) > 0:
i, j = stack.pop()
if i < 0 or i >= self.grid.width or j < 0 or j >= self.grid.height:
continue
if (i, j) in reachable:
continue
# This position is reachable
reachable.add((i, j))
cell = self.grid.get(i, j)
# If there is something other than a door in this cell, it
# blocks reachability
if cell and cell.type != 'door':
continue
# Visit the horizontal and vertical neighbors
stack.append((i+1, j))
stack.append((i-1, j))
stack.append((i, j+1))
stack.append((i, j-1))
# Check that all objects are reachable
for i in range(self.grid.width):
for j in range(self.grid.height):
cell = self.grid.get(i, j)
if not cell or cell.type == 'wall':
continue
if (i, j) not in reachable:
if not raise_exc:
return False
raise RejectSampling('unreachable object at ' + str((i, j)))
# All objects reachable
return True
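# Illustrative sketch (not part of the original module): a minimal derived
# level showing how gen_mission() is typically implemented. It wires the rooms
# together, scatters a few objects, places the agent and asks it to go to one
# random object. The leading underscore keeps register_levels() from picking
# it up; add_distractors() returning the placed objects is assumed here.
class _ExampleGoToLevel(RoomGridLevel):
    def gen_mission(self):
        self.connect_all()
        objs = self.add_distractors(num_distractors=5, all_unique=False)
        self.place_agent()
        obj = self._rand_elem(objs)
        self.instrs = GoToInstr(ObjDesc(obj.type, obj.color))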
class LevelGen(RoomGridLevel):
"""
Level generator which attempts to produce every possible sentence in
the baby language as an instruction.
"""
def __init__(
self,
room_size=8,
num_rows=3,
num_cols=3,
num_dists=18,
locked_room_prob=0.5,
locations=True,
unblocking=True,
implicit_unlock=True,
action_kinds=['goto', 'pickup', 'open', 'putnext'],
instr_kinds=['action', 'and', 'seq'],
seed=None
):
self.num_dists = num_dists
self.locked_room_prob = locked_room_prob
self.locations = locations
self.unblocking = unblocking
self.implicit_unlock = implicit_unlock
self.action_kinds = action_kinds
self.instr_kinds = instr_kinds
self.locked_room = None
super().__init__(
room_size=room_size,
num_rows=num_rows,
num_cols=num_cols,
seed=seed
)
def gen_mission(self):
if self._rand_float(0, 1) < self.locked_room_prob:
self.add_locked_room()
self.connect_all()
self.add_distractors(num_distractors=self.num_dists, all_unique=False)
# The agent must be placed after all the objects to respect constraints
while True:
self.place_agent()
start_room = self.room_from_pos(*self.agent_pos)
# Ensure that we are not placing the agent in the locked room
if start_room is self.locked_room:
continue
break
# If no unblocking required, make sure all objects are
# reachable without unblocking
if not self.unblocking:
self.check_objs_reachable()
# Generate random instructions
self.instrs = self.rand_instr(
action_kinds=self.action_kinds,
instr_kinds=self.instr_kinds
)
def add_locked_room(self):
# Until we've successfully added a locked room
while True:
i = self._rand_int(0, self.num_cols)
j = self._rand_int(0, self.num_rows)
door_idx = self._rand_int(0, 4)
self.locked_room = self.get_room(i, j)
# Don't add a locked door in an external wall
if self.locked_room.neighbors[door_idx] is None:
continue
door, _ = self.add_door(
i, j,
door_idx,
locked=True
)
# Done adding locked room
break
# Until we find a room to put the key
while True:
i = self._rand_int(0, self.num_cols)
j = self._rand_int(0, self.num_rows)
key_room = self.get_room(i, j)
if key_room is self.locked_room:
continue
self.add_object(i, j, 'key', door.color)
break
def rand_obj(self, types=OBJ_TYPES, colors=COLOR_NAMES, max_tries=100):
"""
Generate a random object descriptor
"""
num_tries = 0
# Keep trying until we find a matching object
while True:
if num_tries > max_tries:
raise RecursionError('failed to find suitable object')
num_tries += 1
color = self._rand_elem([None, *colors])
type = self._rand_elem(types)
loc = None
if self.locations and self._rand_bool():
loc = self._rand_elem(LOC_NAMES)
desc = ObjDesc(type, color, loc)
# Find all objects matching the descriptor
objs, poss = desc.find_matching_objs(self)
# The description must match at least one object
if len(objs) == 0:
continue
# If no implicit unlocking is required
if not self.implicit_unlock and self.locked_room:
# Check that at least one object is not in the locked room
pos_not_locked = list(filter(
lambda p: not self.locked_room.pos_inside(*p),
poss
))
if len(pos_not_locked) == 0:
continue
# Found a valid object description
return desc
def rand_instr(
self,
action_kinds,
instr_kinds,
depth=0
):
"""
Generate random instructions
"""
kind = self._rand_elem(instr_kinds)
if kind == 'action':
action = self._rand_elem(action_kinds)
if action == 'goto':
return GoToInstr(self.rand_obj())
elif action == 'pickup':
return PickupInstr(self.rand_obj(types=OBJ_TYPES_NOT_DOOR))
elif action == 'open':
return OpenInstr(self.rand_obj(types=['door']))
elif action == 'putnext':
return PutNextInstr(
self.rand_obj(types=OBJ_TYPES_NOT_DOOR),
self.rand_obj()
)
assert False
elif kind == 'and':
instr_a = self.rand_instr(
action_kinds=action_kinds,
instr_kinds=['action'],
depth=depth+1
)
instr_b = self.rand_instr(
action_kinds=action_kinds,
instr_kinds=['action'],
depth=depth+1
)
return AndInstr(instr_a, instr_b)
elif kind == 'seq':
instr_a = self.rand_instr(
action_kinds=action_kinds,
instr_kinds=['action', 'and'],
depth=depth+1
)
instr_b = self.rand_instr(
action_kinds=action_kinds,
instr_kinds=['action', 'and'],
depth=depth+1
)
kind = self._rand_elem(['before', 'after'])
if kind == 'before':
return BeforeInstr(instr_a, instr_b)
elif kind == 'after':
return AfterInstr(instr_a, instr_b)
assert False
assert False
# Dictionary of levels, indexed by name, lexically sorted
level_dict = OrderedDict()
def register_levels(module_name, globals):
"""
Register OpenAI gym environments for all levels in a file
"""
# Iterate through global names
for global_name in sorted(list(globals.keys())):
if not global_name.startswith('Level_'):
continue
level_name = global_name.split('Level_')[-1]
level_class = globals[global_name]
# Register the levels with OpenAI Gym
gym_id = 'BabyAI-%s-v0' % (level_name)
entry_point = '%s:%s' % (module_name, global_name)
gym.envs.registration.register(
id=gym_id,
entry_point=entry_point,
)
# Add the level to the dictionary
level_dict[level_name] = level_class
# Store the name and gym id on the level class
level_class.level_name = level_name
level_class.gym_id = gym_id
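# Illustrative usage sketch (assumption, not part of this file): a module that
# defines Level_* classes would typically call register_levels() at the very
# bottom, e.g.
#
#     register_levels(__name__, globals())
#
# after which each class named 'Level_Foo' is reachable both through
# level_dict['Foo'] and through gym.make('BabyAI-Foo-v0').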
def test():
for idx, level_name in enumerate(level_dict.keys()):
print('Level %s (%d/%d)' % (level_name, idx+1, len(level_dict)))
level = level_dict[level_name]
# Run the mission for a few episodes
rng = random.Random(0)
num_episodes = 0
for i in range(0, 15):
mission = level(seed=i)
# Check that the surface form was generated
assert isinstance(mission.surface, str)
assert len(mission.surface) > 0
obs = mission.reset()
assert obs['mission'] == mission.surface
# Reduce max_steps because otherwise tests take too long
mission.max_steps = min(mission.max_steps, 200)
# Check for some known invalid patterns in the surface form
import re
surface = mission.surface
assert not re.match(r".*pick up the [^ ]*door.*", surface), surface
while True:
action = rng.randint(0, mission.action_space.n - 1)
obs, reward, done, info = mission.step(action)
if done:
obs = mission.reset()
break
num_episodes += 1
# The same seed should always yield the same mission
m0 = level(seed=0)
m1 = level(seed=0)
grid1 = m0.unwrapped.grid
grid2 = m1.unwrapped.grid
assert grid1 == grid2
assert m0.surface == m1.surface
# Check that gym environment names were registered correctly
gym.make('BabyAI-1RoomS8-v0')
gym.make('BabyAI-BossLevel-v0')
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
class ReducedShapeTest(tf.test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
def testSimple(self):
with self.test_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.test_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
def testNegAxes(self):
with self.test_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
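# Illustrative sketch (not part of the original tests): reduced_shape replaces
# every reduced dimension with 1, i.e. the shape that reduce_* ops produce when
# keep_dims=True. A pure-NumPy reference under that assumption:
def _reduced_shape_reference(shape, axes):
    """Return `shape` with every axis in `axes` (negative axes allowed) set to 1."""
    out = list(shape)
    for axis in axes:
        out[axis % len(shape)] = 1
    return out
# e.g. _reduced_shape_reference([5, 3], [0]) == [1, 3], matching testSimple above.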
class SumReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.reduce_sum(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.arange(1, 6).reshape([5]).astype(np.float32)
self._compareAll(np_arr, [0])
def testFloatReduce2D(self):
# Create a 2D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [0, 1])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [-1])
self._compareAll(np_arr, [-1, -3])
self._compareAll(np_arr, [-1, 1])
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
def testFloatReduce5D(self):
# Create a 5D array of floats and reduce across some dimensions
np_arr = np.arange(0, 840).reshape([2, 3, 5, 7, 4]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
    # Need specialization for reduce(5D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
self._compareAll(np_arr, [1, 2, 3, 4])
self._compareAll(np_arr, [0, 1, 2, 3, 4])
# Simple tests for various types.
def testDoubleReduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.float64)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
def testInt32Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.int32)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
def testComplex64Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.complex64)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [0, 2])
  # TODO: add an int64 test (e.g. Int64Reduce1D) alongside the types above.
def _compareGradient(self, shape, sum_shape, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(shape, sum_shape, reduction_axes[0])
x = np.arange(1.0, 49.0).reshape(shape).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_sum(t, reduction_axes)
jacob_t, jacob_n = tf.test.compute_gradient(t,
shape,
su,
sum_shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
self._compareGradient([2, 3, 4, 2], [2, 2], [1, 2])
def testGradient2(self):
self._compareGradient([2, 3, 4, 2], [2, 4, 2], [1])
def testGradient3(self):
self._compareGradient([2, 3, 4, 2], [2, 3, 2], [2])
def testGradient4(self):
self._compareGradient([2, 3, 4, 2], [], None)
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_sum(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MeanReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_sum = x
count = 1
for ra in reduction_axes[::-1]:
np_sum = np.sum(np_sum, axis=ra, keepdims=keep_dims)
count *= x.shape[ra]
np_ans = np_sum / count
with self.test_session(use_gpu=use_gpu):
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_mean(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_mean(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [0, 1, 2, 3])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_mean(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class ProdReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_ans = x
if reduction_axes is None:
np_ans = np.prod(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.prod(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session():
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_prod(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False)
self._compare(x, reduction_axes, True)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
# NOTE(kearnes): divide by 20 so product is a reasonable size
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_prod(t, [])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [0, 1, 2, 3])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
# NOTE(kearnes): the current gradient calculation gives NaNs for 0 inputs
x = np.arange(0.0, 48.0).reshape(s).astype(np.float32) / 20.
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_prod(t, [])
jacob_t, _ = tf.test.compute_gradient(t,
s,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
with self.assertRaisesOpError("Tensor had NaN values"):
tf.check_numerics(jacob_t, message="_ProdGrad NaN test").op.run()
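  # Why zeros give NaNs here (hedged explanation, consistent with the check
  # above): d/dx_i prod(x) = prod_{j != i} x_j, which the gradient kernel
  # presumably evaluates as prod(x) / x_i.  Once any entry is 0, prod(x) == 0
  # and that entry's gradient becomes 0 / 0 == NaN; in NumPy,
  # np.prod([0., 2., 3.]) / np.array([0., 2., 3.]) -> [nan, 0., 0.].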
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_prod(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MinReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_min(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t)
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_min(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_max(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t)
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_max(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class AllReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_all(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_any(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
  def testAny3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testPartialShapes(self):
# Input shape is unknown.
c_unknown = tf.placeholder(tf.float32)
s_unknown = tf.reduce_sum(c_unknown, [1, 2])
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
# Input shape only has known rank.
c_known_rank = tf.placeholder(tf.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
s_known_rank = tf.reduce_sum(c_known_rank, [1, 2], keep_dims=True)
self.assertEqual(3, s_known_rank.get_shape().ndims)
# Reduction indices are unknown.
unknown_indices = tf.placeholder(tf.int32)
c_unknown_indices = tf.constant([[10.0], [20.0]])
s_unknown_indices = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
def testEmpty(self):
self._compareAll([], [0])
if __name__ == "__main__":
tf.test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import endpoint
from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint
from google.cloud.aiplatform_v1beta1.types import endpoint_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class EndpointServiceTransport(abc.ABC):
"""Abstract transport class for EndpointService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
                be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_endpoint: gapic_v1.method.wrap_method(
self.create_endpoint, default_timeout=5.0, client_info=client_info,
),
self.get_endpoint: gapic_v1.method.wrap_method(
self.get_endpoint, default_timeout=5.0, client_info=client_info,
),
self.list_endpoints: gapic_v1.method.wrap_method(
self.list_endpoints, default_timeout=5.0, client_info=client_info,
),
self.update_endpoint: gapic_v1.method.wrap_method(
self.update_endpoint, default_timeout=5.0, client_info=client_info,
),
self.delete_endpoint: gapic_v1.method.wrap_method(
self.delete_endpoint, default_timeout=5.0, client_info=client_info,
),
self.deploy_model: gapic_v1.method.wrap_method(
self.deploy_model, default_timeout=5.0, client_info=client_info,
),
self.undeploy_model: gapic_v1.method.wrap_method(
self.undeploy_model, default_timeout=5.0, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_endpoint(
self,
) -> Callable[
[endpoint_service.CreateEndpointRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_endpoint(
self,
) -> Callable[
[endpoint_service.GetEndpointRequest],
Union[endpoint.Endpoint, Awaitable[endpoint.Endpoint]],
]:
raise NotImplementedError()
@property
def list_endpoints(
self,
) -> Callable[
[endpoint_service.ListEndpointsRequest],
Union[
endpoint_service.ListEndpointsResponse,
Awaitable[endpoint_service.ListEndpointsResponse],
],
]:
raise NotImplementedError()
@property
def update_endpoint(
self,
) -> Callable[
[endpoint_service.UpdateEndpointRequest],
Union[gca_endpoint.Endpoint, Awaitable[gca_endpoint.Endpoint]],
]:
raise NotImplementedError()
@property
def delete_endpoint(
self,
) -> Callable[
[endpoint_service.DeleteEndpointRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def deploy_model(
self,
) -> Callable[
[endpoint_service.DeployModelRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def undeploy_model(
self,
) -> Callable[
[endpoint_service.UndeployModelRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("EndpointServiceTransport",)
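# --- Illustrative sketch (not part of the generated library) ---
# A hypothetical minimal concrete transport, showing how this abstract base is
# meant to be specialized: real transports (gRPC, REST) override ``close`` and
# expose each RPC as a callable property.  The class name and the lambda
# bodies below are assumptions for demonstration only.
class _InProcessEndpointServiceTransport(EndpointServiceTransport):
    """Toy transport that answers a couple of RPCs locally with empty messages."""
    def close(self):
        # Nothing to release here; a real transport would close its channel.
        pass
    @property
    def get_endpoint(self):
        # A callable matching the declared signature, returning an empty Endpoint.
        return lambda request: endpoint.Endpoint()
    @property
    def create_endpoint(self):
        # Long-running RPCs return an Operation message.
        return lambda request: operations_pb2.Operation()
if __name__ == "__main__":
    # Anonymous credentials keep the constructor from consulting
    # Application Default Credentials.
    toy = _InProcessEndpointServiceTransport(
        credentials=ga_credentials.AnonymousCredentials()
    )
    print(toy.get_endpoint(endpoint_service.GetEndpointRequest()))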
|
|
from __future__ import print_function, division
import hashlib
import numpy as np
from astropy.table import Table, Column
from ..util.integrate import integrate_linlog_subset
from ..util.interpolate import (interp1d_fast, interp1d_fast_loglog,
interp1d_fast_linlog)
from ..util.functions import (extrap1d_log10, FreezableClass,
is_numpy_array, monotonically_increasing)
from ..util.constants import c, sigma
from astropy import log as logger
class OpticalProperties(FreezableClass):
def __init__(self):
        # Frequencies
self.nu = None
# Opacity to extinction
self.chi = None
# Albedo
self.albedo = None
        # Cosines of the scattering angles
self.mu = None
# Scattering matrix elements
self.P1 = None
self.P2 = None
self.P3 = None
self.P4 = None
# Prevent new attributes
self._freeze()
def __getattr__(self, attribute):
if attribute == 'kappa':
return self.chi * (1. - self.albedo)
elif attribute == 'wav':
return c / self.nu * 1.e4
else:
raise AttributeError(attribute)
def _sort(self):
if self.mu[-1] < self.mu[0]:
self.mu = self.mu[::-1]
self.P1 = self.P1[:, ::-1]
self.P2 = self.P2[:, ::-1]
self.P3 = self.P3[:, ::-1]
self.P4 = self.P4[:, ::-1]
if self.nu[-1] < self.nu[0]:
self.nu = self.nu[::-1]
self.albedo = self.albedo[::-1]
self.chi = self.chi[::-1]
self.P1 = self.P1[::-1, :]
self.P2 = self.P2[::-1, :]
self.P3 = self.P3[::-1, :]
self.P4 = self.P4[::-1, :]
def initialize_scattering_matrix(self):
self.P1 = np.zeros((len(self.nu), len(self.mu)))
self.P2 = np.zeros((len(self.nu), len(self.mu)))
self.P3 = np.zeros((len(self.nu), len(self.mu)))
self.P4 = np.zeros((len(self.nu), len(self.mu)))
def normalize_scattering_matrix(self):
for inu in range(len(self.nu)):
norm = interp1d_fast_linlog(self.mu, self.P1[inu, :], 0.)
with np.errstate(invalid='ignore'):
self.P1[inu, :] /= norm
self.P2[inu, :] /= norm
self.P3[inu, :] /= norm
self.P4[inu, :] /= norm
def truncate_scattering_matrix(self, mu_max):
'''
Remove forward scattering for mu > mu_max
'''
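        # In outline (this mirrors the code below): the retained fraction of
        # scattering is
        #     frac = int_{mu[0]}^{mu_max} P1 dmu / int_{mu[0]}^{mu[-1]} P1 dmu,
        # the scattering opacity is rescaled as sigma_nu -> frac * sigma_nu
        # while the absorption opacity kappa_nu is left unchanged (so chi and
        # albedo both drop), and the mu grid is cut at mu_max with a single
        # interpolated column appended so it still ends exactly at mu_max.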
self._sort()
# Loop over wavelengths and reduce scattering cross section
for inu in range(len(self.nu)):
# Find fraction remaining
frac = (integrate_linlog_subset(self.mu, self.P1[inu, :],
self.mu[0], mu_max)
/ integrate_linlog_subset(self.mu, self.P1[inu, :],
self.mu[0], self.mu[-1]))
logger.info("Removing fraction %g" % frac)
# Find scattering and absorption opacities
sigma_nu = self.chi[inu] * self.albedo[inu]
kappa_nu = self.chi[inu] - sigma_nu
# Decrease scattering opacity, total opacity, and hence albedo
sigma_nu *= frac
self.albedo[inu] = sigma_nu / (sigma_nu + kappa_nu)
self.chi[inu] = sigma_nu + kappa_nu
# Interpolate scattering matrix at mu_max
P1_max = np.zeros((len(self.nu), 1))
P2_max = np.zeros((len(self.nu), 1))
P3_max = np.zeros((len(self.nu), 1))
P4_max = np.zeros((len(self.nu), 1))
for inu in range(len(self.nu)):
P1_max[inu, 0] = interp1d_fast_linlog(self.mu, self.P1[inu, :], mu_max)
P2_max[inu, 0] = interp1d_fast(self.mu, self.P2[inu, :], mu_max)
P3_max[inu, 0] = interp1d_fast(self.mu, self.P3[inu, :], mu_max)
P4_max[inu, 0] = interp1d_fast(self.mu, self.P4[inu, :], mu_max)
# Now truncate scattering matrix elements
cut = np.searchsorted(self.mu, mu_max)
self.mu = np.hstack([self.mu[:cut], mu_max])
self.P1 = np.hstack([self.P1[:, :cut], P1_max])
self.P2 = np.hstack([self.P2[:, :cut], P2_max])
self.P3 = np.hstack([self.P3[:, :cut], P3_max])
self.P4 = np.hstack([self.P4[:, :cut], P4_max])
def extrapolate_wav(self, wav1, wav2):
'''
        Extrapolate the optical properties to a larger wavelength range.
Parameters
----------
wav1, wav2 : float
The range of wavelengths (in microns) to extrapolate the optical
properties to.
Notes
-----
The extrapolation is done in the following way:
* The opacity to extinction (``chi``) is extrapolated by fitting a
power-law to the opacities at the two highest frequencies and
following that power law, and similarly at the lowest
frequencies. This ensures that the slope of the opacity remains
constant.
* The albedo is extrapolated by assuming that the albedo is
constant outside the original range, and is set to the same
value as the values for the lowest and highest frequencies.
* The scattering matrix is extrapolated similarly to the albedo,
by simply extending the values for the lowest and highest
frequencies to the new frequency range.
'''
nu1 = c / max(wav1, wav2) * 1.e4
nu2 = c / min(wav1, wav2) * 1.e4
return self.extrapolate_nu(nu1, nu2)
def extrapolate_nu(self, nu1, nu2):
'''
Extrapolate the optical properties to a larger frequency range.
Parameters
----------
nu1, nu2 : float
The range of frequencies to extrapolate the optical properties to.
Notes
-----
The extrapolation is done in the following way:
* The opacity to extinction (``chi``) is extrapolated by fitting a
power-law to the opacities at the two highest frequencies and
following that power law, and similarly at the lowest
frequencies. This ensures that the slope of the opacity remains
constant.
* The albedo is extrapolated by assuming that the albedo is
constant outside the original range, and is set to the same
value as the values for the lowest and highest frequencies.
* The scattering matrix is extrapolated similarly to the albedo,
by simply extending the values for the lowest and highest
frequencies to the new frequency range.
'''
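        # A worked sketch of the chi extrapolation (assuming extrap1d_log10
        # fits a straight line in log10-log10 space through the tabulated
        # points): with slope alpha = dlog10(chi)/dlog10(nu) at the low end,
        #     chi(nu1) = chi[0] * (nu1 / nu[0]) ** alpha,
        # so the power-law slope of the opacity is preserved, as described in
        # the Notes above; the high-frequency end is treated analogously.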
self._sort()
if nu1 >= self.nu[0]:
logger.info("Lower frequency is inside existing range, no extrapolation will be done at the lowest frequencies")
else:
ex_c = extrap1d_log10(self.nu, self.chi)
self.albedo = np.hstack([self.albedo[0], self.albedo])
self.chi = np.hstack([ex_c(nu1), self.chi])
self.nu = np.hstack([nu1, self.nu])
self.P1 = np.vstack([self.P1[0, :], self.P1])
self.P2 = np.vstack([self.P2[0, :], self.P2])
self.P3 = np.vstack([self.P3[0, :], self.P3])
self.P4 = np.vstack([self.P4[0, :], self.P4])
if nu2 <= self.nu[-1]:
logger.info("Upper frequency is inside existing range, no extrapolation will be done at the highest frequencies")
else:
ex_c = extrap1d_log10(self.nu, self.chi)
self.albedo = np.hstack([self.albedo, self.albedo[-1]])
self.chi = np.hstack([self.chi, ex_c(nu2)])
self.nu = np.hstack([self.nu, nu2])
self.P1 = np.vstack([self.P1, self.P1[-1, :]])
self.P2 = np.vstack([self.P2, self.P2[-1, :]])
self.P3 = np.vstack([self.P3, self.P3[-1, :]])
self.P4 = np.vstack([self.P4, self.P4[-1, :]])
def to_hdf5_group(self, group):
if not self.all_set():
raise Exception("Not all attributes of the optical properties are set")
# Create optical properties table
topt = Table()
topt.add_column(Column(data=self.nu, name='nu'))
topt.add_column(Column(data=self.albedo, name='albedo'))
topt.add_column(Column(data=self.chi, name='chi'))
self.normalize_scattering_matrix()
topt.add_column(Column(data=self.P1, name='P1'))
topt.add_column(Column(data=self.P2, name='P2'))
topt.add_column(Column(data=self.P3, name='P3'))
topt.add_column(Column(data=self.P4, name='P4'))
# Sort by frequency
topt.sort('nu')
# Create scattering angles table and add to table set
tmu = Table()
tmu.add_column(Column(data=self.mu, name='mu'))
# Add to group
topt.write(group, path='optical_properties')
tmu.write(group, path='scattering_angles')
def from_hdf5_group(self, group):
# Read in the scattering angles
tmu = group['scattering_angles']
self.mu = tmu['mu']
# Read in the optical properties
topt = group['optical_properties']
self.nu = topt['nu']
self.albedo = topt['albedo']
self.chi = topt['chi']
self.P1 = topt['P1']
self.P2 = topt['P2']
self.P3 = topt['P3']
self.P4 = topt['P4']
def interp_chi_wav(self, wav):
"Interpolate the opacity to extinction to a given wavelength"
return interp1d_fast_loglog(self.nu, self.chi, c / (wav * 1.e-4))
def interp_kappa_wav(self, wav):
"Interpolate the opacity to absorption to a given wavelength"
return interp1d_fast_loglog(self.nu, self.kappa, c / (wav * 1.e-4))
def interp_chi_nu(self, nu):
"Interpolate the opacity to extinction to a given wavelength"
return interp1d_fast_loglog(self.nu, self.chi, nu)
def interp_kappa_nu(self, nu):
"Interpolate the opacity to absorption to a given wavelength"
return interp1d_fast_loglog(self.nu, self.kappa, nu)
def all_set(self):
return self.get_missing_attributes() == []
def get_missing_attributes(self):
missing = []
for attribute in ['nu', 'chi', 'albedo', 'mu', 'P1', 'P2', 'P3', 'P4']:
if getattr(self, attribute) is None:
missing.append(attribute)
return missing
def ensure_all_set(self):
if not self.all_set():
missing = self.get_missing_attributes()
raise Exception("The following attributes of the optical properties have not been set: {0:s}".format(', '.join(missing)))
def plot(self, figure, subplots):
self.ensure_all_set()
import matplotlib.pyplot as plt
self._sort()
ax = figure.add_subplot(subplots[0])
ax.loglog(self.wav, self.chi, color='blue')
ax2 = ax.twinx()
ax2.plot(self.wav, self.albedo, color='red')
ax.set_xlim(self.wav.min(), self.wav.max())
ax2.set_xlim(self.wav.min(), self.wav.max())
ax2.set_ylim(0., 1.)
ax.set_xlabel('Wavelength (microns)')
ax.set_ylabel('Opacity to extinction (cm^2/g)', color='blue')
ax2.set_ylabel('Albedo', color='red', rotation=-90)
self.normalize_scattering_matrix()
m = plt.cm.gist_heat
vmin, vmax = -2., 2.
ax = figure.add_subplot(subplots[1])
ax.patch.set_facecolor('black')
ax.contourf(self.wav, self.mu,
np.log10(np.clip(np.abs(self.P1.swapaxes(0, 1)), 10. ** vmin, 10. ** vmax)),
np.linspace(vmin, vmax, 30), cmap=m)
ax.set_xscale('log')
ax.set_xlim(self.wav.min(), self.wav.max())
ax.set_ylim(-1., 1.)
ax.set_title('S11', y=0.9, verticalalignment='top', color='white')
ax.set_xlabel('Wavelength (microns)')
ax.set_ylabel('mu')
ax = figure.add_subplot(subplots[2])
ax.patch.set_facecolor('black')
ax.contourf(self.wav, self.mu,
np.log10(np.clip(np.abs(self.P2.swapaxes(0, 1)), 10. ** vmin, 10. ** vmax)),
np.linspace(vmin, vmax, 30), cmap=m)
ax.set_xscale('log')
ax.set_xlim(self.wav.min(), self.wav.max())
ax.set_ylim(-1., 1.)
ax.set_title('S12', y=0.9, verticalalignment='top', color='white')
ax.set_xlabel('Wavelength (microns)')
ax.set_ylabel('mu')
ax = figure.add_subplot(subplots[3])
ax.patch.set_facecolor('black')
ax.contourf(self.wav, self.mu,
np.log10(np.clip(np.abs(self.P3.swapaxes(0, 1)), 10. ** vmin, 10. ** vmax)),
np.linspace(vmin, vmax, 30), cmap=m)
ax.set_xscale('log')
ax.set_xlim(self.wav.min(), self.wav.max())
ax.set_ylim(-1., 1.)
ax.set_title('S33', y=0.9, verticalalignment='top', color='white')
ax.set_xlabel('Wavelength (microns)')
ax.set_ylabel('mu')
ax = figure.add_subplot(subplots[4])
ax.patch.set_facecolor('black')
ax.contourf(self.wav, self.mu,
np.log10(np.clip(np.abs(self.P4.swapaxes(0, 1)), 10. ** vmin, 10. ** vmax)),
np.linspace(vmin, vmax, 30), cmap=m)
ax.set_xscale('log')
ax.set_xlim(self.wav.min(), self.wav.max())
ax.set_ylim(-1., 1.)
ax.set_title('S34', y=0.9, verticalalignment='top', color='white')
ax.set_xlabel('Wavelength (microns)')
ax.set_ylabel('mu')
return figure
def hash(self):
h = hashlib.md5()
h.update(self.nu.tostring())
h.update(self.chi.tostring())
h.update(self.albedo.tostring())
h.update(self.mu.tostring())
h.update(self.P1.tostring())
h.update(self.P2.tostring())
h.update(self.P3.tostring())
h.update(self.P4.tostring())
return h.hexdigest()
def __setattr__(self, attribute, value):
if attribute in ['nu', 'chi', 'albedo', 'mu'] and value is not None:
if type(value) in [list, tuple]:
value = np.array(value)
if not is_numpy_array(value) or value.ndim != 1:
raise ValueError(attribute + " should be a 1-D sequence")
if attribute in ['nu', 'mu'] and value is not None:
if not monotonically_increasing(value):
raise ValueError(attribute + " should be monotonically increasing")
if attribute == 'nu' and value is not None:
if value[0] <= 0.:
raise ValueError('nu should be strictly positive')
if attribute == 'chi' and value is not None:
if value[0] < 0.:
raise ValueError('chi should be positive')
if attribute == 'albedo' and value is not None:
if value[0] < 0. or value[-1] > 1.:
raise ValueError('albedo should be in the range [0:1]')
if attribute == 'mu' and value is not None:
if value[0] < -1. or value[-1] > 1.:
raise ValueError('mu should be in the range [-1:1]')
if attribute in ['P1', 'P2', 'P3', 'P4'] and value is not None:
if self.nu is None:
raise ValueError("nu needs to be set before " + attribute)
if self.mu is None:
raise ValueError("mu needs to be set before " + attribute)
if type(value) in [list, tuple]:
value = np.array(value)
if not is_numpy_array(value) or value.ndim != 2:
raise ValueError(attribute + " should be a 2-D array")
if value.shape[0] != len(self.nu) or value.shape[1] != len(self.mu):
raise ValueError(attribute + " has an incorrect shape: %s but expected (%i, %i)" % (value.shape, len(self.nu), len(self.mu)))
FreezableClass.__setattr__(self, attribute, value)
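# --- Illustrative usage sketch (assumption: executed inside the hyperion
# package, since this module relies on relative imports).  It shows the
# minimal set of attributes that must be populated before the properties can
# be written out; the numbers are placeholders, not a physical dust model. ---
if __name__ == "__main__":
    o = OpticalProperties()
    o.nu = np.logspace(11., 16., 100)        # frequencies in Hz, increasing
    o.chi = 100. * (o.nu / o.nu[0]) ** 1.5   # opacity to extinction (cm^2/g)
    o.albedo = np.repeat(0.5, 100)           # scattering albedo, in [0, 1]
    o.mu = np.linspace(-1., 1., 50)          # cosine of the scattering angle
    o.initialize_scattering_matrix()         # allocates P1..P4 as zeros
    o.P1[:, :] = 1.                          # crude isotropic scattering
    print(o.get_missing_attributes())        # -> [] once everything is set
    print(o.interp_chi_wav(1.0))             # chi interpolated at 1 micron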
|
|
from __future__ import annotations
import argparse
import decimal
import importlib
import inspect
import json
import optparse
import os
import tempfile
from collections import namedtuple
from unittest import mock
import pytest
import stomp as stomppy
import workflows
import workflows.transport
from workflows.transport.common_transport import TemporarySubscription
from workflows.transport.stomp_transport import StompTransport
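# A lightweight stand-in for the frame object stomp.py hands to a listener's
# on_message() (just a .headers dict and a .body payload), so the tests below
# can drive message delivery directly without a live broker.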
_frame = namedtuple("frame", "headers, body")
def test_lookup_and_initialize_stomp_transport_layer():
"""Find the stomp transport layer via the lookup mechanism and run
its constructor with default settings."""
stomp = workflows.transport.lookup("StompTransport")
assert stomp == StompTransport
stomp()
def test_add_command_line_help_optparse():
"""Check that command line parameters are registered in the parser."""
parser = mock.MagicMock()
StompTransport().add_command_line_options(parser)
parser.add_argument.assert_not_called()
parser.add_option.assert_called()
assert parser.add_option.call_count > 4
for call in parser.add_option.call_args_list:
assert call[1]["action"] == "callback"
def test_add_command_line_help_argparse():
"""Check that command line parameters are registered in the parser."""
parser = mock.MagicMock()
parser.add_argument = mock.Mock()
StompTransport().add_command_line_options(parser)
parser.add_argument.assert_called()
parser.add_option.assert_not_called()
assert parser.add_argument.call_count > 4
for call in parser.add_argument.call_args_list:
assert inspect.isclass(call[1]["action"])
def test_adding_arguments_to_argparser():
"""Check that command line parameters can be added to the parser."""
parser = argparse.ArgumentParser()
StompTransport().add_command_line_options(parser)
result = parser.parse_args([])
assert result.stomp_host
assert result.stomp_port
assert result.stomp_user
assert result.stomp_pass
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_check_config_file_behaviour(mockstomp):
"""Check that a specified configuration file is read, that command line
parameters have precedence and are passed on to the stomp layer."""
mockconn = mock.Mock()
mockstomp.Connection.return_value = mockconn
parser = optparse.OptionParser()
stomp = StompTransport()
stomp.add_command_line_options(parser)
# Temporarily create an example stomp configuration file
cfgfile = tempfile.NamedTemporaryFile(delete=False)
try:
cfgfile.write(
b"""
# An example stomp configuration file
# Only lines in the [stomp] block will be interpreted
[stomp]
#host = 127.0.0.1
port = 1234
username = someuser
password = somesecret
prefix = namespace
"""
)
cfgfile.close()
parser.parse_args(
["--stomp-conf", cfgfile.name, "--stomp-user", mock.sentinel.user]
)
# Command line parameters are shared for all instances
stomp = StompTransport()
stomp.connect()
# Reset configuration for subsequent tests by reloading StompTransport
importlib.reload(workflows.transport.stomp_transport)
globals()["StompTransport"] = workflows.transport.stomp_transport.StompTransport
mockstomp.Connection.assert_called_once_with([("localhost", 1234)])
mockconn.connect.assert_called_once_with(
mock.sentinel.user, "somesecret", wait=False
)
assert stomp.get_namespace() == "namespace"
finally:
os.remove(cfgfile.name)
# Loading a non-existing configuration file
with pytest.raises(workflows.Error):
parser.parse_args(["--stomp-conf", ""])
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_anonymous_connection(mockstomp):
"""Check that a specified configuration file is read, that command line
parameters have precedence and are passed on to the stomp layer."""
mockconn = mock.Mock()
mockstomp.Connection.return_value = mockconn
parser = optparse.OptionParser()
stomp = StompTransport()
stomp.add_command_line_options(parser)
parser.parse_args(["--stomp-user=", "--stomp-pass="])
# Command line parameters are shared for all instances
stomp = StompTransport()
stomp.connect()
# Reset configuration for subsequent tests by reloading StompTransport
importlib.reload(workflows.transport.stomp_transport)
globals()["StompTransport"] = workflows.transport.stomp_transport.StompTransport
mockconn.connect.assert_called_once_with(wait=False)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_instantiate_link_and_connect_to_broker(mockstomp):
"""Test the Stomp connection routine."""
stomp = StompTransport()
mockconn = mockstomp.Connection.return_value
assert not stomp.is_connected()
stomp.connect()
mockstomp.Connection.assert_called_once()
mockconn.connect.assert_called_once()
assert stomp.is_connected()
stomp.connect()
mockstomp.Connection.assert_called_once()
mockconn.connect.assert_called_once()
assert stomp.is_connected()
stomp.disconnect()
mockstomp.Connection.assert_called_once()
mockconn.connect.assert_called_once()
mockconn.disconnect.assert_called_once()
assert not stomp.is_connected()
stomp.disconnect()
mockstomp.Connection.assert_called_once()
mockconn.connect.assert_called_once()
mockconn.disconnect.assert_called_once()
assert not stomp.is_connected()
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_error_handling_when_connecting_to_broker(mockstomp):
"""Test the Stomp connection routine."""
stomp = StompTransport()
mockconn = mockstomp.Connection.return_value
mockconn.connect.side_effect = stomppy.exception.ConnectFailedException()
mockstomp.exception.ConnectFailedException = (
stomppy.exception.ConnectFailedException
)
with pytest.raises(workflows.Disconnected):
stomp.connect()
assert not stomp.is_connected()
@mock.patch("workflows.transport.stomp_transport.time")
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_broadcast_status(mockstomp, mocktime):
"""Test the status broadcast function."""
mocktime.time.return_value = 20000
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp.broadcast_status({"status": str(mock.sentinel.status)})
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
# expiration should be 15 seconds in the future
assert int(kwargs["headers"]["expires"]) == 1000 * (20000 + 15)
destination, message = args
assert destination.startswith("/topic/transient.status")
statusdict = json.loads(message)
assert statusdict["status"] == str(mock.sentinel.status)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_send_message(mockstomp):
"""Test the message sending function."""
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp._send(str(mock.sentinel.channel), mock.sentinel.message)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == ("/queue/" + str(mock.sentinel.channel), mock.sentinel.message)
assert kwargs.get("headers") == {"persistent": "true"}
stomp._send(
str(mock.sentinel.channel),
mock.sentinel.message,
headers={"hdr": mock.sentinel.header},
delay=123,
)
assert mockconn.send.call_count == 2
args, kwargs = mockconn.send.call_args
assert args == ("/queue/" + str(mock.sentinel.channel), mock.sentinel.message)
assert kwargs == {
"headers": {
"hdr": mock.sentinel.header,
"persistent": "true",
"AMQ_SCHEDULED_DELAY": 123000,
}
}
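    # The transport converts the delay given in seconds into the millisecond
    # AMQ_SCHEDULED_DELAY header understood by ActiveMQ's message scheduler.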
@mock.patch("workflows.transport.stomp_transport.stomp")
@mock.patch("workflows.transport.stomp_transport.time")
def test_sending_message_with_expiration(time, mockstomp):
"""Test sending a message that expires some time in the future."""
system_time = 1234567.1234567
message_lifetime = 120
expiration_time = int((system_time + message_lifetime) * 1000)
time.time.return_value = system_time
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp._send(str(mock.sentinel.channel), mock.sentinel.message, expiration=120)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == ("/queue/" + str(mock.sentinel.channel), mock.sentinel.message)
assert kwargs.get("headers") == {"persistent": "true", "expires": expiration_time}
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_error_handling_on_send(mockstomp):
"""Unrecoverable errors during sending should mark the connection as disconnected."""
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
mockconn.send.side_effect = stomppy.exception.NotConnectedException()
mockstomp.exception = stomppy.exception
with pytest.raises(workflows.Disconnected):
stomp._send(str(mock.sentinel.channel), mock.sentinel.message)
assert not stomp.is_connected()
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_send_broadcast(mockstomp):
"""Test the broadcast sending function."""
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp._broadcast(str(mock.sentinel.channel), mock.sentinel.message)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == ("/topic/" + str(mock.sentinel.channel), mock.sentinel.message)
assert kwargs.get("headers") in (None, {})
stomp._broadcast(
str(mock.sentinel.channel), mock.sentinel.message, headers=mock.sentinel.headers
)
assert mockconn.send.call_count == 2
args, kwargs = mockconn.send.call_args
assert args == ("/topic/" + str(mock.sentinel.channel), mock.sentinel.message)
assert kwargs == {"headers": mock.sentinel.headers}
stomp._broadcast(str(mock.sentinel.channel), mock.sentinel.message, delay=123)
assert mockconn.send.call_count == 3
args, kwargs = mockconn.send.call_args
assert args == ("/topic/" + str(mock.sentinel.channel), mock.sentinel.message)
assert kwargs["headers"].get("AMQ_SCHEDULED_DELAY") == 123000
@mock.patch("workflows.transport.stomp_transport.stomp")
@mock.patch("workflows.transport.stomp_transport.time")
def test_broadcasting_message_with_expiration(time, mockstomp):
"""Test sending a message that expires some time in the future."""
system_time = 1234567.1234567
message_lifetime = 120
expiration_time = int((system_time + message_lifetime) * 1000)
time.time.return_value = system_time
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp._broadcast(str(mock.sentinel.channel), mock.sentinel.message, expiration=120)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == ("/topic/" + str(mock.sentinel.channel), mock.sentinel.message)
assert kwargs.get("headers") == {"expires": expiration_time}
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_error_handling_on_broadcast(mockstomp):
"""Unrecoverable errors during broadcasting should mark the connection as disconnected."""
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
mockconn.send.side_effect = stomppy.exception.NotConnectedException()
mockstomp.exception = stomppy.exception
with pytest.raises(workflows.Disconnected):
stomp._broadcast(str(mock.sentinel.channel), mock.sentinel.message)
assert not stomp.is_connected()
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_messages_are_serialized_for_transport(mockstomp):
"""Test the message serialization."""
banana = {"entry": [1, 2.0, decimal.Decimal(3), "banana"]}
banana_str = '{"entry": [1, 2.0, 3.0, "banana"]}'
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp.send(str(mock.sentinel.channel1), banana)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == ("/queue/" + str(mock.sentinel.channel1), banana_str)
stomp.broadcast(str(mock.sentinel.channel2), banana)
args, kwargs = mockconn.send.call_args
assert args == ("/topic/" + str(mock.sentinel.channel2), banana_str)
with pytest.raises(Exception):
stomp.send(str(mock.sentinel.channel), mock.sentinel.unserializable)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_messages_are_not_serialized_for_raw_transport(mockstomp):
"""Test the raw sending methods."""
banana = '{"entry": [0, "banana"]}'
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp.raw_send(str(mock.sentinel.channel1), banana)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == ("/queue/" + str(mock.sentinel.channel1), banana)
mockconn.send.reset_mock()
stomp.raw_broadcast(str(mock.sentinel.channel2), banana)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == ("/topic/" + str(mock.sentinel.channel2), banana)
mockconn.send.reset_mock()
stomp.raw_send(str(mock.sentinel.channel), mock.sentinel.unserializable)
mockconn.send.assert_called_once()
args, kwargs = mockconn.send.call_args
assert args == (
"/queue/" + str(mock.sentinel.channel),
mock.sentinel.unserializable,
)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_messages_are_deserialized_after_transport(mockstomp):
"""Test the message serialization."""
banana = {"entry": [0, "banana"]}
banana_str = '{"entry": [0, "banana"]}'
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
message_handler = mockconn.set_listener.call_args[0][1].on_message
# Test subscriptions
callback = mock.Mock()
stomp.subscribe("channel", callback)
subscription_id = mockconn.subscribe.call_args[0][1]
message_handler(_frame({"subscription": subscription_id}, banana_str))
callback.assert_called_once_with({"subscription": subscription_id}, banana)
message_handler(
_frame({"subscription": subscription_id}, mock.sentinel.undeserializable)
)
callback.assert_called_with(
{"subscription": subscription_id}, mock.sentinel.undeserializable
)
# Test broadcast subscriptions
callback = mock.Mock()
stomp.subscribe_broadcast("channel", callback)
subscription_id = mockconn.subscribe.call_args[0][1]
message_handler(_frame({"subscription": subscription_id}, banana_str))
callback.assert_called_once_with({"subscription": subscription_id}, banana)
message_handler(
_frame({"subscription": subscription_id}, mock.sentinel.undeserializable)
)
callback.assert_called_with(
{"subscription": subscription_id}, mock.sentinel.undeserializable
)
# Test subscriptions with mangling disabled
callback = mock.Mock()
stomp.subscribe("channel", callback, disable_mangling=True)
subscription_id = mockconn.subscribe.call_args[0][1]
message_handler(_frame({"subscription": subscription_id}, banana_str))
callback.assert_called_once_with({"subscription": subscription_id}, banana_str)
# Test broadcast subscriptions with mangling disabled
callback = mock.Mock()
stomp.subscribe_broadcast("channel", callback, disable_mangling=True)
subscription_id = mockconn.subscribe.call_args[0][1]
message_handler(_frame({"subscription": subscription_id}, banana_str))
callback.assert_called_once_with({"subscription": subscription_id}, banana_str)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_subscribe_to_queue(mockstomp):
"""Test subscribing to a queue (producer-consumer), callback functions and unsubscribe."""
mock_cb1 = mock.Mock()
mock_cb2 = mock.Mock()
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
def callback_resolver(cbid):
if cbid == 1:
return mock_cb1
if cbid == 2:
return mock_cb2
raise ValueError("Unknown subscription ID %r" % cbid)
stomp.subscription_callback = callback_resolver
mockconn.set_listener.assert_called_once()
listener = mockconn.set_listener.call_args[0][1]
assert listener is not None
stomp._subscribe(
1,
str(mock.sentinel.channel1),
mock_cb1,
)
mockconn.subscribe.assert_called_once()
args, kwargs = mockconn.subscribe.call_args
assert args == ("/queue/" + str(mock.sentinel.channel1), 1)
assert kwargs == {
"headers": {},
"ack": "auto",
}
stomp._subscribe(
2,
str(mock.sentinel.channel2),
mock_cb2,
retroactive=True,
selector=mock.sentinel.selector,
exclusive=True,
priority=42,
)
assert mockconn.subscribe.call_count == 2
args, kwargs = mockconn.subscribe.call_args
assert args == ("/queue/" + str(mock.sentinel.channel2), 2)
assert kwargs == {
"headers": {
"activemq.retroactive": "true",
"selector": mock.sentinel.selector,
"activemq.exclusive": "true",
"activemq.priority": 42,
},
"ack": "auto",
}
assert mock_cb1.call_count == 0
listener.on_message(_frame({"subscription": 1}, mock.sentinel.message1))
mock_cb1.assert_called_once_with({"subscription": 1}, mock.sentinel.message1)
assert mock_cb2.call_count == 0
listener.on_message(_frame({"subscription": 2}, mock.sentinel.message2))
mock_cb2.assert_called_once_with({"subscription": 2}, mock.sentinel.message2)
stomp._subscribe(3, str(mock.sentinel.channel3), mock_cb2, acknowledgement=True)
assert mockconn.subscribe.call_count == 3
args, kwargs = mockconn.subscribe.call_args
assert args == ("/queue/" + str(mock.sentinel.channel3), 3)
assert kwargs == {"headers": {}, "ack": "client-individual"}
stomp._unsubscribe(1)
mockconn.unsubscribe.assert_called_once_with(id=1)
stomp._unsubscribe(2)
mockconn.unsubscribe.assert_called_with(id=2)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_subscribe_to_broadcast(mockstomp):
"""Test subscribing to a topic (publish-subscribe) and callback functions."""
mock_cb1 = mock.Mock()
mock_cb2 = mock.Mock()
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
def callback_resolver(cbid):
if cbid == 1:
return mock_cb1
if cbid == 2:
return mock_cb2
raise ValueError("Unknown subscription ID %r" % cbid)
stomp.subscription_callback = callback_resolver
mockconn.set_listener.assert_called_once()
listener = mockconn.set_listener.call_args[0][1]
assert listener is not None
stomp._subscribe_broadcast(
1,
str(mock.sentinel.channel1),
mock_cb1,
)
mockconn.subscribe.assert_called_once()
args, kwargs = mockconn.subscribe.call_args
assert args == ("/topic/" + str(mock.sentinel.channel1), 1)
assert kwargs == {"headers": {}}
stomp._subscribe_broadcast(
2, str(mock.sentinel.channel2), mock_cb2, retroactive=True
)
assert mockconn.subscribe.call_count == 2
args, kwargs = mockconn.subscribe.call_args
assert args == ("/topic/" + str(mock.sentinel.channel2), 2)
assert kwargs == {"headers": {"activemq.retroactive": "true"}}
assert mock_cb1.call_count == 0
listener.on_message(_frame({"subscription": 1}, mock.sentinel.message1))
mock_cb1.assert_called_once_with({"subscription": 1}, mock.sentinel.message1)
assert mock_cb2.call_count == 0
listener.on_message(_frame({"subscription": 2}, mock.sentinel.message2))
mock_cb2.assert_called_once_with({"subscription": 2}, mock.sentinel.message2)
stomp._unsubscribe(1)
mockconn.unsubscribe.assert_called_once_with(id=1)
stomp._unsubscribe(2)
mockconn.unsubscribe.assert_called_with(id=2)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_subscribe_to_temporary_queue(mockstomp):
"""Test subscribing to a topic (publish-subscribe) and callback functions."""
mock_cb = mock.Mock()
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
known_subscriptions = set()
known_queues = set()
def assert_not_seen_before(ts: TemporarySubscription):
assert ts.subscription_id, "Temporary subscription is missing an ID"
assert (
ts.subscription_id not in known_subscriptions
), "Duplicate subscription ID"
assert ts.queue_name, "Temporary queue does not have a name"
assert ts.queue_name not in known_queues, "Duplicate temporary queue name"
known_subscriptions.add(ts.subscription_id)
known_queues.add(ts.queue_name)
print(f"Temporary subscription: {ts}")
mockconn.set_listener.assert_called_once()
listener = mockconn.set_listener.call_args[0][1]
assert listener is not None
ts = {}
for n, queue_hint in enumerate(
("", "", "hint", "hint", "transient.hint", "transient.hint")
):
ts[n] = stomp.subscribe_temporary(
channel_hint=queue_hint,
callback=mock_cb,
)
assert_not_seen_before(ts[n])
assert ts[n].queue_name.startswith("transient.")
return
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_transaction_calls(mockstomp):
"""Test that calls to create, commit, abort transactions are passed to stomp properly."""
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
stomp._transaction_begin(mock.sentinel.txid)
mockconn.begin.assert_called_once_with(transaction=mock.sentinel.txid)
stomp._send("destination", mock.sentinel.message, transaction=mock.sentinel.txid)
mockconn.send.assert_called_once_with(
"/queue/destination",
mock.sentinel.message,
headers={"persistent": "true"},
transaction=mock.sentinel.txid,
)
stomp._transaction_abort(mock.sentinel.txid)
mockconn.abort.assert_called_once_with(mock.sentinel.txid)
stomp._transaction_commit(mock.sentinel.txid)
mockconn.commit.assert_called_once_with(mock.sentinel.txid)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_ack_message(mockstomp):
"""Test that the _ack function is properly forwarded to stomp."""
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
subid = stomp._subscribe(1, str(mock.sentinel.channel3), None, acknowledgement=True)
stomp._ack(mock.sentinel.messageid, subid)
mockconn.ack.assert_called_once_with(mock.sentinel.messageid, subid)
stomp._ack(mock.sentinel.messageid, subid, transaction=mock.sentinel.txn)
mockconn.ack.assert_called_with(
mock.sentinel.messageid, subid, transaction=mock.sentinel.txn
)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_nack_message(mockstomp):
"""Test that the _nack function is properly forwarded to stomp."""
stomp = StompTransport()
stomp.connect()
mockconn = mockstomp.Connection.return_value
subid = stomp._subscribe(1, str(mock.sentinel.channel3), None, acknowledgement=True)
stomp._nack(mock.sentinel.messageid, subid)
mockconn.nack.assert_called_once_with(mock.sentinel.messageid, subid)
stomp._nack(mock.sentinel.messageid, subid, transaction=mock.sentinel.txn)
mockconn.nack.assert_called_with(
mock.sentinel.messageid, subid, transaction=mock.sentinel.txn
)
@mock.patch("workflows.transport.stomp_transport.stomp")
def test_namespace_is_used_correctly(mockstomp):
"""Test that a configured namespace is correctly used when subscribing and sending messages."""
mockconn = mockstomp.Connection.return_value
StompTransport.defaults["--stomp-prfx"] = ""
stomp = StompTransport()
stomp.connect()
assert stomp.get_namespace() == ""
StompTransport.defaults["--stomp-prfx"] = "ns."
stomp = StompTransport()
stomp.connect()
assert stomp.get_namespace() == "ns"
stomp._send("some_queue", mock.sentinel.message1)
mockconn.send.assert_called_once()
assert mockconn.send.call_args[0] == (
"/queue/ns.some_queue",
mock.sentinel.message1,
)
stomp._send("some_queue", mock.sentinel.message2, ignore_namespace=True)
assert mockconn.send.call_args[0] == ("/queue/some_queue", mock.sentinel.message2)
StompTransport.defaults["--stomp-prfx"] = "ns"
stomp = StompTransport()
stomp.connect()
assert stomp.get_namespace() == "ns"
stomp._send("some_queue", mock.sentinel.message1)
assert mockconn.send.call_args[0] == (
"/queue/ns.some_queue",
mock.sentinel.message1,
)
stomp._broadcast("some_topic", mock.sentinel.message2)
assert mockconn.send.call_args[0] == (
"/topic/ns.some_topic",
mock.sentinel.message2,
)
stomp._broadcast("some_topic", mock.sentinel.message3, ignore_namespace=True)
assert mockconn.send.call_args[0] == ("/topic/some_topic", mock.sentinel.message3)
stomp._subscribe(1, "sub_queue", None)
mockconn.subscribe.assert_called_once()
assert mockconn.subscribe.call_args[0] == ("/queue/ns.sub_queue", 1)
stomp._subscribe(2, "sub_queue", None, ignore_namespace=True)
assert mockconn.subscribe.call_args[0] == ("/queue/sub_queue", 2)
stomp._subscribe_broadcast(3, "sub_topic", None)
assert mockconn.subscribe.call_args[0] == ("/topic/ns.sub_topic", 3)
stomp._subscribe_broadcast(4, "sub_topic", None, ignore_namespace=True)
assert mockconn.subscribe.call_args[0] == ("/topic/sub_topic", 4)
stomp.broadcast_status("some status")
assert mockconn.send.call_args[0] == ("/topic/ns.transient.status", '"some status"')
|
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex, subprocess
import sys
import p4c_src.util as util
class BackendDriver:
"""A class that has a list of passes that need to be run. Each
backend configures the commands that wants to be run.
Backends may instantiate this class and override the processing of
command line options.
Each pass builds a command line that is invoked as a separate
process. Each pass also allows invoking a pre and post processing
step to setup and cleanup after each command.
"""
def __init__(self, target, arch, argParser = None):
self._target = target
self._arch = arch
self._backend = target + '-' + arch
self._commands = {}
self._commandsEnabled = []
self._preCmds = {}
self._postCmds = {}
self._argParser = argParser
self._argGroup = None
# options
self._dry_run = False
self._output_directory = "./"
self._source_filename = None
self._source_basename = None
self._verbose = False
self._run_preprocessor_only = False
def __str__(self):
return self._backend
def add_command(self, cmd_name, cmd):
""" Add a command
If the command was previously set, it is overwritten
"""
if cmd_name in self._commands:
print("Warning: overwriting command", cmd_name, file=sys.stderr)
self._commands[cmd_name] = []
self._commands[cmd_name].append(cmd)
def add_command_option(self, cmd_name, option):
""" Add an option to a command
"""
if cmd_name not in self._commands:
if self._verbose:
print("Command", "'" + cmd_name + "'", \
"was not set for target", self._backend, file=sys.stderr)
return
self._commands[cmd_name].append(option)
def add_command_line_options(self):
""" Method for derived classes to add options to the parser
"""
self._argGroup = self._argParser.add_argument_group(title = self._backend)
def process_command_line_options(self, opts):
""" Process all command line options
"""
self._dry_run = opts.dry_run
self._verbose = opts.debug
self._output_directory = opts.output_directory
self._source_filename = opts.source_file
self._source_basename = os.path.splitext(os.path.basename(opts.source_file))[0]
self._run_preprocessor_only = opts.run_preprocessor_only
# set preprocessor options
if 'preprocessor' in self._commands:
for option in opts.preprocessor_options:
self.add_command_option('preprocessor', option)
# set compiler options.
for option in opts.compiler_options:
self.add_command_option('compiler', option)
# set debug info
if opts.debug_info:
for c in self._commands:
if c == 'assembler' or c == 'compiler' or c == 'linker':
self.add_command_option(c, "-g")
# set assembler options
if 'assembler' in self._commands:
for option in opts.assembler_options:
self.add_command_option('assembler', option)
# set linker options
if 'linker' in self._commands:
for option in opts.linker_options:
self.add_command_option('linker', option)
# append to the list of defines
for d in opts.preprocessor_defines:
self.add_command_option('preprocessor', "-D"+d)
self.add_command_option('compiler', "-D"+d)
# Preserve comments: -C
# Unix and std C keywords should be allowed in P4 (-undef and -nostdinc)
# Allow using ' for constants rather than delimiters for strings (-x assembler-with-cpp)
self.add_command_option('preprocessor', '-C -undef -nostdinc -x assembler-with-cpp')
# default search path
if opts.language == 'p4-16':
self.add_command_option('preprocessor',
"-I {}".format(os.environ['P4C_16_INCLUDE_PATH']))
self.add_command_option('compiler',
"-I {}".format(os.environ['P4C_16_INCLUDE_PATH']))
else:
self.add_command_option('preprocessor',
"-I {}".format(os.environ['P4C_14_INCLUDE_PATH']))
self.add_command_option('compiler',
"-I {}".format(os.environ['P4C_14_INCLUDE_PATH']))
# append search path
for path in opts.search_path:
self.add_command_option('preprocessor', "-I")
self.add_command_option('preprocessor', path)
self.add_command_option('compiler', "-I")
self.add_command_option('compiler', path)
# set p4 version
if opts.language == 'p4-16':
self.add_command_option('compiler', "--p4v=16")
else:
self.add_command_option('compiler', "--p4v=14")
# P4Runtime options
if opts.p4runtime_file:
print("'--p4runtime-file' and '--p4runtime-format'", \
"are deprecated, consider using '--p4runtime-files'", file=sys.stderr)
self.add_command_option('compiler',
"--p4runtime-file {}".format(opts.p4runtime_file))
self.add_command_option('compiler',
"--p4runtime-format {}".format(opts.p4runtime_format))
if opts.p4runtime_files:
self.add_command_option('compiler',
"--p4runtime-files {}".format(opts.p4runtime_files))
# disable annotations
if opts.disabled_annos is not None:
self.add_command_option('compiler',
'--disable-annotations={}'.format(opts.disabled_annos))
# enable parser inlining optimization
if opts.optimizeParserInlining:
self.add_command_option('compiler', '--parser-inline-opt')
# set developer options
if (os.environ['P4C_BUILD_TYPE'] == "DEVELOPER"):
for option in opts.log_levels:
self.add_command_option('compiler', "-T{}".format(option))
if opts.passes:
self.add_command_option('compiler', "--top4 {}".format(",".join(opts.passes)))
if opts.debug:
self.add_command_option('compiler', "-vvv")
if opts.dump_dir:
self.add_command_option('compiler', "--dump {}".format(opts.dump_dir))
if opts.json:
self.add_command_option('compiler', "--toJSON {}".format(opts.json))
if opts.json_source:
self.add_command_option('compiler', "--fromJSON {}".format(opts.json_source))
if opts.pretty_print:
self.add_command_option('compiler', "--pp {}".format(opts.pretty_print))
if opts.ndebug_mode:
self.add_command_option('compiler', "--ndebug")
if (os.environ['P4C_BUILD_TYPE'] == "DEVELOPER") and \
'assembler' in self._commands and opts.debug:
self.add_command_option('assembler', "-vvv")
# handle mode flags
if opts.run_preprocessor_only:
self.enable_commands(['preprocessor'])
elif opts.skip_preprocessor:
self.disable_commands(['preprocessor'])
elif opts.run_till_assembler:
self.enable_commands(['preprocessor', 'compiler'])
elif opts.run_all:
            # this is the default; each backend driver is expected to enable all
            # of its commands and define the order in which they execute
pass
def should_not_check_input(self, opts):
"""
Custom backends can use this function to implement their own --help* options
        which don't require an input file to be specified. In such cases, this function
        should be overloaded and return True whenever such an option has been specified
        by the user.
As a result, dummy.p4 will be used as a source file to prevent sanity checking
from failing.
"""
return False
def enable_commands(self, cmdsEnabled):
"""
Defines the order in which the steps are executed and which commands
are going to run
"""
newCmds = [c for c in cmdsEnabled if c in self._commands]
if len(newCmds) > 0:
self._commandsEnabled = newCmds
def disable_commands(self, cmdsDisabled):
"""
Disables the commands in cmdsDisabled
"""
for c in cmdsDisabled:
if c in self._commandsEnabled:
self._commandsEnabled.remove(c)
def runCmd(self, step, cmd):
"""
        Run a command and wait for it to complete.
        Returns the command's exit code (0 for dry runs, 1 if the command could not be invoked).
"""
if self._dry_run:
print('{}:\n{}'.format(step, ' '.join(cmd)))
return 0
args = shlex.split(" ".join(cmd))
try:
p = subprocess.Popen(args)
        except Exception:
import traceback
print("error invoking {}".format(" ".join(cmd)), file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return 1
if self._verbose: print('running {}'.format(' '.join(cmd)))
p.communicate() # now wait
return p.returncode
def preRun(self, cmd_name):
"""
Preamble to a command to setup anything needed
"""
if cmd_name not in self._preCmds:
return # nothing to do
cmds = self._preCmds[cmd_name]
for c in cmds:
rc = self.runCmd(cmd_name, c)
if rc != 0:
sys.exit(rc)
def postRun(self, cmd_name):
"""
Postamble to a command to cleanup
"""
if cmd_name not in self._postCmds:
return # nothing to do
cmds = self._postCmds[cmd_name]
rc = 0
for c in cmds:
rc += self.runCmd(cmd_name, c)
# we will continue to run post commands even if some fail
# so that we do all the cleanup
        return rc  # TODO: should we fail on this or not?
def run(self):
"""
Run the set of commands required by this driver
"""
# set output directory
if not os.path.exists(self._output_directory) and not self._run_preprocessor_only:
os.makedirs(self._output_directory)
for c in self._commandsEnabled:
# run the setup for the command
self.preRun(c)
# run the command
cmd = self._commands[c]
            if not cmd[0].startswith('/') and util.find_bin(cmd[0]) is None:
print("{}: command not found".format(cmd[0]), file=sys.stderr)
sys.exit(1)
rc = self.runCmd(c, cmd)
# run the cleanup whether the command succeeded or failed
postrc = self.postRun(c)
            # if the main command failed, stop and return its error code so that
            # backends that override run can choose what to do on error
if rc != 0:
return rc
return 0
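# A minimal usage sketch (not part of the original driver): a hypothetical
# backend would typically instantiate BackendDriver (or a subclass), register
# its passes, and enable them in execution order. The target/arch names and the
# binaries below ('cpp', 'mybackend-compiler') are illustrative assumptions.
def _example_backend_setup(argParser):
    driver = BackendDriver('mytarget', 'v1model', argParser)
    driver.add_command('preprocessor', 'cpp')
    driver.add_command('compiler', 'mybackend-compiler')
    driver.add_command_option('compiler', '--emit-json')
    # enable_commands also fixes the order in which the enabled passes run
    driver.enable_commands(['preprocessor', 'compiler'])
    return driver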
|
|
#!/usr/bin/env python
''' Installation script for dipy package '''
import numpy as np
import os
import sys
from copy import deepcopy
from os.path import join as pjoin, dirname
from glob import glob
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
# Get version and release info, which is all stored in dipy/info.py
ver_file = os.path.join('dipy', 'info.py')
# Use exec for compatibility with Python 3
exec(open(ver_file).read())
# force_setuptools can be set from the setup_egg.py script
if 'force_setuptools' not in globals():
# For some commands, always use setuptools
if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb',
'bdist_mpkg', 'bdist_wheel', 'install_egg_info', 'egg_info',
'easy_install')).intersection(sys.argv)) > 0:
force_setuptools = True
else:
force_setuptools = False
if force_setuptools:
import setuptools
# We may just have imported setuptools, or we may have been exec'd from a
# setuptools environment like pip
if 'setuptools' in sys.modules:
# Try to preempt setuptools monkeypatching of Extension handling when Pyrex
# is missing. Otherwise the monkeypatched Extension will change .pyx
# filenames to .c filenames, and we probably don't have the .c files.
sys.path.insert(0, pjoin(dirname(__file__), 'fake_pyrex'))
# Set setuptools extra arguments
nibabel_spec = 'nibabel>=' + NIBABEL_MIN_VERSION
extra_setuptools_args = dict(
tests_require=['nose'],
test_suite='nose.collector',
zip_safe=False,
extras_require = dict(
doc=['Sphinx>=1.0'],
test=['nose>=0.10.1']),
install_requires = [nibabel_spec])
# I removed numpy and scipy from install requires because easy_install seems
# to want to fetch these even if they are already installed, meaning of course
# that there's a long, fragile and unnecessary compile before the install
# finishes.
# We need setuptools install command because we're going to override it
# further down. Using distutils install command causes some confusion, due
# to the Pyrex / setuptools hack above (force_setuptools)
from setuptools.command import install
# If running setuptools and nibabel is not installed, we have to force
# setuptools to install nibabel locally for the script to continue. This
# hack is from
# http://stackoverflow.com/questions/12060925/best-way-to-share-code-across-several-setup-py-scripts
# with thanks
from setuptools.dist import Distribution
Distribution(dict(setup_requires=nibabel_spec))
else:
extra_setuptools_args = {}
from distutils.command import install
# Import distutils _after_ potential setuptools import above, and after removing
# MANIFEST
from distutils.core import setup
from distutils.extension import Extension
from distutils.command import build_py, build_ext
from cythexts import cyproc_exts, get_pyx_sdist, derror_maker
from setup_helpers import install_scripts_bat, add_flag_checking
# Define extensions
EXTS = []
# We use some defs from npymath, but we don't want to link against npymath lib
ext_kwargs = {'include_dirs':[np.get_include()]}
ext_kwargs['include_dirs'].append('src')
for modulename, other_sources, language in (
('dipy.reconst.peak_direction_getter', [], 'c'),
('dipy.reconst.recspeed', [], 'c'),
('dipy.reconst.vec_val_sum', [], 'c'),
('dipy.reconst.quick_squash', [], 'c'),
('dipy.tracking.distances', [], 'c'),
('dipy.tracking.streamlinespeed', [], 'c'),
('dipy.tracking.local.localtrack', [], 'c'),
('dipy.tracking.local.direction_getter', [], 'c'),
('dipy.tracking.local.tissue_classifier', [], 'c'),
('dipy.tracking.local.interpolation', [], 'c'),
('dipy.tracking.vox2track', [], 'c'),
('dipy.tracking.propspeed', [], 'c'),
('dipy.segment.cythonutils', [], 'c'),
('dipy.segment.featurespeed', [], 'c'),
('dipy.segment.metricspeed', [], 'c'),
('dipy.segment.clusteringspeed', [], 'c'),
('dipy.segment.clustering_algorithms', [], 'c'),
('dipy.denoise.denspeed', [], 'c'),
('dipy.align.vector_fields', [], 'c'),
('dipy.align.sumsqdiff', [], 'c'),
('dipy.align.expectmax', [], 'c'),
('dipy.align.crosscorr', [], 'c'),
('dipy.align.bundlemin', [], 'c'),
('dipy.align.transforms', [], 'c'),
('dipy.align.parzenhist', [], 'c')):
pyx_src = pjoin(*modulename.split('.')) + '.pyx'
EXTS.append(Extension(modulename, [pyx_src] + other_sources,
language=language,
**deepcopy(ext_kwargs))) # deepcopy lists
# Do our own build and install time dependency checking. setup.py gets called in
# many different ways, and may be called just to collect information (egg_info).
# We need to set up tripwires to raise errors when actually doing things, like
# building, rather than unconditionally in the setup.py import or exec
# We may make tripwire versions of build_ext, build_py, install
try:
from nisext.sexts import package_check, get_comrec_build
except ImportError: # No nibabel
msg = ('Need nisext package from nibabel installation'
' - please install nibabel first')
pybuilder = derror_maker(build_py.build_py, msg)
extbuilder = derror_maker(build_ext.build_ext, msg)
def package_check(*args, **kwargs):
raise RuntimeError(msg + " or try 'python setup_egg.py install'")
else: # We have nibabel
pybuilder = get_comrec_build('dipy')
# Cython is a dependency for building extensions, iff we don't have stamped
# up pyx and c files.
build_ext = cyproc_exts(EXTS, CYTHON_MIN_VERSION, 'pyx-stamps')
# Add openmp flags if they work
simple_test_c = """int main(int argc, char** argv) { return(0); }"""
omp_test_c = """#include <omp.h>
int main(int argc, char** argv) { return(0); }"""
extbuilder = add_flag_checking(
build_ext, [[['/arch:SSE2'], [], simple_test_c, 'USING_VC_SSE2'],
[['-msse2', '-mfpmath=sse'], [], simple_test_c, 'USING_GCC_SSE2'],
[['-fopenmp'], ['-fopenmp'], omp_test_c, 'HAVE_OPENMP']], 'dipy')
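    # A note on the call above (descriptive only, based on how these flag-checking
    # helpers are typically used): each [compile_flags, link_flags, test_code,
    # define] entry is tried by compiling the small test program with those flags,
    # and the define (e.g. HAVE_OPENMP) is added only when the toolchain accepts
    # them, so SSE2/OpenMP support is enabled per build environment rather than
    # unconditionally.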
# Installer that checks for install-time dependencies
class installer(install.install):
def run(self):
package_check('numpy', NUMPY_MIN_VERSION)
package_check('scipy', SCIPY_MIN_VERSION)
package_check('nibabel', NIBABEL_MIN_VERSION)
install.install.run(self)
cmdclass = dict(
build_py=pybuilder,
build_ext=extbuilder,
install=installer,
install_scripts=install_scripts_bat,
sdist=get_pyx_sdist(include_dirs=['src']))
def main(**extra_args):
setup(name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
version=VERSION,
requires=REQUIRES,
provides=PROVIDES,
packages = ['dipy',
'dipy.tests',
'dipy.align',
'dipy.align.tests',
'dipy.core',
'dipy.core.tests',
'dipy.direction',
'dipy.direction.tests',
'dipy.tracking',
'dipy.tracking.local',
'dipy.tracking.local.tests',
'dipy.tracking.tests',
'dipy.tracking.benchmarks',
'dipy.reconst',
'dipy.reconst.benchmarks',
'dipy.reconst.tests',
'dipy.io',
'dipy.io.tests',
'dipy.viz',
'dipy.viz.tests',
'dipy.testing',
'dipy.testing.tests',
'dipy.boots',
'dipy.data',
'dipy.utils',
'dipy.utils.tests',
'dipy.fixes',
'dipy.external',
'dipy.external.tests',
'dipy.segment',
'dipy.segment.benchmarks',
'dipy.segment.tests',
'dipy.sims',
'dipy.sims.tests',
'dipy.denoise',
'dipy.denoise.tests'],
ext_modules = EXTS,
# The package_data spec has no effect for me (on python 2.6) -- even
# changing to data_files doesn't get this stuff included in the source
# distribution -- not sure if it has something to do with the magic
# above, but distutils is surely the worst piece of code in all of
# python -- duplicating things into MANIFEST.in but this is admittedly
# only a workaround to get things started -- not a solution
package_data = {'dipy':
[pjoin('data', 'files', '*')
]},
data_files=[('share/doc/dipy/examples',
glob(pjoin('doc','examples','*.py')))],
scripts = [pjoin('bin', 'dipy_peak_extraction'),
pjoin('bin', 'dipy_fit_tensor'),
pjoin('bin', 'dipy_sh_estimate'),
pjoin('bin', 'dipy_quickbundles')],
cmdclass = cmdclass,
**extra_args
)
#simple way to test what setup will do
#python setup.py install --prefix=/tmp
if __name__ == "__main__":
main(**extra_setuptools_args)
|
|
from django.db.models import Field, FloatField
from django.db.models.expressions import CombinedExpression, Func, Value
from django.db.models.lookups import Lookup
class SearchVectorExact(Lookup):
lookup_name = 'exact'
def process_rhs(self, qn, connection):
if not hasattr(self.rhs, 'resolve_expression'):
config = getattr(self.lhs, 'config', None)
self.rhs = SearchQuery(self.rhs, config=config)
rhs, rhs_params = super().process_rhs(qn, connection)
return rhs, rhs_params
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return '%s @@ %s = true' % (lhs, rhs), params
class SearchVectorField(Field):
def db_type(self, connection):
return 'tsvector'
class SearchQueryField(Field):
def db_type(self, connection):
return 'tsquery'
class SearchVectorCombinable:
ADD = '||'
def _combine(self, other, connector, reversed):
if not isinstance(other, SearchVectorCombinable) or not self.config == other.config:
raise TypeError('SearchVector can only be combined with other SearchVectors')
if reversed:
return CombinedSearchVector(other, connector, self, self.config)
return CombinedSearchVector(self, connector, other, self.config)
class SearchVector(SearchVectorCombinable, Func):
function = 'to_tsvector'
arg_joiner = ", ' ',"
template = '%(function)s(concat(%(expressions)s))'
output_field = SearchVectorField()
config = None
def __init__(self, *expressions, **extra):
super().__init__(*expressions, **extra)
self.config = self.extra.get('config', self.config)
weight = self.extra.get('weight')
if weight is not None and not hasattr(weight, 'resolve_expression'):
weight = Value(weight)
self.weight = weight
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
resolved = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
if self.config:
if not hasattr(self.config, 'resolve_expression'):
resolved.config = Value(self.config).resolve_expression(query, allow_joins, reuse, summarize, for_save)
else:
resolved.config = self.config.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return resolved
def as_sql(self, compiler, connection, function=None, template=None):
config_params = []
if template is None:
if self.config:
config_sql, config_params = compiler.compile(self.config)
template = "%(function)s({}::regconfig, concat(%(expressions)s))".format(config_sql.replace('%', '%%'))
else:
template = self.template
sql, params = super().as_sql(compiler, connection, function=function, template=template)
extra_params = []
if self.weight:
weight_sql, extra_params = compiler.compile(self.weight)
sql = 'setweight({}, {})'.format(sql, weight_sql)
return sql, config_params + params + extra_params
class CombinedSearchVector(SearchVectorCombinable, CombinedExpression):
def __init__(self, lhs, connector, rhs, config, output_field=None):
self.config = config
super().__init__(lhs, connector, rhs, output_field)
class SearchQueryCombinable:
BITAND = '&&'
BITOR = '||'
def _combine(self, other, connector, reversed):
if not isinstance(other, SearchQueryCombinable):
raise TypeError(
'SearchQuery can only be combined with other SearchQuerys, '
'got {}.'.format(type(other))
)
if reversed:
return CombinedSearchQuery(other, connector, self, self.config)
return CombinedSearchQuery(self, connector, other, self.config)
# On Combinable, these are not implemented to reduce confusion with Q. In
# this case we are actually (ab)using them to do logical combination so
# it's consistent with other usage in Django.
def __or__(self, other):
return self._combine(other, self.BITOR, False)
def __ror__(self, other):
return self._combine(other, self.BITOR, True)
def __and__(self, other):
return self._combine(other, self.BITAND, False)
def __rand__(self, other):
return self._combine(other, self.BITAND, True)
class SearchQuery(SearchQueryCombinable, Value):
output_field = SearchQueryField()
SEARCH_TYPES = {
'plain': 'plainto_tsquery',
'phrase': 'phraseto_tsquery',
'raw': 'to_tsquery',
}
def __init__(self, value, output_field=None, *, config=None, invert=False, search_type='plain'):
self.config = config
self.invert = invert
if search_type not in self.SEARCH_TYPES:
raise ValueError("Unknown search_type argument '%s'." % search_type)
self.search_type = search_type
super().__init__(value, output_field=output_field)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
resolved = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
if self.config:
if not hasattr(self.config, 'resolve_expression'):
resolved.config = Value(self.config).resolve_expression(query, allow_joins, reuse, summarize, for_save)
else:
resolved.config = self.config.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return resolved
def as_sql(self, compiler, connection):
params = [self.value]
function = self.SEARCH_TYPES[self.search_type]
if self.config:
config_sql, config_params = compiler.compile(self.config)
template = '{}({}::regconfig, %s)'.format(function, config_sql)
params = config_params + [self.value]
else:
template = '{}(%s)'.format(function)
if self.invert:
template = '!!({})'.format(template)
return template, params
def _combine(self, other, connector, reversed):
combined = super()._combine(other, connector, reversed)
combined.output_field = SearchQueryField()
return combined
def __invert__(self):
return type(self)(self.value, config=self.config, invert=not self.invert)
def __str__(self):
result = super().__str__()
return ('~%s' % result) if self.invert else result
class CombinedSearchQuery(SearchQueryCombinable, CombinedExpression):
def __init__(self, lhs, connector, rhs, config, output_field=None):
self.config = config
super().__init__(lhs, connector, rhs, output_field)
def __str__(self):
return '(%s)' % super().__str__()
class SearchRank(Func):
function = 'ts_rank'
output_field = FloatField()
def __init__(self, vector, query, **extra):
if not hasattr(vector, 'resolve_expression'):
vector = SearchVector(vector)
if not hasattr(query, 'resolve_expression'):
query = SearchQuery(query)
weights = extra.get('weights')
if weights is not None and not hasattr(weights, 'resolve_expression'):
weights = Value(weights)
self.weights = weights
super().__init__(vector, query, **extra)
def as_sql(self, compiler, connection, function=None, template=None):
extra_params = []
extra_context = {}
if template is None and self.extra.get('weights'):
if self.weights:
template = '%(function)s(%(weights)s, %(expressions)s)'
weight_sql, extra_params = compiler.compile(self.weights)
extra_context['weights'] = weight_sql
sql, params = super().as_sql(
compiler, connection,
function=function, template=template, **extra_context
)
return sql, extra_params + params
SearchVectorField.register_lookup(SearchVectorExact)
class TrigramBase(Func):
output_field = FloatField()
def __init__(self, expression, string, **extra):
if not hasattr(string, 'resolve_expression'):
string = Value(string)
super().__init__(expression, string, **extra)
class TrigramSimilarity(TrigramBase):
function = 'SIMILARITY'
class TrigramDistance(TrigramBase):
function = ''
arg_joiner = ' <-> '
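# A minimal usage sketch (not part of this module): how these expressions are
# typically combined in a queryset. The queryset, the 'title'/'body' fields and
# the search terms are assumptions for illustration.
def _example_full_text_query(qs, term):
    vector = SearchVector('title', weight='A') + SearchVector('body', weight='B')
    query = SearchQuery(term) | SearchQuery('search')
    return (
        qs.annotate(search=vector, rank=SearchRank(vector, query))
        .filter(search=query)
        .order_by('-rank')
    )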
|
|
"""
sentry.plugins.base.v2
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ('Plugin2',)
import logging
from django.http import HttpResponseRedirect
from threading import local
from sentry.plugins.base.response import Response
class PluginMount(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if IPlugin2 in bases:
return new_cls
if new_cls.title is None:
new_cls.title = new_cls.__name__
if not new_cls.slug:
new_cls.slug = new_cls.title.replace(' ', '-').lower()
if not hasattr(new_cls, 'logger'):
new_cls.logger = logging.getLogger('sentry.plugins.%s' % (new_cls.slug,))
return new_cls
class IPlugin2(local):
"""
Plugin interface. Should not be inherited from directly.
A plugin should be treated as if it were a singleton. The owner does not
control when or how the plugin gets instantiated, nor is it guaranteed that
it will happen, or happen more than once.
>>> from sentry.plugins import Plugin2
>>>
>>> class MyPlugin(Plugin2):
>>> def get_title(self):
>>> return 'My Plugin'
As a general rule all inherited methods should allow ``**kwargs`` to ensure
ease of future compatibility.
"""
# Generic plugin information
title = None
slug = None
description = None
version = None
author = None
author_url = None
resource_links = ()
# Configuration specifics
conf_key = None
conf_title = None
project_conf_form = None
project_conf_template = 'sentry/plugins/project_configuration.html'
# Global enabled state
enabled = True
can_disable = True
# Should this plugin be enabled by default for projects?
project_default_enabled = False
def _get_option_key(self, key):
return '%s:%s' % (self.get_conf_key(), key)
def is_enabled(self, project=None):
"""
Returns a boolean representing if this plugin is enabled.
If ``project`` is passed, it will limit the scope to that project.
>>> plugin.is_enabled()
"""
if not self.enabled:
return False
if not self.can_disable:
return True
if not self.can_enable_for_projects():
return True
if project:
project_enabled = self.get_option('enabled', project)
if project_enabled is not None:
return project_enabled
else:
return self.project_default_enabled
return True
def reset_options(self, project=None, user=None):
from .helpers import reset_options
return reset_options(self.get_conf_key(), project, user)
def get_option(self, key, project=None, user=None):
"""
Returns the value of an option in your plugins keyspace, or ``None`` if
one is not present.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> value = plugin.get_option('my_option')
"""
from sentry.plugins.helpers import get_option
return get_option(self._get_option_key(key), project, user)
def set_option(self, key, value, project=None, user=None):
"""
Updates the value of an option in your plugins keyspace.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> plugin.set_option('my_option', 'http://example.com')
"""
from sentry.plugins.helpers import set_option
return set_option(self._get_option_key(key), value, project, user)
def unset_option(self, key, project=None, user=None):
"""
Removes an option in your plugins keyspace.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> plugin.unset_option('my_option')
"""
from sentry.plugins.helpers import unset_option
return unset_option(self._get_option_key(key), project, user)
def get_conf_key(self):
"""
Returns a string representing the configuration keyspace prefix for this plugin.
"""
if not self.conf_key:
self.conf_key = self.get_conf_title().lower().replace(' ', '_')
return self.conf_key
def get_conf_title(self):
"""
Returns a string representing the title to be shown on the configuration page.
"""
return self.conf_title or self.get_title()
def has_project_conf(self):
return self.project_conf_form is not None
def can_enable_for_projects(self):
"""
Returns a boolean describing whether this plugin can be enabled on a per project basis
"""
return True
# Response methods
def redirect(self, url):
"""
Returns a redirect response type.
"""
return HttpResponseRedirect(url)
def render(self, template, context=None):
"""
Given a template name, and an optional context (dictionary), returns a
ready-to-render response.
Default context includes the plugin instance.
>>> plugin.render('template.html', {'hello': 'world'})
"""
if context is None:
context = {}
context['plugin'] = self
return Response(template, context)
# The following methods are specific to web requests
def get_title(self):
"""
Returns the general title for this plugin.
>>> plugin.get_title()
"""
return self.title
def get_description(self):
"""
Returns the description for this plugin. This is shown on the plugin configuration
page.
>>> plugin.get_description()
"""
return self.description
def get_resource_links(self):
"""
Returns a list of tuples pointing to various resources for this plugin.
>>> def get_resource_links(self):
>>> return [
>>> ('Documentation', 'http://sentry.readthedocs.org'),
>>> ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
>>> ('Source', 'https://github.com/getsentry/sentry'),
>>> ]
"""
return self.resource_links
def get_rules(self, **kwargs):
"""
Return a list of Rule classes to add to the registry.
>>> def get_rules(self, **kwargs):
>>> return [MyCustomRule]
"""
return []
def get_actions(self, request, group, **kwargs):
"""
        Return a list of available actions to append to this aggregate.
Examples of built-in actions are "Mute Event" and "Remove Data".
An action is a tuple containing two elements:
('Action Label', '/uri/to/action/')
>>> def get_actions(self, request, group, **kwargs):
>>> return [('Google', 'http://google.com')]
"""
return []
def get_annotations(self, request, group, **kwargs):
"""
Return a list of annotations to append to this aggregate.
An example of an annotation might be "Needs Fix" or "Task #123".
        The properties of each annotation must match the constructor for
:class:`sentry.plugins.Annotation`
>>> def get_annotations(self, request, group, **kwargs):
>>> task_id = GroupMeta.objects.get_value(group, 'myplugin:tid')
>>> if not task_id:
>>> return []
>>> return [{'label': '#%s' % (task_id,)}]
"""
return []
def get_notifiers(self, **kwargs):
"""
Return a list of notifiers to append to the registry.
Notifiers must extend :class:`sentry.plugins.Notifier`.
>>> def get_notifiers(self, **kwargs):
>>> return [MyNotifier]
"""
return []
def get_tags(self, event, **kwargs):
"""
Return a list of additional tags to add to this instance.
A tag is a tuple containing two elements:
('tag-key', 'tag-value')
>>> def get_tags(self, event, **kwargs):
>>> return [('tag-key', 'tag-value')]
"""
return []
def get_event_preprocessors(self, **kwargs):
"""
Return a list of preprocessors to apply to the given event.
A preprocessor is a function that takes the normalized data blob as an
input and returns modified data as output. If no changes to the data are
made it is safe to return ``None``.
>>> def get_event_preprocessors(self, **kwargs):
>>> return [lambda x: x]
"""
return []
def get_feature_hooks(self, **kwargs):
"""
Return a list of callables to check for feature status.
>>> from sentry.features import FeatureHandler
>>>
>>> class NoRegistration(FeatureHandler):
>>> features = set(['auth:register'])
>>>
>>> def has(self, feature, actor):
>>> return False
>>> def get_feature_hooks(self, **kwargs):
>>> return [NoRegistration()]
"""
return []
def get_release_hook(self, **kwargs):
"""
Return an implementation of ``ReleaseHook``.
>>> from sentry.plugins import ReleaseHook
>>>
>>> class MyReleaseHook(ReleaseHook):
>>> def handle(self, request):
>>> self.finish_release(version=request.POST['version'])
>>> def get_release_hook(self, **kwargs):
>>> return MyReleaseHook
"""
return []
class Plugin2(IPlugin2):
"""
A plugin should be treated as if it were a singleton. The owner does not
control when or how the plugin gets instantiated, nor is it guaranteed that
it will happen, or happen more than once.
"""
__version__ = 2
__metaclass__ = PluginMount
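# A minimal sketch (not part of this module) of a concrete plugin built on the
# interface above; the class name, tag key and tag value are illustrative only.
class _ExamplePlugin(Plugin2):
    title = 'Example Plugin'
    description = 'Adds a static environment tag to every event.'
    version = '0.1.0'
    def get_tags(self, event, **kwargs):
        return [('environment', 'production')]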
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from mongoengine import ValidationError
from st2api.controllers import resource
from st2common import log as logging
from st2common.exceptions.actionalias import ActionAliasAmbiguityException
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.models.api.action import ActionAliasAPI
from st2common.persistence.actionalias import ActionAlias
from st2common.rbac.types import PermissionType
from st2common.rbac.backends import get_rbac_backend
from st2common.router import abort
from st2common.router import Response
from st2common.util.actionalias_matching import get_matching_alias
from st2common.util.actionalias_helpstring import generate_helpstring_result
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class ActionAliasController(resource.ContentPackResourceController):
"""
Implements the RESTful interface for ActionAliases.
"""
model = ActionAliasAPI
access = ActionAlias
supported_filters = {"name": "name", "pack": "pack"}
query_options = {"sort": ["pack", "name"]}
_custom_actions = {"match": ["POST"], "help": ["POST"]}
def get_all(
self,
exclude_attributes=None,
include_attributes=None,
sort=None,
offset=0,
limit=None,
requester_user=None,
**raw_filters,
):
return super(ActionAliasController, self)._get_all(
exclude_fields=exclude_attributes,
include_fields=include_attributes,
sort=sort,
offset=offset,
limit=limit,
raw_filters=raw_filters,
requester_user=requester_user,
)
def get_one(self, ref_or_id, requester_user):
permission_type = PermissionType.ACTION_ALIAS_VIEW
return super(ActionAliasController, self)._get_one(
ref_or_id, requester_user=requester_user, permission_type=permission_type
)
def match(self, action_alias_match_api):
"""
Find a matching action alias.
Handles requests:
POST /actionalias/match
"""
command = action_alias_match_api.command
try:
format_ = get_matching_alias(command=command)
except ActionAliasAmbiguityException as e:
LOG.exception(
'Command "%s" matched (%s) patterns.', e.command, len(e.matches)
)
return abort(http_client.BAD_REQUEST, six.text_type(e))
# Convert ActionAliasDB to API
action_alias_api = ActionAliasAPI.from_model(format_["alias"])
return {
"actionalias": action_alias_api,
"display": format_["display"],
"representation": format_["representation"],
}
def help(self, filter, pack, limit, offset, **kwargs):
"""
Get available help strings for action aliases.
Handles requests:
GET /actionalias/help
"""
try:
aliases_resp = super(ActionAliasController, self)._get_all(**kwargs)
aliases = [ActionAliasAPI(**alias) for alias in aliases_resp.json]
return generate_helpstring_result(
aliases, filter, pack, int(limit), int(offset)
)
        except TypeError as e:
LOG.exception(
"Helpstring request contains an invalid data type: %s.",
six.text_type(e),
)
return abort(http_client.BAD_REQUEST, six.text_type(e))
def post(self, action_alias, requester_user):
"""
Create a new ActionAlias.
Handles requests:
POST /actionalias/
"""
permission_type = PermissionType.ACTION_ALIAS_CREATE
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_has_resource_api_permission(
user_db=requester_user,
resource_api=action_alias,
permission_type=permission_type,
)
try:
action_alias_db = ActionAliasAPI.to_model(action_alias)
LOG.debug(
"/actionalias/ POST verified ActionAliasAPI and formulated ActionAliasDB=%s",
action_alias_db,
)
action_alias_db = ActionAlias.add_or_update(action_alias_db)
except (ValidationError, ValueError, ValueValidationException) as e:
LOG.exception("Validation failed for action alias data=%s.", action_alias)
abort(http_client.BAD_REQUEST, six.text_type(e))
return
extra = {"action_alias_db": action_alias_db}
LOG.audit(
"Action alias created. ActionAlias.id=%s" % (action_alias_db.id),
extra=extra,
)
action_alias_api = ActionAliasAPI.from_model(action_alias_db)
return Response(json=action_alias_api, status=http_client.CREATED)
def put(self, action_alias, ref_or_id, requester_user):
"""
Update an action alias.
Handles requests:
PUT /actionalias/1
"""
action_alias_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)
LOG.debug(
"PUT /actionalias/ lookup with id=%s found object: %s",
ref_or_id,
action_alias_db,
)
permission_type = PermissionType.ACTION_ALIAS_MODIFY
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_has_resource_db_permission(
user_db=requester_user,
resource_db=action_alias_db,
permission_type=permission_type,
)
if not hasattr(action_alias, "id"):
action_alias.id = None
try:
if (
action_alias.id is not None
and action_alias.id != ""
and action_alias.id != ref_or_id
):
LOG.warning(
"Discarding mismatched id=%s found in payload and using uri_id=%s.",
action_alias.id,
ref_or_id,
)
old_action_alias_db = action_alias_db
action_alias_db = ActionAliasAPI.to_model(action_alias)
action_alias_db.id = ref_or_id
action_alias_db = ActionAlias.add_or_update(action_alias_db)
except (ValidationError, ValueError) as e:
LOG.exception("Validation failed for action alias data=%s", action_alias)
abort(http_client.BAD_REQUEST, six.text_type(e))
return
extra = {
"old_action_alias_db": old_action_alias_db,
"new_action_alias_db": action_alias_db,
}
LOG.audit(
"Action alias updated. ActionAlias.id=%s." % (action_alias_db.id),
extra=extra,
)
action_alias_api = ActionAliasAPI.from_model(action_alias_db)
return action_alias_api
def delete(self, ref_or_id, requester_user):
"""
Delete an action alias.
Handles requests:
DELETE /actionalias/1
"""
action_alias_db = self._get_by_ref_or_id(ref_or_id=ref_or_id)
LOG.debug(
"DELETE /actionalias/ lookup with id=%s found object: %s",
ref_or_id,
action_alias_db,
)
permission_type = PermissionType.ACTION_ALIAS_DELETE
rbac_utils = get_rbac_backend().get_utils_class()
rbac_utils.assert_user_has_resource_db_permission(
user_db=requester_user,
resource_db=action_alias_db,
permission_type=permission_type,
)
try:
ActionAlias.delete(action_alias_db)
except Exception as e:
LOG.exception(
'Database delete encountered exception during delete of id="%s".',
ref_or_id,
)
abort(http_client.INTERNAL_SERVER_ERROR, six.text_type(e))
return
extra = {"action_alias_db": action_alias_db}
LOG.audit(
"Action alias deleted. ActionAlias.id=%s." % (action_alias_db.id),
extra=extra,
)
return Response(status=http_client.NO_CONTENT)
action_alias_controller = ActionAliasController()
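# A minimal client-side sketch (not part of this controller): exercising the
# match endpoint defined above over HTTP. The base URL, the auth token handling
# and the use of 'requests' are assumptions for illustration.
def _example_match_request(command, token, base_url="http://127.0.0.1:9101"):
    import requests
    resp = requests.post(
        base_url + "/v1/actionalias/match",
        headers={"X-Auth-Token": token},
        json={"command": command},
    )
    resp.raise_for_status()
    # Mirrors the dict returned by ActionAliasController.match():
    # {"actionalias": ..., "display": ..., "representation": ...}
    return resp.json()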
|
|
#!/usr/bin/env python
"""
MIDImeter v0.2 by Art Chaidarun
2012-08-03
"""
from __future__ import division
import os
import time
import pygame as pg
import pygame.midi
def textblit(text, x, y, boxwidth, align="l"):
"""Display text in the custom font"""
textwidth = typeface.size(text)[0]
if textwidth > boxwidth:
textwidth = boxwidth
if align == "c":
x += (boxwidth - textwidth) // 2
elif align == "r":
x += boxwidth - textwidth
window.blit(typeface.render(text, 1, (0, 0, 0)), (x, y))
def rainbow(numerator, denominator):
"""Convert green-to-red spectrum percentage into RGB tuple"""
try:
c = round(numerator / denominator * 255)
if c in range(64):
return (c * 4, 128 + c * 2, 32 - c // 2)
else:
return (255, 255 - (c - 64) * 4 // 3, 0)
except ZeroDivisionError as e:
# If no data, return the mid-spectrum color (i.e. orange)
return (255, 170, 0)
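# Illustrative values for the mapping above (computed for this sketch, not part
# of the original script):
#   rainbow(0, 1) -> (0, 128, 32)    # low end of the spectrum, green
#   rainbow(1, 2) -> (255, 170, 0)   # midpoint (c == 128), orange
#   rainbow(1, 1) -> (255, 1, 0)     # high end, effectively red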
def reset_stats():
"""Reset all information"""
global hitcount, pitchcount, minhits, maxhits, minstamp, maxstamp, minvolume, maxvolume, note
hitcount = 0
pitchcount = 0
minhits = 88
maxhits = 0
minstamp = 600000
maxstamp = 0
minvolume = 128
maxvolume = 0
note = [0] * 128 # [note pitch]
for i in range(128):
# Total hit count, last hit timestamp, current hit volume, max hit volume
note[i] = {'hits': 0, 'stamp': 0, 'active': 0, 'maxvol': 0}
pg.event.clear()
def clear_whites():
"""Blank out the white keys"""
# Benchmark results: For painting 156 keys vs. blitting 3 keyboards, draw.rect
# is 2.5x faster than window.blit while window.fill is 8.5x faster than blit.
for i in [263, 383, 503]:
for j in range(52):
window.fill((255, 255, 255), (11 + 15 * j, i, 13, 86))
def clear_blacks():
"""Blank out the black keys"""
for i in [263, 383, 503]:
window.blit(whiteshadow, (11, i))
for j in blackxs:
window.fill((0, 0, 0), (j - 2, i, 11, 55))
window.fill((45, 45, 45), (j, i, 7, 53))
def show_shadows():
"""Overlay black keys' shadows onto keyboards"""
window.blit(blackshadow, (9, 261))
window.blit(blackshadow, (9, 381))
window.blit(blackshadow, (9, 501))
def update_dev(increment=0):
"""Update the current MIDI input device"""
global devno
devno += increment
if devno >= len(idevs):
devno = 0
if devno < 0:
devno = len(idevs) - 1
window.blit(devbg, (1, 152))
window.blit(numbg, (81, 179))
window.blit(numbg, (126, 179))
textblit(idevs[devno][1], 1, 152, 238, "c")
textblit(str(devno + 1), 81, 179, 34, "r")
textblit(str(len(idevs)), 126, 179, 34, "l")
def update_time():
"""Update the playing time"""
def update_info():
"""Update the performance stats"""
pg.init()
pg.fastevent.init()
pg.midi.init()
# Load startup resources
background = pg.image.load("background.png")
devbg = background.subsurface((1, 152, 238, 15))
numbg = background.subsurface((126, 197, 34, 15))
typeface = pg.font.Font("VarelaRound-Regular.ttf", 15)
# Detect all MIDI devices, filter out outputs, create sanitized list of inputs
idevs = []
piano = -1
if pg.midi.get_count() > 0:
for i in range(pg.midi.get_count()):
if pg.midi.get_device_info(i)[2]: # if device is an input device
name = pg.midi.get_device_info(i)[1]
while " " in name:
name = name.replace(" ", " ")
idevs.append((i, name))
lname = name.lower()
if piano < 0 and any(x in lname for x in ("piano", "key", "usb")):
piano = i
# Select initial input device
if 0 < piano or 0 < len(idevs):
if 0 < piano:
devid = piano
else:
devid = idevs[0][0]
idev = pg.midi.Input(devid)
for n in range(len(idevs)):
if idevs[n][0] == devid:
devno = n
break
else:
devid = -1
# Create program window
os.environ['SDL_VIDEO_CENTERED'] = '1'
pg.display.set_icon(pg.image.load("icon.png"))
pg.display.set_caption("MIDImeter v0.2")
window = pg.display.set_mode((800, 600))
# Display program window
window.blit(background, (0, 0))
update_dev(0)
# TODO: Convert all flips into updates
pg.display.flip()
# Lazy-initialize stuff that isn't required for initial window display
import sys
wglow = pg.image.load("wglow.png")
bglow = pg.image.load("bglow.png")
blackshadow = pg.image.load("blackshadow.png")
whiteshadow = pg.image.load("whiteshadow.png")
infobg = background.subsurface((689, 180, 85, 11))
whitepitches = [21, 23, 24, 26, 28, 29, 31, 33, 35, 36, 38, 40, 41,
43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64,
65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86,
88, 89, 91, 93, 95, 96, 98, 100, 101, 103, 105, 107, 108]
blackpitches = [22, 25, 27, 30, 32, 34, 37, 39, 42, 44, 46, 49,
51, 54, 56, 58, 61, 63, 66, 68, 70, 73, 75, 78,
80, 82, 85, 87, 90, 92, 94, 97, 99, 102, 104, 106]
blackxs = [21, 51, 66, 96, 111, 126, 156, 171, 201, 216, 231, 261,
276, 306, 321, 336, 366, 381, 411, 426, 441, 471, 486, 516,
531, 546, 576, 591, 621, 636, 651, 681, 696, 726, 741, 756]
recording = False
reset_stats()
while 1:
events = pg.fastevent.get()
for e in events:
if e.type in [pg.midi.MIDIIN]:
pitch = e.data1
if e.status in range(128, 160):
clear_whites()
# MIDI reference: http://www.onicos.com/staff/iz/formats/midi-event.html
if e.status < 144:
# Note was turned off
note[pitch]['active'] = 0
elif e.status < 160:
# Note was turned on
if not recording:
recording = True
# Set the starting time as the current MIDI timestamp - 2. Two ms
# must be subtracted to prevent the elapsed variable from being
# 0, which causes a ZeroDivisionError in rainbow().
starttime = pg.midi.time() - 2
note[pitch]['active'] = 1
hitcount += 1
note[pitch]['hits'] += 1
# Update statistics' ranges
if note[pitch]['hits'] < minhits:
minhits = note[pitch]['hits']
if note[pitch]['hits'] > maxhits:
maxhits = note[pitch]['hits']
elapsed = int(e.timestamp) - starttime
note[pitch]['stamp'] = elapsed
if e.data2 > note[pitch]['maxvol']:
note[pitch]['maxvol'] = e.data2
if e.data2 < minvolume:
minvolume = e.data2
deltavolume = maxvolume - minvolume + 1
if e.data2 > maxvolume:
maxvolume = e.data2
deltavolume = maxvolume - minvolume + 1
else:
continue
# Update white key displays
for i in range(52):
j = whitepitches[i]
h = note[j]['hits']
if h:
x = 11 + 15 * i
window.fill(rainbow(note[j]['stamp'], elapsed), (x, 263, 13, 86))
window.fill(rainbow(h, maxhits), (x, 383, 13, 86))
window.fill(rainbow(note[j]['maxvol'] - minvolume, deltavolume), (x, 503, 13, 86))
# Update black key displays
clear_blacks()
for i in range(36):
j = blackpitches[i]
h = note[j]['hits']
if h:
x = blackxs[i]
window.fill(rainbow(note[j]['stamp'], elapsed), (x, 263, 7, 53))
window.fill(rainbow(h, maxhits), (x, 383, 7, 53))
window.fill(rainbow(note[j]['maxvol'] - minvolume, deltavolume), (x, 503, 7, 53))
show_shadows()
# Overlay blue glows onto active keys
            for pitch in range(21, 109):
if note[pitch]['active']:
if pitch in whitepitches:
x = 15 * whitepitches.index(pitch) + 11
window.blit(wglow, (x, 320))
window.blit(wglow, (x, 440))
window.blit(wglow, (x, 560))
else:
x = blackxs[blackpitches.index(pitch)]
window.blit(bglow, (x, 287))
window.blit(bglow, (x, 407))
window.blit(bglow, (x, 527))
for i in [263, 383, 503]:
pg.display.update((11, i, 780, 86))
elif e.type in [pg.KEYDOWN]:
keyname = pg.key.name(e.key)
if keyname == "space":
# Stop recording
recording = False
clear_whites()
clear_blacks()
show_shadows()
reset_stats()
pg.display.flip()
elif keyname == "left":
# Scroll USB device selection leftward
update_dev(-1)
pg.display.flip()
elif keyname == "right":
# Scroll USB device selection rightward
update_dev(1)
pg.display.flip()
elif e.type in [pg.QUIT]:
del idev
pg.midi.quit()
sys.exit(0)
if idev.poll():
midi_events = idev.read(10)
# convert them into pg events.
midi_evs = pg.midi.midis2events(midi_events, idev.device_id)
for m_e in midi_evs:
pg.fastevent.post(m_e)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SharesOperations(object):
"""SharesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datashare.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_synchronization_details(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
share_synchronization, # type: "_models.ShareSynchronization"
skip_token=None, # type: Optional[str]
filter=None, # type: Optional[str]
orderby=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SynchronizationDetailsList"]
"""List data set level details for a share synchronization.
List synchronization details.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param share_synchronization: Share Synchronization payload.
:type share_synchronization: ~azure.mgmt.datashare.models.ShareSynchronization
:param skip_token: Continuation token.
:type skip_token: str
:param filter: Filters the results using OData syntax.
:type filter: str
:param orderby: Sorts the results using OData syntax.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SynchronizationDetailsList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.datashare.models.SynchronizationDetailsList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SynchronizationDetailsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_synchronization_details.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(share_synchronization, 'ShareSynchronization')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(share_synchronization, 'ShareSynchronization')
body_content_kwargs['content'] = body_content
request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SynchronizationDetailsList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_synchronization_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/listSynchronizationDetails'} # type: ignore
def list_synchronizations(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
skip_token=None, # type: Optional[str]
filter=None, # type: Optional[str]
orderby=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ShareSynchronizationList"]
"""List Synchronizations in a share.
List synchronizations of a share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param skip_token: Continuation token.
:type skip_token: str
:param filter: Filters the results using OData syntax.
:type filter: str
:param orderby: Sorts the results using OData syntax.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ShareSynchronizationList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.datashare.models.ShareSynchronizationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareSynchronizationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_synchronizations.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ShareSynchronizationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_synchronizations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/listSynchronizations'} # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Share"
"""Get a specified share.
Get a share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share to retrieve.
:type share_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Share, or the result of cls(response)
:rtype: ~azure.mgmt.datashare.models.Share
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Share"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Share', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}'} # type: ignore
def create(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
share, # type: "_models.Share"
**kwargs # type: Any
):
# type: (...) -> "_models.Share"
"""Create a share in the given account.
Create a share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param share: The share payload.
:type share: ~azure.mgmt.datashare.models.Share
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Share, or the result of cls(response)
:rtype: ~azure.mgmt.datashare.models.Share
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Share"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(share, 'Share')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Share', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Share', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.OperationResponse"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.OperationResponse"]
"""Deletes a share.
Delete a share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.datashare.models.OperationResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}'} # type: ignore
def list_by_account(
self,
resource_group_name, # type: str
account_name, # type: str
skip_token=None, # type: Optional[str]
filter=None, # type: Optional[str]
orderby=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ShareList"]
"""List of available shares under an account.
List shares in an account.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param skip_token: Continuation Token.
:type skip_token: str
:param filter: Filters the results using OData syntax.
:type filter: str
:param orderby: Sorts the results using OData syntax.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ShareList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.datashare.models.ShareList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_account.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ShareList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares'} # type: ignore
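# Hedged usage sketch (not part of the generated SDK): SharesOperations is
# normally reached through DataShareManagementClient as ``client.shares``
# rather than instantiated directly. The helper below is illustrative only;
# the subscription id, resource group, account and share names are
# placeholders, and azure-identity is assumed to be installed.
def _example_shares_usage():  # pragma: no cover - illustrative sketch
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.datashare import DataShareManagementClient

    client = DataShareManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )
    # list_by_account returns an ItemPaged iterator that follows next links
    # using the prepare_request/get_next machinery defined above
    for share in client.shares.list_by_account("<resource-group>", "<account>"):
        print(share.name)
    # begin_delete returns an LROPoller wrapping the _delete_initial call
    poller = client.shares.begin_delete("<resource-group>", "<account>", "<share>")
    poller.result()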
|
|
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, RandomizedPCA
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.utils import check_array
from .features import Features, as_features
# TODO: support inplace transformations here
class BagPreprocesser(BaseEstimator, TransformerMixin):
'''
    Applies a preprocessing estimator to the points of all bags, stacked
    together; transformers such as StandardScaler therefore act on each
    feature column across every bag.
This is a reasonable thing to do for some cases
(especially :class:`sklearn.preprocessing.StandardScaler`,
:class:`sklearn.preprocessing.MinMaxScaler`,
:class:`sklearn.decomposition.PCA`).
It's not a reasonable thing to do for things that rely on interactions
between points, or that change the number of output points. (Changing
the dimension is okay.)
Parameters
----------
transformer : an sklearn transformer
The transformer to apply to the stacked features. Must return the
same number of features.
'''
def __init__(self, transformer):
t = transformer
if (not (hasattr(t, "fit") or hasattr(t, 'fit_transform')) or
not hasattr(t, "transform")):
raise TypeError("The transformer doesn't have appropriate methods.")
self.transformer = t
def _gather_outputs(self, old, new):
if new.shape[0] != old.total_points:
msg = "Transformer changed number of points from {} to {}"
raise ValueError(msg.format(old.total_points, new.shape[0]))
return Features(new, old.n_pts, **old.meta)
def fit(self, X, y=None, **params):
'''
Fit the transformer on the stacked points.
Parameters
----------
X : :class:`Features` or list of arrays of shape ``[n_samples[i], n_features]``
Training set. If a Features object, it will be stacked.
any other keyword argument :
Passed on as keyword arguments to the transformer's ``fit()``.
'''
X = as_features(X, stack=True)
self.transformer.fit(X.stacked_features, y, **params)
return self
def transform(self, X, **params):
'''
Transform the stacked points.
Parameters
----------
X : :class:`Features` or list of bag feature arrays
New data to transform.
any other keyword argument :
Passed on as keyword arguments to the transformer's ``transform()``.
Returns
-------
X_new : :class:`Features`
Transformed features.
'''
X = as_features(X, stack=True)
X_new = self.transformer.transform(X.stacked_features, **params)
return self._gather_outputs(X, X_new)
def fit_transform(self, X, y=None, **params):
'''
Fit and transform the stacked points.
Parameters
----------
X : :class:`Features` or list of bag feature arrays
Data to train on and transform.
any other keyword argument :
            Passed on as keyword arguments to the transformer's ``fit_transform()``.
Returns
-------
X_new : :class:`Features`
Transformed features.
'''
X = as_features(X, stack=True)
X_new = self.transformer.fit_transform(X.stacked_features, y, **params)
return self._gather_outputs(X, X_new)
def inverse_transform(self, X, **params):
'''
Transform data back to its original space, i.e., return an input
X_original whose transform would (maybe approximately) be X.
Parameters
----------
X : :class:`Features` or list of bag feature arrays
Data to train on and transform.
any other keyword argument :
Passed on as keyword arguments to the transformer's
``inverse_transform()``.
Returns
-------
X_original : :class:`Features`
'''
X = as_features(X, stack=True)
Xo = self.transformer.inverse_transform(X.stacked_features, **params)
return self._gather_outputs(X, Xo)
class BagStandardizer(BagPreprocesser):
'''
Standardizes each feature dimension to have zero mean and unit variance,
regardless of the bag it falls into.
This is just :class:`BagPreprocesser` with
:class:`sklearn.preprocessing.StandardScaler`.
'''
def __init__(self):
super(BagStandardizer, self).__init__(StandardScaler())
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Notes
-----
This is a version of :class:`sklearn.preprocessing.MinMaxScaler`
with support for truncation added. It's been
`proposed <https://github.com/scikit-learn/scikit-learn/pull/3342>`_
for inclusion in scikit-learn, but is not yet in there.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
truncate : boolean, optional, default is False
If True, :meth:`transform` will truncate any inputs that lie outside
the min/max of the values passed to :meth:`fit` to lie on the ends
of feature_range. Normally, the transform of these points will be
outside feature_range.
fit_feature_range : None or tuple (min, max), default None
If not None, :meth:`fit` will actually rescale such that the passed
features all lie within fit_feature_range rather than just
feature_range. This is useful when truncate is True, to give
some "wiggle room" before it starts truncating. Otherwise it just
effectively overrides feature_range.
Attributes
----------
`min_` : ndarray, shape (n_features,)
Per feature adjustment for minimum.
`scale_` : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True, truncate=False,
fit_feature_range=None):
self.feature_range = feature_range
self.copy = copy
self.truncate = truncate
self.fit_feature_range = fit_feature_range
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy,
dtype=[np.float64, np.float32, np.float16, np.float128])
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if self.fit_feature_range is not None:
fit_feature_range = self.fit_feature_range
if fit_feature_range[0] >= fit_feature_range[1]:
raise ValueError("Minimum of desired (fit) feature range must "
"be smaller than maximum. Got %s."
% str(feature_range))
if (fit_feature_range[0] < feature_range[0] or
fit_feature_range[1] > feature_range[1]):
raise ValueError("fit_feature_range must be a subset of "
"feature_range. Got %s, fit %s."
% (str(feature_range),
str(fit_feature_range)))
feature_range = fit_feature_range
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
# Do not scale constant features
data_range[data_range == 0.0] = 1.0
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_array(X, copy=self.copy)
X *= self.scale_
X += self.min_
if self.truncate:
np.maximum(self.feature_range[0], X, out=X)
np.minimum(self.feature_range[1], X, out=X)
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Note that if truncate is true, any truncated points will not
be restored exactly.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_array(X, copy=self.copy)
X -= self.min_
X /= self.scale_
return X
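# Worked example (illustrative): with feature_range=(0, 1) and a training
# column [2, 4, 6], fit() finds data_min=2 and data_range=4, so
# scale_ = (1 - 0) / 4 = 0.25 and min_ = 0 - 2 * 0.25 = -0.5.
# transform([[5]]) then gives 5 * 0.25 - 0.5 = 0.75, while transform([[10]])
# gives 2.0, which truncate=True would clip back to 1.0, the top of
# feature_range.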
class BagMinMaxScaler(BagPreprocesser):
'''
Linearly scales each feature dimension to lie within the given range, for
example [0, 1].
This is just :class:`BagPreprocesser` with :class:`MinMaxScaler`.
    Parameters
    ----------
    feature_range : tuple (min, max), default = (0, 1)
        Desired range of the transformed data.
    truncate : boolean, optional, default False
        If True, transformed values outside the min/max seen during fit are
        truncated to the ends of feature_range (see :class:`MinMaxScaler`).
    fit_feature_range : None or tuple (min, max), optional, default None
        If given, fit rescales into this range instead of feature_range,
        leaving "wiggle room" before truncation (see :class:`MinMaxScaler`).
    '''
def __init__(self, feature_range=(0, 1), truncate=False,
fit_feature_range=None):
super(BagMinMaxScaler, self).__init__(MinMaxScaler(
feature_range=feature_range, truncate=truncate,
fit_feature_range=fit_feature_range))
class BagNormalizer(BagPreprocesser):
'''
Normalizes each sample individually to have unit norm (l1 or l2).
This is just :class:`BagPreprocesser` with
:class:`sklearn.preprocessing.Normalizer`.
Parameters
----------
norm : 'l1' or 'l2', optional, default 'l2'
The norm to use to normalize each nonzero sample.
'''
def __init__(self, norm='l2'):
super(BagNormalizer, self).__init__(Normalizer(norm))
DEFAULT_VARFRAC = 0.7
class BagPCA(BagPreprocesser):
'''
Runs principal components analysis to reduce the dimensionality of the
features.
This is just :class:`BagPreprocesser` with
either :class:`sklearn.decomposition.PCA`
or :class:`sklearn.decomposition.RandomizedPCA`.
Parameters
----------
k : int, optional
The dimensionality to reduce to.
mle_components : boolean, optional, default False
Use Minka's MLE for determining the number of components.
varfrac : float in (0, 1], optional, default 0.7
Use enough components to cover this fraction of the variance.
Only one of {k, mle_components, varfrac} can be passed.
randomize : boolean, optional, default False
Use a randomized PCA. This can be faster and less memory-intensive for
large inputs, but is approximate and requires specifying an explicit
number of components.
whiten : boolean, optional, default False
Whether to whiten the outputs, by dividing the components by the
singular values. This removes some information, but makes the variance
of the outputs the identity matrix.
'''
def __init__(self, k=None, mle_components=False, varfrac=None,
randomize=False, whiten=False):
n_specs = sum(1 for x in [k, mle_components, varfrac] if x)
if n_specs > 1:
msg = "can't specify number of components in more than one way"
raise TypeError(msg)
if n_specs == 0:
varfrac = DEFAULT_VARFRAC
if randomize:
if k is None:
raise TypeError("can't do random PCA without a specific k")
pca = RandomizedPCA(k, whiten=whiten)
else:
if k is not None:
n_components = k
elif mle_components:
n_components = 'mle'
elif varfrac is not None:
n_components = varfrac
pca = PCA(n_components, whiten=whiten)
super(BagPCA, self).__init__(pca)
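# Hedged usage sketch: the bag preprocessors above are typically applied to a
# list of per-bag point arrays, which as_features() stacks internally. The
# helper name and random data below are illustrative only and assume the
# surrounding Features machinery accepts plain lists of 2-D arrays.
def _example_bag_preprocessing():  # pragma: no cover - illustrative sketch
    rng = np.random.RandomState(0)
    # four bags with varying numbers of 10-dimensional points
    bags = [rng.randn(rng.randint(5, 15), 10) for _ in range(4)]

    # zero mean / unit variance per feature dimension across all points
    standardized = BagStandardizer().fit_transform(bags)
    # then reduce every point to 3 principal components
    reduced = BagPCA(k=3).fit_transform(standardized)
    return reduced  # Features object: same bag sizes, 3 features per point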
|
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import itertools
import math
import os
from rally.common import utils as cutils
class StreamingAlgorithm(object, metaclass=abc.ABCMeta):
"""Base class for streaming computations that scale."""
@abc.abstractmethod
def add(self, value):
"""Process a single value from the input stream."""
@abc.abstractmethod
def merge(self, other):
"""Merge results processed by another instance."""
@abc.abstractmethod
def result(self):
"""Return the result based on the values processed so far."""
def _cast_to_float(self, value):
try:
return float(value)
except (TypeError, ValueError):
raise TypeError("Non-numerical value: %r" % value)
class MeanComputation(StreamingAlgorithm):
"""Compute mean for a stream of numbers."""
def __init__(self):
self.total = 0.0
self.count = 0
def add(self, value):
self.count += 1
self.total += value
def merge(self, other):
self.count += other.count
self.total += other.total
def result(self):
if self.count:
return self.total / self.count
return None
class StdDevComputation(StreamingAlgorithm):
"""Compute standard deviation for a stream of numbers."""
def __init__(self):
self.count = 0
# NOTE(msdubov): To compute std, we need the auxiliary variables below.
self.dev_sum = 0.0
self.mean_computation = MeanComputation()
self.mean = 0.0
def add(self, value):
# NOTE(msdubov): This streaming method for std computation appears
# in "The Art of Computer Programming" by D. Knuth,
# Vol 2, p. 232, 3rd edition.
self.count += 1
mean_prev = self.mean
self.mean_computation.add(value)
self.mean = self.mean_computation.result()
self.dev_sum = self.dev_sum + (value - mean_prev) * (value - self.mean)
def merge(self, other):
if not other.mean_computation.count:
return
dev_sum1 = self.dev_sum
count1 = self.count
mean1 = self.mean
dev_sum2 = other.dev_sum
count2 = other.count
mean2 = other.mean
self.mean_computation.merge(other.mean_computation)
self.mean = self.mean_computation.result()
self.count += other.count
self.dev_sum = (dev_sum1 + count1 * mean1 ** 2
+ dev_sum2 + count2 * mean2 ** 2
- self.count * self.mean ** 2)
def result(self):
# NOTE(amaretskiy): Need at least two values to be processed
if self.count < 2:
return None
return math.sqrt(self.dev_sum / (self.count - 1))
class MinComputation(StreamingAlgorithm):
"""Compute minimal value from a stream of numbers."""
def __init__(self):
self._value = None
def add(self, value):
value = self._cast_to_float(value)
if self._value is None or value < self._value:
self._value = value
def merge(self, other):
if other._value is not None:
self.add(other._value)
def result(self):
return self._value
class MaxComputation(StreamingAlgorithm):
"""Compute maximal value from a stream of numbers."""
def __init__(self):
self._value = None
def add(self, value):
value = self._cast_to_float(value)
if self._value is None or value > self._value:
self._value = value
def merge(self, other):
if other._value is not None:
self.add(other._value)
def result(self):
return self._value
class PointsSaver(StreamingAlgorithm):
def __init__(self, chunk_size=10000, sep=" "):
self.chunk_size = chunk_size
self._sep = sep
self._filename = cutils.generate_random_path()
self._chunk = []
self._current_chunk_size = 0
self._deleted = False
def _dump_chunk(self):
with open(self._filename, "a") as f:
            # join with the configured separator (plus a leading one, so chunks
            # appended to the file stay delimited); result() splits on it later
            f.write(
                self._sep.join(
                    itertools.chain(
                        ("", ),
                        map(str, self._chunk))
                )
            )
self._chunk = []
self._current_chunk_size = 0
def add(self, value):
if self._deleted:
raise TypeError("Cannot add more points since %s is in deleted "
"state." % self.__class__.__name__)
self._chunk.append(value)
self._current_chunk_size += 1
if self._current_chunk_size >= self.chunk_size:
self._dump_chunk()
def merge(self, other):
if self._deleted:
raise TypeError("Cannot merge points since %s is in deleted "
"state." % self.__class__.__name__)
for point in other.result():
self.add(point)
def result(self):
if self._deleted:
raise TypeError("Cannot fetch points since %s is in deleted "
"state." % self.__class__.__name__)
if os.path.isfile(self._filename):
with open(self._filename) as f:
data = f.read().strip(self._sep)
res = [float(p) for p in data.split(self._sep) if p]
else:
            # fewer than self.chunk_size points were added, so nothing was
            # dumped to disk yet -- or no points were added at all
res = []
if self._chunk:
res.extend(self._chunk)
return res
def reset(self):
with contextlib.suppress(FileNotFoundError):
os.remove(self._filename)
self._deleted = True
self._chunk = []
self._current_chunk_size = 0
class IncrementComputation(StreamingAlgorithm):
"""Simple incremental counter."""
def __init__(self):
self._count = 0
def add(self, *args):
self._count += 1
def merge(self, other):
self._count += other._count
def result(self):
return self._count
class DegradationComputation(StreamingAlgorithm):
"""Calculates degradation from a stream of numbers
Finds min and max values from a stream and then calculates
ratio between them in percentage. Works only with positive numbers.
"""
def __init__(self):
self.min_value = MinComputation()
self.max_value = MaxComputation()
def add(self, value):
if value <= 0.0:
raise ValueError("Unexpected value: %s" % value)
self.min_value.add(value)
self.max_value.add(value)
def merge(self, other):
min_result = other.min_value.result()
if min_result is not None:
self.min_value.add(min_result)
max_result = other.max_value.result()
if max_result is not None:
self.max_value.add(max_result)
def result(self):
min_result = self.min_value.result()
max_result = self.max_value.result()
if min_result is None or max_result is None:
return 0.0
return (max_result / min_result - 1) * 100.0
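# Hedged usage sketch (not part of the Rally API): the streaming classes above
# are fed values one at a time and can be merged across workers. The helper
# name is illustrative only.
def _example_streaming_merge():  # pragma: no cover - illustrative sketch
    left, right = StdDevComputation(), StdDevComputation()
    for value in (1.0, 2.0, 3.0):
        left.add(value)
    for value in (4.0, 5.0, 6.0):
        right.add(value)
    left.merge(right)
    # sample standard deviation of 1..6 is sqrt(17.5 / 5) ~= 1.8708
    return left.result()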
|
|
import sys
import os
import shutil
import zipfile
import glob
import subprocess
import argparse
import re
import vstudio
import util
def cleanArtifacts(in_artifactsPath):
print()
print("Cleaning artifacts")
if os.path.exists(in_artifactsPath):
shutil.rmtree(in_artifactsPath)
os.mkdir(in_artifactsPath)
return
def createVersionNumber():
output = os.getenv("BUILD_NUMBER", "dev")
return output
def stampReadme(in_platform, in_version):
readmefile = "docs/README.TXT";
with open("readme.tmp", "wt") as fout:
with open(readmefile, "rt") as fin:
for line in fin:
line = re.sub(r"Platform\:.*", "Platform: " + in_platform, line);
line = re.sub(r"Version\:.*", "Version: " + in_version, line);
fout.write(line)
# this will fail if file is read-only
shutil.move("readme.tmp", "artifacts" + os.sep + "README.TXT")
return
def buildWinDesktop(artifacts, version, rebuild):
# msbuild vars
projectPath = os.path.abspath("..")
projectFile = projectPath + os.sep + "solutions" + os.sep + "windowsDesktop_vc120" + os.sep + "brainCloud_winDesktop.sln"
if rebuild:
targets = "Rebuild"
else:
targets = "Build"
print()
print("Building windows api project")
print()
sys.stdout.flush()
switches = []
switches.append("/p:Platform=Win32")
# build release version of lib
config = "Release"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
# build debug version of lib
config = "Debug"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
switches = []
switches.append("/p:Platform=x64")
# build release version of lib
config = "Release"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
# build debug version of lib
config = "Debug"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
print()
print("Zipping library")
rootPath = os.path.abspath("..")
binPath = projectPath + os.sep + "solutions" + os.sep + "windowsDesktop_vc120" + os.sep + "bin"
# zip up build directly from source files
with zipfile.ZipFile("artifacts" + os.sep + "brainCloudClient_WindowsDesktop_" + version + ".zip", "w", compression=zipfile.ZIP_DEFLATED) as myzip:
for fname in glob.iglob(binPath + os.sep + "Win32" + os.sep + "Release" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "win32" + os.sep + "release" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "Win32" + os.sep + "Debug" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "win32" + os.sep + "debug" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "x64" + os.sep + "Release" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "x64" + os.sep + "release" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "x64" + os.sep + "Debug" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "x64" + os.sep + "debug" + os.sep + os.path.basename(fname))
util.zipdir(rootPath + os.sep + "include" + os.sep, myzip, "include")
util.zipdir(rootPath + os.sep + "lib" + os.sep + "jsoncpp-1.0.0", myzip, "thirdparty" + os.sep + "jsoncpp-1.0.0")
util.zipdir(rootPath + os.sep + "lib" + os.sep + "win32" + os.sep + "cpprestsdk-static" + os.sep + "Release" + os.sep + "include", myzip, "thirdparty" + os.sep + "casablanca" + os.sep + "include")
myzip.write("artifacts" + os.sep + "README.TXT", "README.TXT")
return
def buildWinStore(artifacts, version, rebuild):
print()
print("Building windows store project")
print()
sys.stdout.flush()
projectPath = os.path.abspath("..")
projectFile = projectPath + os.sep + "solutions" + os.sep + "windowsStore_vc120" + os.sep + "brainCloud_winstore.sln"
# winstore
if rebuild:
targets = "brainCloud_winstore:Rebuild"
else:
targets = "brainCloud_winstore"
configs = []
configs.append("Debug")
configs.append("Release")
platforms = []
platforms.append("Win32")
platforms.append("ARM")
platforms.append("x64")
for platform in platforms:
for config in configs:
print()
print("BUILDING FOR PLATFORM: " + platform + " CONFIG: " + config)
switches = []
switches.append("/p:Platform=" + platform)
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
# winphone 8.1
if rebuild:
targets = "brainCloud_wp:Rebuild"
else:
targets = "brainCloud_wp"
configs = []
configs.append("Debug")
configs.append("Release")
platforms = []
platforms.append("Win32")
platforms.append("ARM")
for platform in platforms:
for config in configs:
print()
print("BUILDING FOR PLATFORM: " + platform + " CONFIG: " + config)
switches = []
switches.append("/p:Platform=" + platform)
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
# winphone 8.0
if rebuild:
targets = "brainCloud_wp8:Rebuild"
else:
targets = "brainCloud_wp8"
configs = []
configs.append("Debug")
configs.append("Release")
platforms = []
platforms.append("Win32")
platforms.append("ARM")
for platform in platforms:
for config in configs:
print()
print("BUILDING FOR PLATFORM: " + platform + " CONFIG: " + config)
switches = []
switches.append("/p:Platform=" + platform)
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
print()
print("Zipping library files")
rootPath = os.path.abspath("..")
binPath = projectPath + os.sep + "solutions" + os.sep + "windowsStore_vc120" + os.sep + "bin"
# zip up build directly from source files
with zipfile.ZipFile("artifacts" + os.sep + "brainCloudClient_WindowsStore_" + version + ".zip", "w", compression=zipfile.ZIP_DEFLATED) as myzip:
util.zipdir(rootPath + os.sep + "include" + os.sep, myzip, "include")
util.zipdir(binPath, myzip, "lib")
util.zipdir(rootPath + os.sep + "lib" + os.sep + "jsoncpp-1.0.0", myzip, "thirdparty" + os.sep + "jsoncpp-1.0.0")
util.zipdir(rootPath + os.sep + "lib" + os.sep + "win32" + os.sep + "cpprestsdk-static" + os.sep + "Release" + os.sep + "include", myzip, "thirdparty" + os.sep + "casablanca" + os.sep + "include")
myzip.write("artifacts" + os.sep + "README.TXT", "README.TXT")
return
def buildWinUwp(artifacts, version, rebuild):
# msbuild vars
projectPath = os.path.abspath("..")
projectFile = projectPath + os.sep + "solutions" + os.sep + "windowsUniversal_vc140" + os.sep + "brainCloud_uwp.sln"
if rebuild:
targets = "brainCloud:Rebuild"
else:
targets = "brainCloud"
print()
print("Building windows universal project")
print()
sys.stdout.flush()
# first restore nuget packages
cwd = projectPath + os.sep + "solutions" + os.sep + "windowsUniversal_vc140"
cmd = []
cmd.append("nuget")
cmd.append("restore")
print("Restoring nuget packages...")
subprocess.check_call(cmd, cwd=cwd)
# now build it
switches = []
switches.append("/p:Platform=x86")
# build release version of lib
config = "Release"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
# build debug version of lib
config = "Debug"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
switches = []
switches.append("/p:Platform=x64")
# build release version of lib
config = "Release"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
# build debug version of lib
config = "Debug"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
switches = []
switches.append("/p:Platform=ARM")
# build release version of lib
config = "Release"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
# build debug version of lib
config = "Debug"
vstudio.buildProject(projectFile, targets, config, in_switches=switches)
print()
print("Zipping library")
rootPath = os.path.abspath("..")
binPath = projectPath + os.sep + "solutions" + os.sep + "windowsUniversal_vc140" + os.sep + "brainCloud" + os.sep + "Output"
# zip up build directly from source files
with zipfile.ZipFile("artifacts" + os.sep + "brainCloudClient_WindowsUniversal_" + version + ".zip", "w", compression=zipfile.ZIP_DEFLATED) as myzip:
for fname in glob.iglob(binPath + os.sep + "ARM" + os.sep + "Release" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "ARM" + os.sep + "release" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "ARM" + os.sep + "Debug" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "ARM" + os.sep + "debug" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "Win32" + os.sep + "Release" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "win32" + os.sep + "release" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "Win32" + os.sep + "Debug" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "win32" + os.sep + "debug" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "x64" + os.sep + "Release" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "x64" + os.sep + "release" + os.sep + os.path.basename(fname))
for fname in glob.iglob(binPath + os.sep + "x64" + os.sep + "Debug" + os.sep + "*.*"):
myzip.write(fname, "lib" + os.sep + "x64" + os.sep + "debug" + os.sep + os.path.basename(fname))
util.zipdir(rootPath + os.sep + "include" + os.sep, myzip, "include")
util.zipdir(rootPath + os.sep + "lib" + os.sep + "jsoncpp-1.0.0", myzip, "thirdparty" + os.sep + "jsoncpp-1.0.0")
myzip.write("artifacts" + os.sep + "README.TXT", "README.TXT")
return
def main():
parser = argparse.ArgumentParser(description="Run the build")
parser.add_argument("--winDesktop", dest="buildWinDesktop", action="store_true", help="Build for win7 + win8 + win10 desktop")
parser.add_argument("--winStore", dest="buildWinStore", action="store_true", help="Build for windows store apps (and phone 8.0/8.1)")
parser.add_argument("--winUwp", dest="buildWinUwp", action="store_true", help="Build for windows universal apps (win10)")
parser.add_argument("--baseVersion", dest="baseVersion", action="store", required=True, help="Set the library version ie 2.23.0")
parser.add_argument("--rebuild", dest="rebuild", action="store_true", help="Rebuild solution instead of just build")
args = parser.parse_args()
# general vars
scmRev = createVersionNumber()
version = args.baseVersion + "." + scmRev
# general vars
artifacts = os.path.abspath("artifacts")
# clean up old builds
cleanArtifacts(artifacts)
if args.buildWinDesktop:
stampReadme("Windows Desktop", version)
buildWinDesktop(artifacts, version, args.rebuild)
if args.buildWinStore:
stampReadme("Windows Store", version)
buildWinStore(artifacts, version, args.rebuild)
if args.buildWinUwp:
stampReadme("Windows Universal", version)
buildWinUwp(artifacts, version, args.rebuild)
return
def test():
with zipfile.ZipFile("x.zip", "w", compression=zipfile.ZIP_DEFLATED) as myzip:
util.zipdir("tmp", myzip, "thirdparty" + os.sep + "casablanca", ["tmp/ignore"], ["*.meta"])
return
#test()
main()
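# Example invocations (illustrative; the script's file name is an assumption):
#   python build.py --winDesktop --baseVersion 2.23.0
#   python build.py --winStore --winUwp --baseVersion 2.23.0 --rebuild
# If BUILD_NUMBER is set in the environment (e.g. by a CI job), it is appended
# to --baseVersion to form the artifact version; otherwise "dev" is used.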
|
|
"""
Unit tests for ./yaml_parse.py
"""
import os
import numpy as np
import pickle
import tempfile
from numpy.testing import assert_
from pylearn2.config.yaml_parse import load, load_path, initialize
from os import environ
from decimal import Decimal
import yaml
from pylearn2.models.mlp import MLP, Sigmoid
from pylearn2.models.rbm import GaussianBinaryRBM
from pylearn2.space import Conv2DSpace
from pylearn2.linear.conv2d import make_random_conv2D
from pylearn2.energy_functions.rbm_energy import grbm_type_1
def test_load_path():
fd, fname = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
f.write("a: 23")
loaded = load_path(fname)
assert_(loaded['a'] == 23)
os.remove(fname)
def test_obj():
loaded = load("a: !obj:decimal.Decimal { value : '1.23' }")
assert_(isinstance(loaded['a'], Decimal))
def test_floats():
loaded = load("a: { a: -1.23, b: 1.23e-1 }")
assert_(isinstance(loaded['a']['a'], float))
assert_(isinstance(loaded['a']['b'], float))
assert_((loaded['a']['a'] + 1.23) < 1e-3)
assert_((loaded['a']['b'] - 1.23e-1) < 1e-3)
def test_import():
loaded = load("a: !import 'decimal.Decimal'")
assert_(loaded['a'] == Decimal)
def test_import_string():
loaded = load("a: !import decimal.Decimal")
assert_(loaded['a'] == Decimal)
def test_import_colon():
loaded = load("a: !import:decimal.Decimal")
assert_(loaded['a'] == Decimal)
def test_preproc_rhs():
environ['TEST_VAR'] = '10'
loaded = load('a: "${TEST_VAR}"')
print "loaded['a'] is %s" % loaded['a']
assert_(loaded['a'] == "10")
del environ['TEST_VAR']
def test_preproc_pkl():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
d = ('a', 1)
        pickle.dump(d, f)
environ['TEST_VAR'] = fname
loaded = load('a: !pkl: "${TEST_VAR}"')
assert_(loaded['a'] == d)
del environ['TEST_VAR']
def test_late_preproc_pkl():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
array = np.arange(10)
np.save(f, array)
environ['TEST_VAR'] = fname
loaded = load('a: !obj:pylearn2.datasets.npy_npz.NpyDataset '
'{ file: "${TEST_VAR}"}\n')
# Assert the unsubstituted TEST_VAR is in yaml_src
assert_(loaded['a'].yaml_src.find("${TEST_VAR}") != -1)
del environ['TEST_VAR']
def test_unpickle():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
d = {'a': 1, 'b': 2}
        pickle.dump(d, f)
loaded = load("{'a': !pkl: '%s'}" % fname)
assert_(loaded['a'] == d)
os.remove(fname)
def test_unpickle_key():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
d = ('a', 1)
        pickle.dump(d, f)
loaded = load("{!pkl: '%s': 50}" % fname)
    assert_(list(loaded.keys())[0] == d)
    assert_(list(loaded.values())[0] == 50)
os.remove(fname)
def test_multi_constructor_obj():
"""
Tests whether multi_constructor_obj throws an exception when
the keys in mapping are None.
"""
try:
loaded = load("a: !obj:decimal.Decimal { 1 }")
except TypeError as e:
assert str(e) == "Received non string object (1) as key in mapping."
pass
    except Exception as e:
error_msg = "Got the unexpected error: %s" % (e)
raise ValueError(error_msg)
def test_duplicate_keywords():
"""
    Tests that duplicate keywords in the yaml are detected and rejected.
"""
initialize()
yamlfile = """{
"model": !obj:pylearn2.models.mlp.MLP {
"layers": [
!obj:pylearn2.models.mlp.Sigmoid {
"layer_name": 'h0',
"dim": 20,
"sparse_init": 15,
}],
"nvis": 784,
"nvis": 384,
}
}"""
try:
loaded = load(yamlfile)
    except yaml.constructor.ConstructorError as e:
message = str(e)
assert message.endswith("found duplicate key (nvis)")
pass
    except Exception as e:
error_msg = "Got the unexpected error: %s" % (e)
raise TypeError(error_msg)
def test_duplicate_keywords_2():
"""
Tests whether duplicate keywords as independent parameters works fine.
"""
initialize()
yamlfile = """{
"model": !obj:pylearn2.models.rbm.GaussianBinaryRBM {
"vis_space" : &vis_space !obj:pylearn2.space.Conv2DSpace {
"shape" : [32,32],
"num_channels" : 3
},
"hid_space" : &hid_space !obj:pylearn2.space.Conv2DSpace {
"shape" : [27,27],
"num_channels" : 10
},
"transformer" :
!obj:pylearn2.linear.conv2d.make_random_conv2D {
"irange" : .05,
"input_space" : *vis_space,
"output_space" : *hid_space,
"kernel_shape" : [6,6],
"batch_size" : &batch_size 5
},
"energy_function_class" :
!obj:pylearn2.energy_functions.rbm_energy.grbm_type_1 {},
"learn_sigma" : True,
"init_sigma" : .3333,
"init_bias_hid" : -2.,
"mean_vis" : False,
"sigma_lr_scale" : 1e-3
}
}"""
loaded = load(yamlfile)
def test_parse_null_as_none():
"""
Tests whether None may be passed via yaml kwarg null.
"""
initialize()
yamlfile = """{
"model": !obj:pylearn2.models.autoencoder.Autoencoder {
"nvis" : 1024,
"nhid" : 64,
"act_enc" : Null,
"act_dec" : null
}
}"""
if __name__ == "__main__":
test_multi_constructor_obj()
test_duplicate_keywords()
test_duplicate_keywords_2()
test_unpickle_key()
|
|
"""This package contains the "front end" classes and functions
for Beaker caching.
Included are the :class:`.Cache` and :class:`.CacheManager` classes,
as well as the function decorators :func:`.cache_region` and
:func:`.region_invalidate`.
"""
import warnings
import beaker.container as container
import beaker.util as util
from beaker.crypto.util import sha1
from beaker.exceptions import BeakerException, InvalidCacheBackendError
from beaker.synchronization import _threading
import beaker.ext.memcached as memcached
import beaker.ext.database as database
import beaker.ext.sqla as sqla
import beaker.ext.google as google
# Initialize the cache region dict
cache_regions = {}
"""Dictionary of 'region' arguments.
A "region" is a string name that refers to a series of cache
configuration arguments. An application may have multiple
"regions" - one which stores things in a memory cache, one
which writes data to files, etc.
The dictionary stores string key names mapped to dictionaries
of configuration arguments. Example::
from beaker.cache import cache_regions
cache_regions.update({
'short_term':{
'expire':'60',
'type':'memory'
},
'long_term':{
'expire':'1800',
'type':'dbm',
'data_dir':'/tmp',
}
})
"""
cache_managers = {}
class _backends(object):
initialized = False
def __init__(self, clsmap):
self._clsmap = clsmap
self._mutex = _threading.Lock()
def __getitem__(self, key):
try:
return self._clsmap[key]
        except KeyError as e:
if not self.initialized:
self._mutex.acquire()
try:
if not self.initialized:
self._init()
self.initialized = True
return self._clsmap[key]
finally:
self._mutex.release()
raise e
def _init(self):
try:
import pkg_resources
# Load up the additional entry point defined backends
for entry_point in pkg_resources.iter_entry_points('beaker.backends'):
try:
namespace_manager = entry_point.load()
name = entry_point.name
if name in self._clsmap:
raise BeakerException("NamespaceManager name conflict,'%s' "
"already loaded" % name)
self._clsmap[name] = namespace_manager
except (InvalidCacheBackendError, SyntaxError):
# Ignore invalid backends
pass
except:
import sys
from pkg_resources import DistributionNotFound
# Warn when there's a problem loading a NamespaceManager
if not isinstance(sys.exc_info()[1], DistributionNotFound):
import traceback
from StringIO import StringIO
tb = StringIO()
traceback.print_exc(file=tb)
warnings.warn(
"Unable to load NamespaceManager "
"entry point: '%s': %s" % (
entry_point,
tb.getvalue()),
RuntimeWarning, 2)
except ImportError:
pass
# Initialize the basic available backends
clsmap = _backends({
'memory': container.MemoryNamespaceManager,
'dbm': container.DBMNamespaceManager,
'file': container.FileNamespaceManager,
'ext:memcached': memcached.MemcachedNamespaceManager,
'ext:database': database.DatabaseNamespaceManager,
'ext:sqla': sqla.SqlaNamespaceManager,
'ext:google': google.GoogleNamespaceManager,
})
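# Hedged illustration: additional NamespaceManager backends are discovered via
# the 'beaker.backends' entry point group iterated in _backends._init() above.
# A third-party package could register one roughly like this in its setup.py
# (package, module and class names below are hypothetical):
#
#     setup(
#         name="beaker-redis-backend",
#         entry_points={
#             "beaker.backends": [
#                 "redis = beaker_redis:RedisNamespaceManager",
#             ],
#         },
#     )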
def cache_region(region, *args):
"""Decorate a function such that its return result is cached,
using a "region" to indicate the cache arguments.
Example::
from beaker.cache import cache_regions, cache_region
# configure regions
cache_regions.update({
'short_term':{
'expire':'60',
'type':'memory'
}
})
@cache_region('short_term', 'load_things')
def load(search_term, limit, offset):
'''Load from a database given a search term, limit, offset.'''
return database.query(search_term)[offset:offset + limit]
The decorator can also be used with object methods. The ``self``
argument is not part of the cache key. This is based on the
actual string name ``self`` being in the first argument
position (new in 1.6)::
class MyThing(object):
@cache_region('short_term', 'load_things')
def load(self, search_term, limit, offset):
'''Load from a database given a search term, limit, offset.'''
return database.query(search_term)[offset:offset + limit]
Classmethods work as well - use ``cls`` as the name of the class argument,
and place the decorator around the function underneath ``@classmethod``
(new in 1.6)::
class MyThing(object):
@classmethod
@cache_region('short_term', 'load_things')
def load(cls, search_term, limit, offset):
'''Load from a database given a search term, limit, offset.'''
return database.query(search_term)[offset:offset + limit]
:param region: String name of the region corresponding to the desired
caching arguments, established in :attr:`.cache_regions`.
:param \*args: Optional ``str()``-compatible arguments which will uniquely
identify the key used by this decorated function, in addition
to the positional arguments passed to the function itself at call time.
This is recommended as it is needed to distinguish between any two functions
or methods that have the same name (regardless of parent class or not).
.. note::
The function being decorated must only be called with
positional arguments, and the arguments must support
being stringified with ``str()``. The concatenation
of the ``str()`` version of each argument, combined
with that of the ``*args`` sent to the decorator,
forms the unique cache key.
.. note::
When a method on a class is decorated, the ``self`` or ``cls``
argument in the first position is
not included in the "key" used for caching. New in 1.6.
"""
return _cache_decorate(args, None, None, region)
def region_invalidate(namespace, region, *args):
"""Invalidate a cache region corresponding to a function
decorated with :func:`.cache_region`.
:param namespace: The namespace of the cache to invalidate. This is typically
a reference to the original function (as returned by the :func:`.cache_region`
decorator), where the :func:`.cache_region` decorator applies a "memo" to
the function in order to locate the string name of the namespace.
:param region: String name of the region used with the decorator. This can be
``None`` in the usual case that the decorated function itself is passed,
not the string name of the namespace.
:param args: Stringifyable arguments that are used to locate the correct
key. This consists of the ``*args`` sent to the :func:`.cache_region`
decorator itself, plus the ``*args`` sent to the function itself
at runtime.
Example::
from beaker.cache import cache_regions, cache_region, region_invalidate
# configure regions
cache_regions.update({
'short_term':{
'expire':'60',
'type':'memory'
}
})
@cache_region('short_term', 'load_data')
def load(search_term, limit, offset):
'''Load from a database given a search term, limit, offset.'''
return database.query(search_term)[offset:offset + limit]
def invalidate_search(search_term, limit, offset):
'''Invalidate the cached storage for a given search term, limit, offset.'''
region_invalidate(load, 'short_term', 'load_data', search_term, limit, offset)
Note that when a method on a class is decorated, the first argument ``cls``
or ``self`` is not included in the cache key. This means you don't send
it to :func:`.region_invalidate`::
class MyThing(object):
@cache_region('short_term', 'some_data')
def load(self, search_term, limit, offset):
'''Load from a database given a search term, limit, offset.'''
return database.query(search_term)[offset:offset + limit]
def invalidate_search(self, search_term, limit, offset):
'''Invalidate the cached storage for a given search term, limit, offset.'''
region_invalidate(self.load, 'short_term', 'some_data', search_term, limit, offset)
"""
if callable(namespace):
if not region:
region = namespace._arg_region
namespace = namespace._arg_namespace
if not region:
raise BeakerException("Region or callable function "
"namespace is required")
else:
region = cache_regions[region]
cache = Cache._get_cache(namespace, region)
_cache_decorator_invalidate(cache, region['key_length'], args)
class Cache(object):
"""Front-end to the containment API implementing a data cache.
:param namespace: the namespace of this Cache
:param type: type of cache to use
:param expire: seconds to keep cached data
:param expiretime: seconds to keep cached data (legacy support)
    :param starttime: time when the cache was created
"""
def __init__(self, namespace, type='memory', expiretime=None,
starttime=None, expire=None, **nsargs):
try:
cls = clsmap[type]
if isinstance(cls, InvalidCacheBackendError):
raise cls
except KeyError:
raise TypeError("Unknown cache implementation %r" % type)
self.namespace_name = namespace
self.namespace = cls(namespace, **nsargs)
self.expiretime = expiretime or expire
self.starttime = starttime
self.nsargs = nsargs
@classmethod
def _get_cache(cls, namespace, kw):
key = namespace + str(kw)
try:
return cache_managers[key]
except KeyError:
cache_managers[key] = cache = cls(namespace, **kw)
return cache
def put(self, key, value, **kw):
self._get_value(key, **kw).set_value(value)
set_value = put
def get(self, key, **kw):
"""Retrieve a cached value from the container"""
return self._get_value(key, **kw).get_value()
get_value = get
def remove_value(self, key, **kw):
mycontainer = self._get_value(key, **kw)
mycontainer.clear_value()
remove = remove_value
def _get_value(self, key, **kw):
if isinstance(key, unicode):
key = key.encode('ascii', 'backslashreplace')
if 'type' in kw:
return self._legacy_get_value(key, **kw)
kw.setdefault('expiretime', self.expiretime)
kw.setdefault('starttime', self.starttime)
return container.Value(key, self.namespace, **kw)
@util.deprecated("Specifying a "
"'type' and other namespace configuration with cache.get()/put()/etc. "
"is deprecated. Specify 'type' and other namespace configuration to "
"cache_manager.get_cache() and/or the Cache constructor instead.")
def _legacy_get_value(self, key, type, **kw):
expiretime = kw.pop('expiretime', self.expiretime)
starttime = kw.pop('starttime', None)
createfunc = kw.pop('createfunc', None)
kwargs = self.nsargs.copy()
kwargs.update(kw)
c = Cache(self.namespace.namespace, type=type, **kwargs)
return c._get_value(key, expiretime=expiretime, createfunc=createfunc,
starttime=starttime)
def clear(self):
"""Clear all the values from the namespace"""
self.namespace.remove()
# dict interface
def __getitem__(self, key):
return self.get(key)
def __contains__(self, key):
return self._get_value(key).has_current_value()
def has_key(self, key):
return key in self
def __delitem__(self, key):
self.remove_value(key)
def __setitem__(self, key, value):
self.put(key, value)
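# --- Illustrative sketch (not part of the original module) ---
# A minimal example of using Cache directly through its dict-style interface;
# the namespace name and values below are hypothetical.
def _example_cache_dict_interface():
    c = Cache('example_namespace', type='memory', expire=60)
    c['answer'] = 42           # same as c.put('answer', 42)
    present = 'answer' in c    # checks has_current_value()
    value = c['answer']        # same as c.get('answer')
    del c['answer']            # same as c.remove_value('answer')
    return present, value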
class CacheManager(object):
def __init__(self, **kwargs):
"""Initialize a CacheManager object with a set of options
Options should be parsed with the
:func:`~beaker.util.parse_cache_config_options` function to
ensure only valid options are used.
"""
self.kwargs = kwargs
self.regions = kwargs.pop('cache_regions', {})
# Add these regions to the module global
cache_regions.update(self.regions)
def get_cache(self, name, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
return Cache._get_cache(name, kw)
def get_cache_region(self, name, region):
if region not in self.regions:
raise BeakerException('Cache region not configured: %s' % region)
kw = self.regions[region]
return Cache._get_cache(name, kw)
def region(self, region, *args):
"""Decorate a function to cache itself using a cache region
        The region decorator requires arguments if there is more than one
        function with the same name in the same module. This is
        because the namespace used for the function's cache is based on
        the function's name and the module.
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things():
@cache.region('short_term', 'some_data')
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
return load('rabbits', 20, 0)
.. note::
The function being decorated must only be called with
positional arguments.
"""
return cache_region(region, *args)
def region_invalidate(self, namespace, region, *args):
"""Invalidate a cache region namespace or decorated function
This function only invalidates cache spaces created with the
cache_region decorator.
:param namespace: Either the namespace of the result to invalidate, or the
cached function
:param region: The region the function was cached to. If the function was
cached to a single region then this argument can be None
:param args: Arguments that were used to differentiate the cached
function as well as the arguments passed to the decorated
function
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things(invalidate=False):
@cache.region('short_term', 'some_data')
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
# If the results should be invalidated first
if invalidate:
cache.region_invalidate(load, None, 'some_data',
'rabbits', 20, 0)
return load('rabbits', 20, 0)
"""
return region_invalidate(namespace, region, *args)
def cache(self, *args, **kwargs):
"""Decorate a function to cache itself with supplied parameters
:param args: Used to make the key unique for this function, as in region()
above.
:param kwargs: Parameters to be passed to get_cache(), will override defaults
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things():
@cache.cache('mycache', expire=15)
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
return load('rabbits', 20, 0)
.. note::
The function being decorated must only be called with
positional arguments.
"""
return _cache_decorate(args, self, kwargs, None)
def invalidate(self, func, *args, **kwargs):
"""Invalidate a cache decorated function
This function only invalidates cache spaces created with the
cache decorator.
:param func: Decorated function to invalidate
:param args: Used to make the key unique for this function, as in region()
above.
:param kwargs: Parameters that were passed for use by get_cache(), note that
this is only required if a ``type`` was specified for the
function
Example::
# Assuming a cache object is available like:
cache = CacheManager(dict_of_config_options)
def populate_things(invalidate=False):
@cache.cache('mycache', type="file", expire=15)
def load(search_term, limit, offset):
return load_the_data(search_term, limit, offset)
# If the results should be invalidated first
if invalidate:
cache.invalidate(load, 'mycache', 'rabbits', 20, 0, type="file")
return load('rabbits', 20, 0)
"""
namespace = func._arg_namespace
cache = self.get_cache(namespace, **kwargs)
if hasattr(func, '_arg_region'):
key_length = cache_regions[func._arg_region]['key_length']
else:
key_length = kwargs.pop('key_length', 250)
_cache_decorator_invalidate(cache, key_length, args)
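# --- Illustrative sketch (not part of the original module) ---
# Ties CacheManager.cache() and CacheManager.invalidate() together. The options
# are passed directly instead of going through parse_cache_config_options, and
# the names and values ('demo_key', 'rabbits', expire=30) are hypothetical.
def _example_cache_manager_roundtrip():
    manager = CacheManager(type='memory')
    @manager.cache('demo_key', expire=30)
    def load(term):
        return term.upper()
    first = load('rabbits')                        # computed, then cached
    manager.invalidate(load, 'demo_key', 'rabbits')
    second = load('rabbits')                       # recomputed after invalidation
    return first, second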
def _cache_decorate(deco_args, manager, kwargs, region):
"""Return a caching function decorator."""
cache = [None]
def decorate(func):
namespace = util.func_namespace(func)
skip_self = util.has_self_arg(func)
def cached(*args):
if not cache[0]:
if region is not None:
if region not in cache_regions:
raise BeakerException(
'Cache region not configured: %s' % region)
reg = cache_regions[region]
if not reg.get('enabled', True):
return func(*args)
cache[0] = Cache._get_cache(namespace, reg)
elif manager:
cache[0] = manager.get_cache(namespace, **kwargs)
else:
raise Exception("'manager + kwargs' or 'region' "
"argument is required")
if skip_self:
try:
cache_key = " ".join(map(str, deco_args + args[1:]))
except UnicodeEncodeError:
cache_key = " ".join(map(unicode, deco_args + args[1:]))
else:
try:
cache_key = " ".join(map(str, deco_args + args))
except UnicodeEncodeError:
cache_key = " ".join(map(unicode, deco_args + args))
if region:
key_length = cache_regions[region]['key_length']
else:
key_length = kwargs.pop('key_length', 250)
if len(cache_key) + len(namespace) > int(key_length):
if util.py3k:
cache_key = cache_key.encode('utf-8')
cache_key = sha1(cache_key).hexdigest()
def go():
return func(*args)
return cache[0].get_value(cache_key, createfunc=go)
cached._arg_namespace = namespace
if region is not None:
cached._arg_region = region
return cached
return decorate
def _cache_decorator_invalidate(cache, key_length, args):
"""Invalidate a cache key based on function arguments."""
try:
cache_key = " ".join(map(str, args))
except UnicodeEncodeError:
cache_key = " ".join(map(unicode, args))
if len(cache_key) + len(cache.namespace_name) > key_length:
if util.py3k:
cache_key = cache_key.encode('utf-8')
cache_key = sha1(cache_key).hexdigest()
cache.remove_value(cache_key)
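# --- Illustrative sketch (not part of the original module) ---
# Mirrors how _cache_decorate() and _cache_decorator_invalidate() derive the
# cache key: the str() of each positional argument is space-joined and
# SHA1-hashed once the key plus namespace exceeds key_length. The argument
# tuples passed in are hypothetical.
def _example_cache_key(namespace, deco_args, call_args, key_length=250):
    cache_key = " ".join(map(str, deco_args + call_args))
    if len(cache_key) + len(namespace) > int(key_length):
        if util.py3k:
            cache_key = cache_key.encode('utf-8')
        cache_key = sha1(cache_key).hexdigest()
    return cache_key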
|
|
#!/usr/bin/env python
"""Build the SeqAn Releases Website."""
import operator
import optparse
import os
import os.path
import re
import sys
import xml.sax.saxutils
import pyratemp
# Patterns matching seqan apps and library.
LIBRARY_PATTERN = (r'seqan-library-([0-9])\.([0-9])(?:\.([0-9]))?\.'
                   r'(tar\.gz|tar\.bz2|zip)')
APPS_PATTERN = (r'seqan-apps-([0-9])\.([0-9])(?:\.([0-9]))?-'
                r'(Linux|Mac|Windows)-(x86_64|i686)?'
                r'\.(tar\.gz|tar\.bz2|zip|exe)')
# The regular expression for matching packages of individual apps.
PACKAGE_PATTERN = (r'(.*)-([0-9]+)\.([0-9]+)(?:\.([0-9]+))?-'
                   r'(Linux|Mac|Windows)-(x86_64|i686)?'
                   r'\.(tar\.gz|tar\.bz2|zip|exe)')
# The operating systems that we expect.
OPERATING_SYSTEMS = ['Linux', 'Mac', 'Windows', 'src']
# The architectures that we expect.
ARCHITECTURES = ['x86_64', 'i686', 'src']
# The file formats.
FORMATS = ['tar.gz', 'tar.bz2', 'zip', 'exe']
# Path to template.
TPL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'release_page.html')
PACKAGE_TPL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'one_package.html')
# Base URL for links.
BASE_URL = 'http://packages.seqan.de'
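# --- Illustrative sketch (not part of the original script) ---
# Shows how PACKAGE_PATTERN decomposes an individual app package file name;
# the file name below is hypothetical.
def _example_match_package():
    m = re.match(PACKAGE_PATTERN, 'someapp-1.2-Linux-x86_64.tar.gz')
    # m.groups() == ('someapp', '1', '2', None, 'Linux', 'x86_64', 'tar.gz')
    return m.groups() if m else None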
class Arch(object):
def __init__(self, name):
self.name = name
self.files = {}
class Packages(object):
def __init__(self, os_):
self.os = os_
self.archs = {}
for arch in ARCHITECTURES:
self.archs[arch] = Arch(arch)
class Version(object):
def __init__(self, version):
self.version = version
self.packages = {}
for os_ in OPERATING_SYSTEMS:
self.packages[os_] = Packages(os_)
class Software(object):
def __init__(self, name):
self.name = name
self.versions = {}
class PackageDatabase(object):
def __init__(self, path):
self.path = path
self.seqan_apps = Software('SeqAn Apps')
self.seqan_library = Software('SeqAn Library')
self.softwares = {}
def load(self):
        # Crawl the directory structure two levels deep.
xs = []
for x in os.listdir(self.path):
if os.path.isdir(os.path.join(self.path, x)):
for y in os.listdir(os.path.join(self.path, x)):
xs.append(y)
for x in xs:
if re.match(LIBRARY_PATTERN, x):
major, minor, patch, suffix = re.match(LIBRARY_PATTERN, x).groups()
if not patch:
patch = '0'
major_minor_patch = '%s.%s.%s' % (major, minor, patch)
software = self.seqan_library
if not major_minor_patch in software.versions:
software.versions[major_minor_patch] = Version(major_minor_patch)
version = software.versions[major_minor_patch]
version.packages['src'].archs['src'].files[suffix] = x
elif re.match(APPS_PATTERN, x):
major, minor, patch, os_, arch, suffix = re.match(APPS_PATTERN, x).groups()
if not patch:
patch = '0'
major_minor_patch = '%s.%s.%s' % (major, minor, patch)
software = self.seqan_apps
if not major_minor_patch in software.versions:
software.versions[major_minor_patch] = Version(major_minor_patch)
version = software.versions[major_minor_patch]
version.packages[os_].archs[arch].files[suffix] = x
elif re.match(PACKAGE_PATTERN, x): # individual apps
filename = x
name, major, minor, patch, os_, arch, suffix = re.match(PACKAGE_PATTERN, x).groups()
if not patch:
patch = '0'
major_minor_patch = '%s.%s.%s' % (major, minor, patch)
if not name in self.softwares:
self.softwares[name] = Software(name)
software = self.softwares[name]
if not major_minor_patch in software.versions:
software.versions[major_minor_patch] = Version(major_minor_patch)
version = software.versions[major_minor_patch]
version.packages[os_].archs[arch].files[suffix] = filename
else:
pass
class RssItem(object):
"""One RSS item."""
def __init__(self, title, description, link):
self.title = title
self.description = description
self.link = link
def generate(self):
tpl = ('<item>\n'
' <title>%s</title>\n'
               ' <description>%s</description>\n'
' <link>%s</link>\n'
'</item>\n')
return tpl % (self.title, self.description, self.link)
class RssFeed(object):
"""Feed with one channel."""
def __init__(self, title, description, link):
self.title = title
self.description = description
self.link = link
self.items = []
def generate(self):
        tpl = ('<?xml version="1.0" encoding="UTF-8" ?>\n'
               '<rss version="2.0">\n'
               ' <channel>\n'
               '  <title>%s</title>\n'
               '  <description>%s</description>\n'
               '  <link>%s</link>\n'
               '\n'
               '%s'
               ' </channel>\n'
               '</rss>\n')
        items_s = '\n'.join([i.generate() for i in self.items])
        return tpl % (self.title, self.description, self.link, items_s)
class RssWriter(object):
"""Writing of RSS files for a PackageDB."""
def __init__(self, out_dir, package_db, base_url):
self.out_dir = out_dir
self.package_db = package_db
self.base_url = base_url
def generate(self):
"""Create output RSS files."""
for sname, software in self.package_db.softwares.items():
feed = RssFeed(sname, '', '')
for vname, version in software.versions.items():
description = 'Version %s of %s.' % (vname, sname)
link = '%s/%s#%s' % (self.base_url, sname, vname)
item = RssItem('%s %s' % (sname, vname), description, link)
feed.items.append(item)
path = os.path.join(self.out_dir, sname, 'package.rss')
print >>sys.stderr, 'Writing %s' % path
with open(path, 'wb') as f:
f.write(feed.generate())
def work(options):
print >>sys.stderr, 'Generating Release Site.'
print >>sys.stderr, 'Package Dir: %s' % (options.package_db,)
print >>sys.stderr, 'Out file: %s' % (options.out_file,)
db = PackageDatabase(options.package_db)
db.load()
# Load and render overview template.
tpl = pyratemp.Template(filename=TPL_PATH)
with open(options.out_file, 'wb') as f:
f.write(tpl(FORMATS=FORMATS,
seqan_apps=db.seqan_apps,
seqan_library=db.seqan_library,
softwares=db.softwares,
sorted=sorted))
# Load and render package template.
tpl = pyratemp.Template(filename=PACKAGE_TPL_PATH)
for sname, software in db.softwares.items():
out_path = os.path.join(options.package_db, sname, 'index.html')
print >>sys.stderr, 'Writing %s.' % out_path
with open(out_path, 'wb') as f:
f.write(tpl(FORMATS=FORMATS,
software=software,
sorted=sorted))
# Write out RSS feeds for the packages.
rss_writer = RssWriter(options.package_db, db, options.base_url)
rss_writer.generate()
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--package-db', dest='package_db',
help='Path to directory with package files.')
parser.add_option('-o', '--out-file', dest='out_file',
help='Path to the HTML file to generate.')
parser.add_option('-b', '--base-url', dest='base_url',
help='Base URL.', default=BASE_URL)
options, args = parser.parse_args()
if args:
parser.error('No arguments expected!')
return 1
if not options.package_db:
parser.error('Option --package-db/-d is required!')
return 1
if not options.out_file:
parser.error('Option --out-file/-o is required!')
return 1
return work(options)
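# Presumed script entry point; the original guard is not shown in this excerpt.
if __name__ == '__main__':
    sys.exit(main())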
|
|
"""
smartaws.ec2.vpc
~~~~~~~~~~~~~~~~
This module manages Amazon VPC objects and related resources.
:copyright: (c) 2016 by Yann Lambret.
:license: MIT, see LICENSE for more details.
"""
from boto3 import Session
from botocore.exceptions import ClientError
from botocore.exceptions import ParamValidationError
from smartaws import log
from smartaws.ec2 import base
logger = log.get_logger(__name__)
class VpcWrapper(base.BaseWrapper):
"""Wrapper class for :class:`boto3.resources.factory.ec2.Vpc` objects."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _add_wrapper(base_classes, **kwargs):
"""
Register the :class:`VpcWrapper` class.
See `Boto 3 Extensibility Guide
<http://boto3.readthedocs.org/en/latest/guide/events.html>`_
"""
base_classes.insert(0, VpcWrapper)
class VpcHandler(base.BaseHandler):
"""Manage a :class:`VpcWrapper` object's lifecycle."""
def __init__(self, ctx, credentials):
"""Create boto3 session and clients."""
super().__init__()
# Create a boto session and add extra
# features to the base class EC2.Vpc
event = 'creating-resource-class.ec2.Vpc'
self._session = Session(**credentials)
self._session.events.register(event, _add_wrapper)
# boto3 clients
self._ec2 = self._session.resource('ec2')
self._client = self._session.client('ec2')
# Context for the resource we want
# to create, update or delete
self._ctx = ctx
self._name = ctx.get('name')
def create_resource(self):
"""Create a VPC, or update it if it already exists."""
try:
self._resource = self._load(self._name, manager_name='vpcs')
except ValueError as err:
logger.critical(err)
return
if self._resource:
logger.info('VPC %(name)s|%(cidr_block)s already exists.', self._ctx)
else:
self._create_vpc()
# Store the object in the cache for future use
with self._lock:
self._cache[self._name] = self._resource
def update_resource(self):
"""Update a VPC."""
try:
self._resource = self._load(self._name, manager_name='vpcs')
except ValueError as err:
logger.critical(err)
return
if self._resource:
self._update_vpc()
# Store the object in the cache for future use
with self._lock:
self._cache[self._name] = self._resource
else:
logger.error('VPC %s does not exist.', self._name)
def delete_resource(self):
"""Delete a :class:`VpcWrapper` object."""
try:
self._resource = self._load(self._name, manager_name='vpcs')
except ValueError as err:
logger.critical(err)
return
if self._resource:
# If an internet gateway is attached to the VPC,
# we try to detach it first
filters = [{
'Name': 'attachment.vpc-id',
'Values': [self._resource.id]
}]
items = list(self._resource.internet_gateways.filter(Filters=filters))
if items:
igw = items[0]
try:
self._resource.detach_internet_gateway(InternetGatewayId=igw.id)
except ClientError as exc:
logger.error(exc)
# Delete the VPC
logger.info('Removing VPC %(name)s|%(cidr_block)s.', self._ctx)
try:
self._client.delete_vpc(VpcId=self._resource.id)
except ClientError as exc:
logger.error(exc)
else:
logger.error('VPC %s does not exist.', self._name)
def _create_vpc(self):
"""Create a :class:`VpcWrapper` object."""
logger.info('Creating VPC %(name)s|%(cidr_block)s.', self._ctx)
try:
self._resource = self._ec2.create_vpc(CidrBlock=self._ctx['cidr_block'])
except ClientError as exc:
logger.error(exc)
return
# Wait for the new VPC to become
# available before going any further
waiter = self._client.get_waiter('vpc_available')
waiter.wait(VpcIds=[self._resource.id])
# Set the value of the 'Name' tag
self._resource.name = self._name
# Update other object attributes
self._update_vpc()
def _update_vpc(self):
"""Update a :class:`VpcWrapper` object."""
# Set VPC attributes:
#
# - EnableDnsSupport
# - EnableDnsHostnames
#
enable_dns_support = self._ctx.get('enable_dns_support', True)
enable_dns_hostnames = self._ctx.get('enable_dns_hostnames', False)
try:
response = self._resource.describe_attribute(Attribute='enableDnsSupport')
attr = response.get('EnableDnsSupport')
if enable_dns_support != attr.get('Value'):
logger.info(
"Setting 'enable_dns_support' attribute to '%s' for VPC %s.",
enable_dns_support, self._name
)
self._resource.modify_attribute(EnableDnsSupport={'Value': enable_dns_support})
except (ClientError, ParamValidationError) as err:
logger.error(err)
try:
response = self._resource.describe_attribute(Attribute='enableDnsHostnames')
attr = response.get('EnableDnsHostnames')
if enable_dns_hostnames != attr.get('Value'):
logger.info(
"Setting 'enable_dns_hostnames' attribute to '%s' for VPC %s.",
enable_dns_hostnames, self._name
)
self._resource.modify_attribute(EnableDnsHostnames={'Value': enable_dns_hostnames})
except (ClientError, ParamValidationError) as exc:
logger.error(exc)
self._update_tags()
self._manage_dhcp_options_set()
self._manage_internet_gateway()
def _manage_dhcp_options_set(self):
"""Manage the DHCP options set association."""
dhcp_name = self._ctx.get('dhcp_options')
if not dhcp_name:
return
# Try to load the ``EC2.DhcpOptions`` object
try:
dhcp = self._load(dhcp_name, manager_name='dhcp_options_sets')
except ValueError as err:
logger.critical(err)
return
if dhcp:
if dhcp.id != self._resource.dhcp_options_id:
# Associate DHCP options set
logger.info('Associating DHCP options set %s with VPC %s.', dhcp_name, self._name)
try:
self._resource.associate_dhcp_options(DhcpOptionsId=dhcp.id)
except ClientError as exc:
logger.error(exc)
else:
logger.error(
'Unable to associate DHCP options set %s with VPC %s'
' because it does not exist.', dhcp_name, self._name
)
def _manage_internet_gateway(self):
"""Manage the internet gateway attachment."""
igw_name = self._ctx.get('internet_gateway')
filters = [{'Name': 'attachment.vpc-id', 'Values': [self._resource.id]}]
internet_gateways = list(self._ec2.internet_gateways.filter(Filters=filters))
if not igw_name:
if internet_gateways:
# If an internet gateway is attached to the VPC but not defined in
# the configuration, we don't take the responsibility to detach it
logger.warning(
'An internet gateway is still attached to VPC %s, although there'
' is no corresponding element in the configuration.', self._name
)
return
# Try to load the ``EC2.InternetGateway`` object
try:
igw = self._load(igw_name, manager_name='internet_gateways')
except ValueError as err:
logger.critical(err)
return
if igw:
if igw.attachments:
vpc_id = igw.attachments[0]['VpcId']
if vpc_id != self._resource.id:
# The internet gateway is already attached to another VPC
logger.warning(
'Unable to attach internet gateway %s to VPC %s because it '
'is already attached to another VPC.', igw_name, self._name
)
elif internet_gateways:
logger.warning(
'Unable to attach internet gateway %s to VPC %s because another internet'
' gateway is already attached to the latter.', igw_name, self._name
)
else:
logger.info(
'Attaching internet gateway %s to VPC %s.', igw_name, self._name
)
try:
self._resource.attach_internet_gateway(InternetGatewayId=igw.id)
# Update cache to reflect new internet gateway status. If
# there is another attempt to attach this gateway during
# the same program execution, a warning will be raised.
igw.reload()
with self._lock:
self._cache[igw_name] = igw
except ClientError as exc:
logger.error(exc)
else:
logger.error(
'Unable to attach internet gateway %s to VPC %s '
'because it does not exist.', igw_name, self._name
)
def create_handler(ctx, credentials):
"""
    Helper function to create a :class:`VpcHandler` object.
:param ctx:
A dictionary that contains specific values for a
:class:`boto3.resources.factory.ec2.Vpc` object
:param credentials:
A dictionary that contains AWS credentials and the target AWS region.
:return:
:class:`VpcHandler <VpcHandler>` object.
:rtype:
smartaws.ec2.vpc.VpcHandler
"""
return VpcHandler(ctx, credentials)
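# --- Illustrative sketch (not part of the original module) ---
# Shows how create_handler() might be driven; every value below (resource
# names, CIDR block, region, credential keys) is hypothetical.
def _example_create_vpc():
    ctx = {
        'name': 'demo-vpc',
        'cidr_block': '10.0.0.0/16',
        'enable_dns_support': True,
        'enable_dns_hostnames': True,
    }
    credentials = {
        'aws_access_key_id': 'EXAMPLE',
        'aws_secret_access_key': 'EXAMPLE',
        'region_name': 'eu-west-1',
    }
    handler = create_handler(ctx, credentials)
    handler.create_resource()
    return handler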
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_auth_portal
short_description: Configure firewall authentication portals in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and auth_portal category.
      Examples include all parameters; values need to be adjusted to your datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
firewall_auth_portal:
description:
- Configure firewall authentication portals.
default: null
type: dict
suboptions:
groups:
description:
- Firewall user groups permitted to authenticate through this portal. Separate group names with spaces.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
identity_based_route:
description:
- Name of the identity-based route that applies to this portal. Source firewall.identity-based-route.name.
type: str
portal_addr:
description:
- Address (or FQDN) of the authentication portal.
type: str
portal_addr6:
description:
- IPv6 address (or FQDN) of authentication portal.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure firewall authentication portals.
fortios_firewall_auth_portal:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_auth_portal:
groups:
-
name: "default_name_4 (source user.group.name)"
identity_based_route: "<your_own_value> (source firewall.identity-based-route.name)"
portal_addr: "<your_own_value>"
portal_addr6: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_auth_portal_data(json):
option_list = ['groups', 'identity_based_route', 'portal_addr',
'portal_addr6']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
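# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the payload shaping applied before the API call; the sample
# values are hypothetical.
def _example_shape_payload():
    raw = {
        'identity_based_route': 'route1',
        'portal_addr': 'portal.example.com',
        'portal_addr6': None,           # None values are dropped by the filter
        'unknown_option': 'ignored',    # keys outside option_list are dropped
    }
    shaped = underscore_to_hyphen(filter_firewall_auth_portal_data(raw))
    # shaped == {'identity-based-route': 'route1', 'portal-addr': 'portal.example.com'}
    return shaped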
def firewall_auth_portal(data, fos):
vdom = data['vdom']
firewall_auth_portal_data = data['firewall_auth_portal']
filtered_data = underscore_to_hyphen(filter_firewall_auth_portal_data(firewall_auth_portal_data))
return fos.set('firewall',
'auth-portal',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_auth_portal']:
resp = firewall_auth_portal(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"firewall_auth_portal": {
"required": False, "type": "dict", "default": None,
"options": {
"groups": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"identity_based_route": {"required": False, "type": "str"},
"portal_addr": {"required": False, "type": "str"},
"portal_addr6": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Beam runner for testing/profiling worker code directly.
"""
import collections
import logging
import time
import apache_beam as beam
from apache_beam.internal import pickler
from apache_beam.io import iobase
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.options import pipeline_options
from apache_beam.runners import DataflowRunner
from apache_beam.runners.dataflow.internal.dependency import _dependency_file_copy
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.worker import operation_specs
from apache_beam.runners.worker import operations
from apache_beam.typehints import typehints
from apache_beam.utils import profiler
from apache_beam.utils.counters import CounterFactory
try:
from apache_beam.runners.worker import statesampler
except ImportError:
from apache_beam.runners.worker import statesampler_fake as statesampler
# This module is experimental. No backwards-compatibility guarantees.
class MapTaskExecutorRunner(PipelineRunner):
"""Beam runner translating a pipeline into map tasks that are then executed.
Primarily intended for testing and profiling the worker code paths.
"""
def __init__(self):
self.executors = []
def has_metrics_support(self):
"""Returns whether this runner supports metrics or not.
"""
return False
def run_pipeline(self, pipeline):
MetricsEnvironment.set_metrics_supported(self.has_metrics_support())
    # List of map tasks. Each map task is a list of
# (stage_name, operation_specs.WorkerOperation) instructions.
self.map_tasks = []
# Map of pvalues to
# (map_task_index, producer_operation_index, producer_output_index)
self.outputs = {}
# Unique mappings of PCollections to strings.
self.side_input_labels = collections.defaultdict(
lambda: str(len(self.side_input_labels)))
    # Mapping of map task indices to all map tasks that must precede them.
self.dependencies = collections.defaultdict(set)
# Visit the graph, building up the map_tasks and their metadata.
super(MapTaskExecutorRunner, self).run_pipeline(pipeline)
# Now run the tasks in topological order.
def compute_depth_map(deps):
memoized = {}
def compute_depth(x):
if x not in memoized:
memoized[x] = 1 + max([-1] + [compute_depth(y) for y in deps[x]])
return memoized[x]
return {x: compute_depth(x) for x in deps.keys()}
map_task_depths = compute_depth_map(self.dependencies)
ordered_map_tasks = sorted((map_task_depths.get(ix, -1), map_task)
for ix, map_task in enumerate(self.map_tasks))
profile_options = pipeline.options.view_as(
pipeline_options.ProfilingOptions)
if profile_options.profile_cpu:
with profiler.Profile(
profile_id='worker-runner',
profile_location=profile_options.profile_location,
log_results=True, file_copy_fn=_dependency_file_copy):
self.execute_map_tasks(ordered_map_tasks)
else:
self.execute_map_tasks(ordered_map_tasks)
return WorkerRunnerResult(PipelineState.UNKNOWN)
def metrics_containers(self):
return [op.metrics_container
for ex in self.executors
for op in ex.operations()]
def execute_map_tasks(self, ordered_map_tasks):
tt = time.time()
for ix, (_, map_task) in enumerate(ordered_map_tasks):
logging.info('Running %s', map_task)
t = time.time()
stage_names, all_operations = zip(*map_task)
# TODO(robertwb): The DataflowRunner worker receives system step names
# (e.g. "s3") that are used to label the output msec counters. We use the
# operation names here, but this is not the same scheme used by the
# DataflowRunner; the result is that the output msec counters are named
# differently.
system_names = stage_names
# Create the CounterFactory and StateSampler for this MapTask.
# TODO(robertwb): Output counters produced here are currently ignored.
counter_factory = CounterFactory()
state_sampler = statesampler.StateSampler('%s' % ix, counter_factory)
map_executor = operations.SimpleMapTaskExecutor(
operation_specs.MapTask(
all_operations, 'S%02d' % ix,
system_names, stage_names, system_names),
counter_factory,
state_sampler)
self.executors.append(map_executor)
map_executor.execute()
logging.info(
'Stage %s finished: %0.3f sec', stage_names[0], time.time() - t)
logging.info('Total time: %0.3f sec', time.time() - tt)
def run_Read(self, transform_node):
self._run_read_from(transform_node, transform_node.transform.source)
def _run_read_from(self, transform_node, source):
"""Used when this operation is the result of reading source."""
if not isinstance(source, NativeSource):
source = iobase.SourceBundle(1.0, source, None, None)
output = transform_node.outputs[None]
element_coder = self._get_coder(output)
read_op = operation_specs.WorkerRead(source, output_coders=[element_coder])
self.outputs[output] = len(self.map_tasks), 0, 0
self.map_tasks.append([(transform_node.full_label, read_op)])
return len(self.map_tasks) - 1
def run_ParDo(self, transform_node):
transform = transform_node.transform
output = transform_node.outputs[None]
element_coder = self._get_coder(output)
map_task_index, producer_index, output_index = self.outputs[
transform_node.inputs[0]]
# If any of this ParDo's side inputs depend on outputs from this map_task,
# we can't continue growing this map task.
def is_reachable(leaf, root):
if leaf == root:
return True
else:
return any(is_reachable(x, root) for x in self.dependencies[leaf])
if any(is_reachable(self.outputs[side_input.pvalue][0], map_task_index)
for side_input in transform_node.side_inputs):
      # Start a new map task.
input_element_coder = self._get_coder(transform_node.inputs[0])
output_buffer = OutputBuffer(input_element_coder)
fusion_break_write = operation_specs.WorkerInMemoryWrite(
output_buffer=output_buffer,
write_windowed_values=True,
input=(producer_index, output_index),
output_coders=[input_element_coder])
self.map_tasks[map_task_index].append(
(transform_node.full_label + '/Write', fusion_break_write))
original_map_task_index = map_task_index
map_task_index, producer_index, output_index = len(self.map_tasks), 0, 0
fusion_break_read = operation_specs.WorkerRead(
output_buffer.source_bundle(),
output_coders=[input_element_coder])
self.map_tasks.append(
[(transform_node.full_label + '/Read', fusion_break_read)])
self.dependencies[map_task_index].add(original_map_task_index)
def create_side_read(side_input):
label = self.side_input_labels[side_input]
output_buffer = self.run_side_write(
side_input.pvalue, '%s/%s' % (transform_node.full_label, label))
return operation_specs.WorkerSideInputSource(
output_buffer.source(), label)
do_op = operation_specs.WorkerDoFn( #
serialized_fn=pickler.dumps(DataflowRunner._pardo_fn_data(
transform_node,
lambda side_input: self.side_input_labels[side_input])),
output_tags=[PropertyNames.OUT] + ['%s_%s' % (PropertyNames.OUT, tag)
for tag in transform.output_tags
],
# Same assumption that DataflowRunner has about coders being compatible
# across outputs.
output_coders=[element_coder] * (len(transform.output_tags) + 1),
input=(producer_index, output_index),
side_inputs=[create_side_read(side_input)
for side_input in transform_node.side_inputs])
producer_index = len(self.map_tasks[map_task_index])
self.outputs[transform_node.outputs[None]] = (
map_task_index, producer_index, 0)
for ix, tag in enumerate(transform.output_tags):
self.outputs[transform_node.outputs[
tag]] = map_task_index, producer_index, ix + 1
self.map_tasks[map_task_index].append((transform_node.full_label, do_op))
for side_input in transform_node.side_inputs:
self.dependencies[map_task_index].add(self.outputs[side_input.pvalue][0])
def run_side_write(self, pcoll, label):
map_task_index, producer_index, output_index = self.outputs[pcoll]
windowed_element_coder = self._get_coder(pcoll)
output_buffer = OutputBuffer(windowed_element_coder)
write_sideinput_op = operation_specs.WorkerInMemoryWrite(
output_buffer=output_buffer,
write_windowed_values=True,
input=(producer_index, output_index),
output_coders=[windowed_element_coder])
self.map_tasks[map_task_index].append(
(label, write_sideinput_op))
return output_buffer
def run__GroupByKeyOnly(self, transform_node):
map_task_index, producer_index, output_index = self.outputs[
transform_node.inputs[0]]
grouped_element_coder = self._get_coder(transform_node.outputs[None],
windowed=False)
windowed_ungrouped_element_coder = self._get_coder(transform_node.inputs[0])
output_buffer = GroupingOutputBuffer(grouped_element_coder)
shuffle_write = operation_specs.WorkerInMemoryWrite(
output_buffer=output_buffer,
write_windowed_values=False,
input=(producer_index, output_index),
output_coders=[windowed_ungrouped_element_coder])
self.map_tasks[map_task_index].append(
(transform_node.full_label + '/Write', shuffle_write))
output_map_task_index = self._run_read_from(
transform_node, output_buffer.source())
self.dependencies[output_map_task_index].add(map_task_index)
def run_Flatten(self, transform_node):
output_buffer = OutputBuffer(self._get_coder(transform_node.outputs[None]))
output_map_task = self._run_read_from(transform_node,
output_buffer.source())
for input in transform_node.inputs:
map_task_index, producer_index, output_index = self.outputs[input]
element_coder = self._get_coder(input)
flatten_write = operation_specs.WorkerInMemoryWrite(
output_buffer=output_buffer,
write_windowed_values=True,
input=(producer_index, output_index),
output_coders=[element_coder])
self.map_tasks[map_task_index].append(
(transform_node.full_label + '/Write', flatten_write))
self.dependencies[output_map_task].add(map_task_index)
def apply_CombinePerKey(self, transform, input):
# TODO(robertwb): Support side inputs.
assert not transform.args and not transform.kwargs
return (input
| PartialGroupByKeyCombineValues(transform.fn)
| beam.GroupByKey()
| MergeAccumulators(transform.fn)
| ExtractOutputs(transform.fn))
def run_PartialGroupByKeyCombineValues(self, transform_node):
element_coder = self._get_coder(transform_node.outputs[None])
_, producer_index, output_index = self.outputs[transform_node.inputs[0]]
combine_op = operation_specs.WorkerPartialGroupByKey(
combine_fn=pickler.dumps(
(transform_node.transform.combine_fn, (), {}, ())),
output_coders=[element_coder],
input=(producer_index, output_index))
self._run_as_op(transform_node, combine_op)
def run_MergeAccumulators(self, transform_node):
self._run_combine_transform(transform_node, 'merge')
def run_ExtractOutputs(self, transform_node):
self._run_combine_transform(transform_node, 'extract')
def _run_combine_transform(self, transform_node, phase):
transform = transform_node.transform
element_coder = self._get_coder(transform_node.outputs[None])
_, producer_index, output_index = self.outputs[transform_node.inputs[0]]
combine_op = operation_specs.WorkerCombineFn(
serialized_fn=pickler.dumps(
(transform.combine_fn, (), {}, ())),
phase=phase,
output_coders=[element_coder],
input=(producer_index, output_index))
self._run_as_op(transform_node, combine_op)
def _get_coder(self, pvalue, windowed=True):
# TODO(robertwb): This should be an attribute of the pvalue itself.
return DataflowRunner._get_coder(
pvalue.element_type or typehints.Any,
pvalue.windowing.windowfn.get_window_coder() if windowed else None)
def _run_as_op(self, transform_node, op):
"""Single-output operation in the same map task as its input."""
map_task_index, _, _ = self.outputs[transform_node.inputs[0]]
op_index = len(self.map_tasks[map_task_index])
output = transform_node.outputs[None]
self.outputs[output] = map_task_index, op_index, 0
self.map_tasks[map_task_index].append((transform_node.full_label, op))
class InMemorySource(iobase.BoundedSource):
"""Source for reading an (as-yet unwritten) set of in-memory encoded elements.
"""
def __init__(self, encoded_elements, coder):
self._encoded_elements = encoded_elements
self._coder = coder
def get_range_tracker(self, unused_start_position, unused_end_position):
return None
def read(self, unused_range_tracker):
for encoded_element in self._encoded_elements:
yield self._coder.decode(encoded_element)
def default_output_coder(self):
return self._coder
class OutputBuffer(object):
def __init__(self, coder):
self.coder = coder
self.elements = []
self.encoded_elements = []
def source(self):
return InMemorySource(self.encoded_elements, self.coder)
def source_bundle(self):
return iobase.SourceBundle(
1.0, InMemorySource(self.encoded_elements, self.coder), None, None)
def __repr__(self):
return 'GroupingOutput[%r]' % len(self.elements)
def append(self, value):
self.elements.append(value)
self.encoded_elements.append(self.coder.encode(value))
class GroupingOutputBuffer(object):
def __init__(self, grouped_coder):
self.grouped_coder = grouped_coder
self.elements = collections.defaultdict(list)
self.frozen = False
def source(self):
return InMemorySource(self.encoded_elements, self.grouped_coder)
def __repr__(self):
return 'GroupingOutputBuffer[%r]' % len(self.elements)
def append(self, pair):
assert not self.frozen
k, v = pair
self.elements[k].append(v)
def freeze(self):
if not self.frozen:
self._encoded_elements = [self.grouped_coder.encode(kv)
for kv in self.elements.iteritems()]
self.frozen = True
return self._encoded_elements
@property
def encoded_elements(self):
return GroupedOutputBuffer(self)
class GroupedOutputBuffer(object):
def __init__(self, buffer):
self.buffer = buffer
def __getitem__(self, ix):
return self.buffer.freeze()[ix]
def __iter__(self):
return iter(self.buffer.freeze())
def __len__(self):
return len(self.buffer.freeze())
def __nonzero__(self):
return True
class PartialGroupByKeyCombineValues(beam.PTransform):
def __init__(self, combine_fn, native=True):
self.combine_fn = combine_fn
self.native = native
def expand(self, input):
if self.native:
return beam.pvalue.PCollection(input.pipeline)
else:
def to_accumulator(v):
return self.combine_fn.add_input(
self.combine_fn.create_accumulator(), v)
return input | beam.Map(lambda k_v: (k_v[0], to_accumulator(k_v[1])))
class MergeAccumulators(beam.PTransform):
def __init__(self, combine_fn, native=True):
self.combine_fn = combine_fn
self.native = native
def expand(self, input):
if self.native:
return beam.pvalue.PCollection(input.pipeline)
else:
merge_accumulators = self.combine_fn.merge_accumulators
def merge_with_existing_key(k_vs):
return (k_vs[0], merge_accumulators(k_vs[1]))
return input | beam.Map(merge_with_existing_key)
class ExtractOutputs(beam.PTransform):
def __init__(self, combine_fn, native=True):
self.combine_fn = combine_fn
self.native = native
def expand(self, input):
if self.native:
return beam.pvalue.PCollection(input.pipeline)
else:
extract_output = self.combine_fn.extract_output
return input | beam.Map(lambda k_v1: (k_v1[0], extract_output(k_v1[1])))
class WorkerRunnerResult(PipelineResult):
def wait_until_finish(self, duration=None):
pass
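# --- Illustrative sketch (not part of the original module) ---
# Shows the OutputBuffer -> InMemorySource round trip that the runner uses to
# break fusion between map tasks; the coder and values are hypothetical.
def _example_output_buffer_roundtrip():
  from apache_beam.coders import VarIntCoder
  buf = OutputBuffer(VarIntCoder())
  for value in (1, 2, 3):
    buf.append(value)
  source = buf.source()
  return list(source.read(source.get_range_tracker(None, None)))  # [1, 2, 3]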
|
|
__author__ = 'schlitzer'
__all__ = [
'Connection',
'Hash',
'HyperLogLog',
'Key',
'List',
'Publish',
'Scripting',
'Set',
'SSet',
'String',
'Subscribe',
'Transaction'
]
class BaseCommand(object):
def __init__(self):
self._cluster = False
def execute(self, *args, **kwargs):
        raise NotImplementedError
class Connection(BaseCommand):
def __init__(self):
super().__init__()
def echo(self, *args, shard_key=None, sock=None):
""" Execute ECHO Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the key name you try to work with.
Can not be used if sock is set.
Only used if used with a Cluster Client
:type shard_key: string
:param sock: (optional)
The string representation of a socket, the command should be executed against.
For example: "testhost_6379"
Can not be used if shard_key is set.
Only used if used with a Cluster Client
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'ECHO', *args, shard_key=shard_key, sock=sock)
return self.execute(b'ECHO', *args)
def ping(self, shard_key=None, sock=None):
""" Execute PING Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the key name you try to work with.
Can not be used if sock is set.
Only used if used with a Cluster Client
:type shard_key: string
:param sock: (optional)
The string representation of a socket, the command should be executed against.
For example: "testhost_6379"
Can not be used if shard_key is set.
Only used if used with a Cluster Client
:type sock: string
:return: result,exception
"""
if self._cluster:
return self.execute(b'PING', shard_key=shard_key, sock=sock)
return self.execute(b'PING')
class Geo(BaseCommand):
def __init__(self):
super().__init__()
def geoadd(self, *args):
""" Execute GEOADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEOADD', *args, shard_key=args[0])
return self.execute(b'GEOADD', *args)
def geodist(self, *args):
""" Execute GEODIST Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEODIST', *args, shard_key=args[0])
return self.execute(b'GEODIST', *args)
def geohash(self, *args):
""" Execute GEOHASH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEOHASH', *args, shard_key=args[0])
return self.execute(b'GEOHASH', *args)
def georadius(self, *args):
""" Execute GEORADIUS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEORADIUS', *args, shard_key=args[0])
return self.execute(b'GEORADIUS', *args)
def geopos(self, *args):
""" Execute GEOPOS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEOPOS', *args, shard_key=args[0])
return self.execute(b'GEOPOS', *args)
def georadiusbymember(self, *args):
""" Execute GEORADIUSBYMEMBER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEORADIUSBYMEMBER', *args, shard_key=args[0])
return self.execute(b'GEORADIUSBYMEMBER', *args)
class Key(BaseCommand):
def __init__(self):
super().__init__()
def delete(self, *args):
""" Execute DEL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DEL', *args, shard_key=args[0])
return self.execute(b'DEL', *args)
def dump(self, *args):
""" Execute DUMP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DUMP', *args, shard_key=args[0])
return self.execute(b'DUMP', *args)
def exists(self, *args):
""" Execute EXISTS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'EXISTS', *args, shard_key=args[0])
return self.execute(b'EXISTS', *args)
def expire(self, *args):
""" Execute EXPIRE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'EXPIRE', *args, shard_key=args[0])
return self.execute(b'EXPIRE', *args)
def expireat(self, *args):
""" Execute EXPIREAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
            return self.execute(b'EXPIREAT', *args, shard_key=args[0])
return self.execute(b'EXPIREAT', *args)
def keys(self, *args, shard_key=None, sock=None):
""" Execute KEYS Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the key name you try to work with.
Can not be used if sock is set.
Only used if used with a Cluster Client
:type shard_key: string
:param sock: (optional)
The string representation of a socket, the command should be executed against.
For example: "testhost_6379"
Can not be used if shard_key is set.
Only used if used with a Cluster Client
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'KEYS', *args, shard_key=shard_key, sock=sock)
return self.execute(b'KEYS', *args)
def migrate(self, *args):
""" Execute MIGRATE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
            raise NotImplementedError
return self.execute(b'MIGRATE', *args)
def move(self, *args):
""" Execute MOVE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MOVE', *args, shard_key=args[0])
return self.execute(b'MOVE', *args)
def object(self, *args, shard_key=None, sock=None):
""" Execute OBJECT Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the key name you try to work with.
Can not be used if sock is set.
Only used if used with a Cluster Client
:type shard_key: string
:param sock: (optional)
The string representation of a socket, the command should be executed against.
For example: "testhost_6379"
Can not be used if shard_key is set.
Only used if used with a Cluster Client
:type sock: string
:return: result, exception
"""
if self._cluster:
            return self.execute(b'OBJECT', *args, shard_key=shard_key, sock=sock)
return self.execute(b'OBJECT', *args)
def persist(self, *args):
""" Execute PERSIST Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PERSIST', *args, shard_key=args[0])
return self.execute(b'PERSIST', *args)
def pexpire(self, *args):
""" Execute PEXPIRE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PEXPIRE', *args, shard_key=args[0])
return self.execute(b'PEXPIRE', *args)
def pexpireat(self, *args):
""" Execute PEXPIREAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PEXPIREAT', *args, shard_key=args[0])
return self.execute(b'PEXPIREAT', *args)
def pttl(self, *args):
""" Execute PTTL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PTTL', *args, shard_key=args[0])
return self.execute(b'PTTL', *args)
def randomkey(self, *args, shard_key=None, sock=None):
""" Execute RANDOMKEY Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'RANDOMKEY', *args, shard_key=shard_key, sock=sock)
return self.execute(b'RANDOMKEY', *args)
def rename(self, *args):
""" Execute RENAME Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RENAME', *args, shard_key=args[0])
return self.execute(b'RENAME', *args)
def renamenx(self, *args):
""" Execute RENAMENX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RENAMENX', *args, shard_key=args[0])
return self.execute(b'RENAMENX', *args)
def restore(self, *args):
""" Execute RESTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RESTORE', *args, shard_key=args[0])
return self.execute(b'RESTORE', *args)
def scan(self, *args, shard_key=None, sock=None):
""" Execute SCAN Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCAN', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCAN', *args)
def sort(self, *args):
""" Execute SORT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SORT', *args, shard_key=args[0])
return self.execute(b'SORT', *args)
def ttl(self, *args):
""" Execute TTL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'TTL', *args, shard_key=args[0])
return self.execute(b'TTL', *args)
def type(self, *args):
""" Execute TYPE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'TYPE', *args, shard_key=args[0])
return self.execute(b'TYPE', *args)
def wait(self, *args):
""" Execute WAIT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'WAIT', *args, shard_key=args[0])
return self.execute(b'WAIT', *args)
class String(BaseCommand):
def __init__(self):
super().__init__()
def append(self, *args):
""" Execute APPEND Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'APPEND', *args, shard_key=args[0])
return self.execute(b'APPEND', *args)
def bitcount(self, *args):
""" Execute BITCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITCOUNT', *args, shard_key=args[0])
return self.execute(b'BITCOUNT', *args)
def bitfield(self, *args):
""" Execute BITFIELD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITFIELD', *args, shard_key=args[0])
return self.execute(b'BITFIELD', *args)
def bitop(self, *args):
""" Execute BITOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITOP', *args, shard_key=args[1])
return self.execute(b'BITOP', *args)
def bitpos(self, *args):
""" Execute BITPOS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITPOS', *args, shard_key=args[0])
return self.execute(b'BITPOS', *args)
def decr(self, *args):
""" Execute DECR Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DECR', *args, shard_key=args[0])
return self.execute(b'DECR', *args)
def decrby(self, *args):
""" Execute DECRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DECRBY', *args, shard_key=args[0])
return self.execute(b'DECRBY', *args)
def get(self, *args):
""" Execute GET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GET', *args, shard_key=args[0])
return self.execute(b'GET', *args)
def getbit(self, *args):
""" Execute GETBIT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GETBIT', *args, shard_key=args[0])
return self.execute(b'GETBIT', *args)
def getrange(self, *args):
""" Execute GETRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GETRANGE', *args, shard_key=args[0])
return self.execute(b'GETRANGE', *args)
def getset(self, *args):
""" Execute GETSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GETSET', *args, shard_key=args[0])
return self.execute(b'GETSET', *args)
def incr(self, *args):
""" Execute INCR Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'INCR', *args, shard_key=args[0])
return self.execute(b'INCR', *args)
def incrby(self, *args):
""" Execute INCRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'INCRBY', *args, shard_key=args[0])
return self.execute(b'INCRBY', *args)
def incrbyfloat(self, *args):
""" Execute INCRBYFLOAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'INCRBYFLOAT', *args, shard_key=args[0])
return self.execute(b'INCRBYFLOAT', *args)
def mget(self, *args):
""" Execute MGET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MGET', *args, shard_key=args[0])
return self.execute(b'MGET', *args)
def mset(self, *args):
""" Execute MSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MSET', *args, shard_key=args[0])
return self.execute(b'MSET', *args)
def msetnx(self, *args):
""" Execute MSETNX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MSETNX', *args, shard_key=args[0])
return self.execute(b'MSETNX', *args)
def psetex(self, *args):
""" Execute PSETEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PSETEX', *args, shard_key=args[0])
return self.execute(b'PSETEX', *args)
def set(self, *args):
""" Execute SET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SET', *args, shard_key=args[0])
return self.execute(b'SET', *args)
def setbit(self, *args):
""" Execute SETBIT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETBIT', *args, shard_key=args[0])
return self.execute(b'SETBIT', *args)
def setex(self, *args):
""" Execute SETEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETEX', *args, shard_key=args[0])
return self.execute(b'SETEX', *args)
def setnx(self, *args):
""" Execute SETNX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETNX', *args, shard_key=args[0])
return self.execute(b'SETNX', *args)
def setrange(self, *args):
""" Execute SETRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETRANGE', *args, shard_key=args[0])
return self.execute(b'SETRANGE', *args)
def strlen(self, *args):
""" Execute STRLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'STRLEN', *args, shard_key=args[0])
return self.execute(b'STRLEN', *args)
class Hash(BaseCommand):
def __init__(self):
super().__init__()
def hdel(self, *args):
""" Execute HDEL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HDEL', *args, shard_key=args[0])
return self.execute(b'HDEL', *args)
def hexists(self, *args):
""" Execute HEXISTS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HEXISTS', *args, shard_key=args[0])
return self.execute(b'HEXISTS', *args)
def hget(self, *args):
""" Execute HGET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HGET', *args, shard_key=args[0])
return self.execute(b'HGET', *args)
def hgetall(self, *args):
""" Execute HGETALL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HGETALL', *args, shard_key=args[0])
return self.execute(b'HGETALL', *args)
def hincrby(self, *args):
""" Execute HINCRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HINCRBY', *args, shard_key=args[0])
return self.execute(b'HINCRBY', *args)
def hincrbyfloat(self, *args):
""" Execute HINCRBYFLOAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HINCRBYFLOAT', *args, shard_key=args[0])
return self.execute(b'HINCRBYFLOAT', *args)
def hkeys(self, *args):
""" Execute HKEYS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HKEYS', *args, shard_key=args[0])
return self.execute(b'HKEYS', *args)
def hlen(self, *args):
""" Execute HLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HLEN', *args, shard_key=args[0])
return self.execute(b'HLEN', *args)
def hmget(self, *args):
""" Execute HMGET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HMGET', *args, shard_key=args[0])
return self.execute(b'HMGET', *args)
def hmset(self, *args):
""" Execute HMSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HMSET', *args, shard_key=args[0])
return self.execute(b'HMSET', *args)
def hset(self, *args):
""" Execute HSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSET', *args, shard_key=args[0])
return self.execute(b'HSET', *args)
def hsetnx(self, *args):
""" Execute HSETNX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSETNX', *args, shard_key=args[0])
return self.execute(b'HSETNX', *args)
def hstrlen(self, *args):
""" Execute HSTRLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSTRLEN', *args, shard_key=args[0])
return self.execute(b'HSTRLEN', *args)
def hvals(self, *args):
""" Execute HVALS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HVALS', *args, shard_key=args[0])
return self.execute(b'HVALS', *args)
def hscan(self, *args):
""" Execute HSCAN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSCAN', *args, shard_key=args[0])
return self.execute(b'HSCAN', *args)
class List(BaseCommand):
def __init__(self):
super().__init__()
def blpop(self, *args):
""" Execute BLPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BLPOP', *args, shard_key=args[0])
return self.execute(b'BLPOP', *args)
def brpop(self, *args):
""" Execute BRPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BRPOP', *args, shard_key=args[0])
return self.execute(b'BRPOP', *args)
    def brpoplpush(self, *args):
        """ Execute BRPOPLPUSH Command, consult Redis documentation for details.
        :return: result, exception
        """
        if self._cluster:
            return self.execute(b'BRPOPLPUSH', *args, shard_key=args[0])
        return self.execute(b'BRPOPLPUSH', *args)
def lindex(self, *args):
""" Execute LINDEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LINDEX', *args, shard_key=args[0])
return self.execute(b'LINDEX', *args)
def linsert(self, *args):
""" Execute LINSERT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LINSERT', *args, shard_key=args[0])
return self.execute(b'LINSERT', *args)
def llen(self, *args):
""" Execute LLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LLEN', *args, shard_key=args[0])
return self.execute(b'LLEN', *args)
def lpop(self, *args):
""" Execute LPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LPOP', *args, shard_key=args[0])
return self.execute(b'LPOP', *args)
def lpush(self, *args):
""" Execute LPUSH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LPUSH', *args, shard_key=args[0])
return self.execute(b'LPUSH', *args)
def lpushx(self, *args):
""" Execute LPUSHX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LPUSHX', *args, shard_key=args[0])
return self.execute(b'LPUSHX', *args)
def lrange(self, *args):
""" Execute LRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LRANGE', *args, shard_key=args[0])
return self.execute(b'LRANGE', *args)
def lrem(self, *args):
""" Execute LREM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LREM', *args, shard_key=args[0])
return self.execute(b'LREM', *args)
def lset(self, *args):
""" Execute LSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LSET', *args, shard_key=args[0])
return self.execute(b'LSET', *args)
def ltrim(self, *args):
""" Execute LTRIM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LTRIM', *args, shard_key=args[0])
return self.execute(b'LTRIM', *args)
def rpop(self, *args):
""" Execute RPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPOP', *args, shard_key=args[0])
return self.execute(b'RPOP', *args)
def rpoplpush(self, *args):
""" Execute RPOPLPUSH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPOPLPUSH', *args, shard_key=args[0])
return self.execute(b'RPOPLPUSH', *args)
def rpush(self, *args):
""" Execute RPUSH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPUSH', *args, shard_key=args[0])
return self.execute(b'RPUSH', *args)
def rpushx(self, *args):
""" Execute RPUSHX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPUSHX', *args, shard_key=args[0])
return self.execute(b'RPUSHX', *args)
class Set(BaseCommand):
def __init__(self):
super().__init__()
def sadd(self, *args):
""" Execute SADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SADD', *args, shard_key=args[0])
return self.execute(b'SADD', *args)
def scard(self, *args):
""" Execute SCARD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCARD', *args, shard_key=args[0])
return self.execute(b'SCARD', *args)
def sdiff(self, *args):
""" Execute SDIFF Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SDIFF', *args, shard_key=args[0])
return self.execute(b'SDIFF', *args)
def sdiffstore(self, *args):
""" Execute SDIFFSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SDIFFSTORE', *args, shard_key=args[0])
return self.execute(b'SDIFFSTORE', *args)
def sinter(self, *args):
""" Execute SINTER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SINTER', *args, shard_key=args[0])
return self.execute(b'SINTER', *args)
def sinterstore(self, *args):
""" Execute SINTERSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SINTERSTORE', *args, shard_key=args[0])
return self.execute(b'SINTERSTORE', *args)
def sismember(self, *args):
""" Execute SISMEMBER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SISMEMBER', *args, shard_key=args[0])
return self.execute(b'SISMEMBER', *args)
def smembers(self, *args):
""" Execute SMEMBERS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SMEMBERS', *args, shard_key=args[0])
return self.execute(b'SMEMBERS', *args)
def smove(self, *args):
""" Execute SMOVE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SMOVE', *args, shard_key=args[0])
return self.execute(b'SMOVE', *args)
def spop(self, *args):
""" Execute SPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SPOP', *args, shard_key=args[0])
return self.execute(b'SPOP', *args)
def srandmember(self, *args):
""" Execute SRANDMEMBER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SRANDMEMBER', *args, shard_key=args[0])
return self.execute(b'SRANDMEMBER', *args)
def srem(self, *args):
""" Execute SREM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SREM', *args, shard_key=args[0])
return self.execute(b'SREM', *args)
def sunion(self, *args):
""" Execute SUNION Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SUNION', *args, shard_key=args[0])
return self.execute(b'SUNION', *args)
    def sunionstore(self, *args):
""" Execute SUNIONSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SUNIONSTORE', *args, shard_key=args[0])
return self.execute(b'SUNIONSTORE', *args)
def sscan(self, *args):
""" Execute SSCAN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SSCAN', *args, shard_key=args[0])
return self.execute(b'SSCAN', *args)
class SSet(BaseCommand):
def __init__(self):
super().__init__()
def zadd(self, *args):
""" Execute ZADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZADD', *args, shard_key=args[0])
return self.execute(b'ZADD', *args)
def zcard(self, *args):
""" Execute ZCARD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZCARD', *args, shard_key=args[0])
return self.execute(b'ZCARD', *args)
def zcount(self, *args):
""" Execute ZCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZCOUNT', *args, shard_key=args[0])
return self.execute(b'ZCOUNT', *args)
def zincrby(self, *args):
""" Execute ZINCRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZINCRBY', *args, shard_key=args[0])
return self.execute(b'ZINCRBY', *args)
def zinterstore(self, *args):
""" Execute ZINTERSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZINTERSTORE', *args, shard_key=args[0])
return self.execute(b'ZINTERSTORE', *args)
def zlexcount(self, *args):
""" Execute ZLEXCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZLEXCOUNT', *args, shard_key=args[0])
return self.execute(b'ZLEXCOUNT', *args)
def zrange(self, *args):
""" Execute ZRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANGE', *args, shard_key=args[0])
return self.execute(b'ZRANGE', *args)
def zrangebylex(self, *args):
""" Execute ZRANGEBYLEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANGEBYLEX', *args, shard_key=args[0])
return self.execute(b'ZRANGEBYLEX', *args)
def zrangebyscore(self, *args):
""" Execute ZRANGEBYSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANGEBYSCORE', *args, shard_key=args[0])
return self.execute(b'ZRANGEBYSCORE', *args)
def zrank(self, *args):
""" Execute ZRANK Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANK', *args, shard_key=args[0])
return self.execute(b'ZRANK', *args)
def zrem(self, *args):
""" Execute ZREM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREM', *args, shard_key=args[0])
return self.execute(b'ZREM', *args)
def zremrangebylex(self, *args):
""" Execute ZREMRANGEBYLEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREMRANGEBYLEX', *args, shard_key=args[0])
return self.execute(b'ZREMRANGEBYLEX', *args)
def zremrangebyrank(self, *args):
""" Execute ZREMRANGEBYRANK Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREMRANGEBYRANK', *args, shard_key=args[0])
return self.execute(b'ZREMRANGEBYRANK', *args)
    def zremrangebyscore(self, *args):
""" Execute ZREMRANGEBYSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREMRANGEBYSCORE', *args, shard_key=args[0])
return self.execute(b'ZREMRANGEBYSCORE', *args)
def zrevrange(self, *args):
""" Execute ZREVRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANGE', *args, shard_key=args[0])
return self.execute(b'ZREVRANGE', *args)
def zrevrangebylex(self, *args):
""" Execute ZREVRANGEBYLEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANGEBYLEX', *args, shard_key=args[0])
return self.execute(b'ZREVRANGEBYLEX', *args)
def zrevrangebyscore(self, *args):
""" Execute ZREVRANGEBYSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANGEBYSCORE', *args, shard_key=args[0])
return self.execute(b'ZREVRANGEBYSCORE', *args)
def zrevrank(self, *args):
""" Execute ZREVRANK Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANK', *args, shard_key=args[0])
return self.execute(b'ZREVRANK', *args)
def zscore(self, *args):
""" Execute ZSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZSCORE', *args, shard_key=args[0])
return self.execute(b'ZSCORE', *args)
def zunionstore(self, *args):
""" Execute ZUNIONSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZUNIONSTORE', *args, shard_key=args[0])
return self.execute(b'ZUNIONSTORE', *args)
def zscan(self, *args):
""" Execute ZSCAN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZSCAN', *args, shard_key=args[0])
return self.execute(b'ZSCAN', *args)
class HyperLogLog(BaseCommand):
def __init__(self):
super().__init__()
def pfadd(self, *args):
""" Execute PFADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PFADD', *args, shard_key=args[0])
return self.execute(b'PFADD', *args)
def pfcount(self, *args):
""" Execute PFCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PFCOUNT', *args, shard_key=args[0])
return self.execute(b'PFCOUNT', *args)
def pfmerge(self, *args):
""" Execute PFMERGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PFMERGE', *args, shard_key=args[0])
return self.execute(b'PFMERGE', *args)
class Publish(BaseCommand):
def __init__(self):
super().__init__()
def publish(self, *args):
""" Execute PUBLISH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
            raise NotImplementedError
return self.execute(b'PUBLISH', *args)
class Subscribe(object):
def write(self, *args):
        raise NotImplementedError
def psubscribe(self, *args):
""" Execute PSUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'PSUBSCRIBE', *args)
def punsubscribe(self, *args):
""" Execute PUNSUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'PUNSUBSCRIBE', *args)
def subscribe(self, *args):
""" Execute SUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'SUBSCRIBE', *args)
def unsubscribe(self, *args):
""" Execute UNSUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'UNSUBSCRIBE', *args)
class Transaction(BaseCommand):
def __init__(self):
super().__init__()
def discard(self, *args, shard_key=None, sock=None):
""" Execute DISCARD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DISCARD', *args, shard_key=shard_key, sock=sock)
return self.execute(b'DISCARD', *args)
def exec(self, *args, shard_key=None, sock=None):
""" Execute EXEC Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'EXEC', *args, shard_key=shard_key, sock=sock)
return self.execute(b'EXEC', *args)
def multi(self, *args, shard_key=None, sock=None):
""" Execute MULTI Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MULTI', *args, shard_key=shard_key, sock=sock)
return self.execute(b'MULTI', *args)
def unwatch(self, *args, shard_key=None, sock=None):
""" Execute UNWATCH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'UNWATCH', *args, shard_key=shard_key, sock=sock)
return self.execute(b'UNWATCH', *args)
def watch(self, *args):
""" Execute WATCH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'WATCH', *args, shard_key=args[0])
return self.execute(b'WATCH', *args)
class Scripting(BaseCommand):
def __init__(self):
super().__init__()
def eval(self, *args, shard_key=None, sock=None):
""" Execute EVAL Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'EVAL', *args, shard_key=shard_key, sock=sock)
return self.execute(b'EVAL', *args)
def evalsha(self, *args, shard_key=None, sock=None):
""" Execute EVALSHA Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'EVALSHA', *args, shard_key=shard_key, sock=sock)
return self.execute(b'EVALSHA', *args)
def script_debug(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT DEBUG Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'DEBUG', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'DEBUG', *args)
def script_exists(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT EXISTS Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'EXISTS', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'EXISTS', *args)
def script_flush(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT FLUSH Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'FLUSH', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'FLUSH', *args)
def script_kill(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT KILL Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'KILL', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'KILL', *args)
def script_load(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT LOAD Command, consult Redis documentation for details.
        :param shard_key: (optional)
            Should be set to the key name you are working with.
            Cannot be used if sock is set.
            Only used with a Cluster Client.
        :type shard_key: string
        :param sock: (optional)
            The string representation of the socket the command should be executed against.
            For example: "testhost_6379"
            Cannot be used if shard_key is set.
            Only used with a Cluster Client.
        :type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'LOAD', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'LOAD', *args)
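# --- Illustrative sketch (not part of the command classes above) -------------
# The cluster branches above route every call by handing ``shard_key`` to
# ``execute``; how that key is mapped to a node is up to the cluster client.
# The snippet below only sketches the general Redis Cluster idea, i.e. hashing
# the key with CRC-16/XMODEM and taking the result modulo 16384 slots. The
# names ``crc16_xmodem`` and ``hash_slot`` are invented for this example and
# are not part of the library's API.
def crc16_xmodem(data):
    """Plain-Python CRC-16/XMODEM (poly 0x1021, init 0), the checksum Redis Cluster uses for key slots."""
    crc = 0
    for byte in data:
        crc ^= byte << 8
        for _ in range(8):
            if crc & 0x8000:
                crc = ((crc << 1) ^ 0x1021) & 0xFFFF
            else:
                crc = (crc << 1) & 0xFFFF
    return crc
def hash_slot(key, slots=16384):
    """Map a key (bytes) to a cluster slot, honouring the {hash tag} convention."""
    start = key.find(b'{')
    if start != -1:
        end = key.find(b'}', start + 1)
        if end > start + 1:
            key = key[start + 1:end]
    return crc16_xmodem(key) % slots
if __name__ == '__main__':
    assert crc16_xmodem(b'123456789') == 0x31C3  # published CRC-16/XMODEM check value
    print(hash_slot(b'user:1000'), hash_slot(b'{user:1000}.following'))  # both keys land in the same slot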
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.plugins.v3 import migrate_server
from nova import exception
from nova.openstack.common import uuidutils
from nova.tests.api.openstack.compute.plugins.v3 import \
admin_only_action_common
from nova.tests.api.openstack import fakes
class MigrateServerTests(admin_only_action_common.CommonTests):
def setUp(self):
super(MigrateServerTests, self).setUp()
self.controller = migrate_server.MigrateServerController()
self.compute_api = self.controller.compute_api
def _fake_controller(*args, **kwargs):
return self.controller
self.stubs.Set(migrate_server, 'MigrateServerController',
_fake_controller)
self.app = fakes.wsgi_app_v3(init_only=('servers',
'os-migrate-server'),
fake_auth_context=self.context)
self.mox.StubOutWithMock(self.compute_api, 'get')
def test_migrate(self):
method_translations = {'migrate': 'resize',
'migrate_live': 'live_migrate'}
body_map = {'migrate_live': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}}
args_map = {'migrate_live': ((False, False, 'hostname'), {})}
self._test_actions(['migrate', 'migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
def test_migrate_with_non_existed_instance(self):
body_map = {'migrate_live': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}}
self._test_actions_with_non_existed_instance(
['migrate', 'migrate_live'], body_map=body_map)
def test_migrate_raise_conflict_on_invalid_state(self):
method_translations = {'migrate': 'resize',
'migrate_live': 'live_migrate'}
body_map = {'migrate_live': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}}
args_map = {'migrate_live': ((False, False, 'hostname'), {})}
self._test_actions_raise_conflict_on_invalid_state(
['migrate', 'migrate_live'], body_map=body_map, args_map=args_map,
method_translations=method_translations)
def test_actions_with_locked_instance(self):
method_translations = {'migrate': 'resize',
'migrate_live': 'live_migrate'}
body_map = {'migrate_live': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}}
args_map = {'migrate_live': ((False, False, 'hostname'), {})}
self._test_actions_with_locked_instance(
['migrate', 'migrate_live'], body_map=body_map, args_map=args_map,
method_translations=method_translations)
def _test_migrate_exception(self, exc_info, expected_result):
self.mox.StubOutWithMock(self.compute_api, 'resize')
instance = self._stub_instance_get()
self.compute_api.resize(self.context, instance).AndRaise(exc_info)
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance['uuid'],
{'migrate': None})
self.assertEqual(expected_result, res.status_int)
def test_migrate_too_many_instances(self):
exc_info = exception.TooManyInstances(overs='', req='', used=0,
allowed=0, resource='')
self._test_migrate_exception(exc_info, 413)
def _test_migrate_live_succeeded(self, param):
self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
instance = self._stub_instance_get()
self.compute_api.live_migrate(self.context, instance, False,
False, 'hostname')
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance.uuid,
{'migrate_live': param})
self.assertEqual(202, res.status_int)
def test_migrate_live_enabled(self):
param = {'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}
self._test_migrate_live_succeeded(param)
def test_migrate_live_enabled_with_string_param(self):
param = {'host': 'hostname',
'block_migration': "False",
'disk_over_commit': "False"}
self._test_migrate_live_succeeded(param)
def test_migrate_live_without_host(self):
res = self._make_request('/servers/FAKE/action',
{'migrate_live': {'block_migration': False,
'disk_over_commit': False}})
self.assertEqual(400, res.status_int)
def test_migrate_live_without_block_migration(self):
res = self._make_request('/servers/FAKE/action',
{'migrate_live': {'host': 'hostname',
'disk_over_commit': False}})
self.assertEqual(400, res.status_int)
def test_migrate_live_without_disk_over_commit(self):
res = self._make_request('/servers/FAKE/action',
{'migrate_live': {'host': 'hostname',
'block_migration': False}})
self.assertEqual(400, res.status_int)
def test_migrate_live_with_invalid_block_migration(self):
res = self._make_request('/servers/FAKE/action',
{'migrate_live': {'host': 'hostname',
'block_migration': "foo",
'disk_over_commit': False}})
self.assertEqual(400, res.status_int)
def test_migrate_live_with_invalid_disk_over_commit(self):
res = self._make_request('/servers/FAKE/action',
{'migrate_live': {'host': 'hostname',
'block_migration': False,
'disk_over_commit': "foo"}})
self.assertEqual(400, res.status_int)
def _test_migrate_live_failed_with_exception(self, fake_exc,
uuid=None):
self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
instance = self._stub_instance_get(uuid=uuid)
self.compute_api.live_migrate(self.context, instance, False,
False, 'hostname').AndRaise(fake_exc)
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance.uuid,
{'migrate_live':
{'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}})
self.assertEqual(400, res.status_int)
self.assertIn(unicode(fake_exc), res.body)
def test_migrate_live_compute_service_unavailable(self):
self._test_migrate_live_failed_with_exception(
exception.ComputeServiceUnavailable(host='host'))
def test_migrate_live_invalid_hypervisor_type(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidHypervisorType())
def test_migrate_live_invalid_cpu_info(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidCPUInfo(reason=""))
def test_migrate_live_unable_to_migrate_to_self(self):
uuid = uuidutils.generate_uuid()
self._test_migrate_live_failed_with_exception(
exception.UnableToMigrateToSelf(instance_id=uuid,
host='host'),
uuid=uuid)
def test_migrate_live_destination_hypervisor_too_old(self):
self._test_migrate_live_failed_with_exception(
exception.DestinationHypervisorTooOld())
def test_migrate_live_no_valid_host(self):
self._test_migrate_live_failed_with_exception(
exception.NoValidHost(reason=''))
def test_migrate_live_invalid_local_storage(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidLocalStorage(path='', reason=''))
def test_migrate_live_invalid_shared_storage(self):
self._test_migrate_live_failed_with_exception(
exception.InvalidSharedStorage(path='', reason=''))
def test_migrate_live_hypervisor_unavailable(self):
self._test_migrate_live_failed_with_exception(
exception.HypervisorUnavailable(host=""))
def test_migrate_live_instance_not_running(self):
self._test_migrate_live_failed_with_exception(
exception.InstanceNotRunning(instance_id=""))
def test_migrate_live_pre_check_error(self):
self._test_migrate_live_failed_with_exception(
exception.MigrationPreCheckError(reason=''))
|
|
from __future__ import print_function
import sys, os
import re
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from h2o import H2OFrame
from h2o.automl import H2OAutoML
from tests import pyunit_utils as pu
from _automl_utils import import_dataset, get_partitioned_model_names
def check_model_property(model_names, prop_name, present=True, actual_value=None, default_value=None):
for mn in model_names:
model = h2o.get_model(mn)
if present:
assert prop_name in model.params.keys(), \
"missing {prop} in model {model}".format(prop=prop_name, model=mn)
assert actual_value is None or model.params[prop_name]['actual'] == actual_value, \
"actual value for {prop} in model {model} is {val}, expected {exp}".format(prop=prop_name, model=mn, val=model.params[prop_name]['actual'], exp=actual_value)
assert default_value is None or model.params[prop_name]['default'] == default_value, \
"default value for {prop} in model {model} is {val}, expected {exp}".format(prop=prop_name, model=mn, val=model.params[prop_name]['default'], exp=default_value)
else:
assert prop_name not in model.params.keys(), "unexpected {prop} in model {model}".format(prop=prop_name, model=mn)
def list_keys_in_memory(project_name=None):
mem_keys = h2o.ls().key
automl_keys = [k for k in mem_keys if re.search(r'_AutoML_', k) and (project_name is None or project_name not in k)]
automl_frame_keys = [k for k in mem_keys if re.search(r'^levelone_', k)]
prediction_keys = [k for k in mem_keys if re.search(r'(^|_)prediction_', k)]
metrics_keys = [k for k in mem_keys if re.search(r'^modelmetrics_', k)]
metalearner_keys = [k for k in mem_keys if re.search(r'^metalearner', k)]
fold_keys = [k for k in mem_keys if re.search(r'_fold_', k)]
all_model_keys = [k for k in automl_keys
if k not in automl_frame_keys
and k not in prediction_keys
and k not in metrics_keys
and k not in fold_keys]
cv_keys = [k for k in mem_keys if re.search(r'(^|_)cv_', k)]
cv_prediction_keys = [k for k in cv_keys if k in prediction_keys]
cv_metrics_keys = [k for k in cv_keys if k in metrics_keys]
cv_fold_assignment = [k for k in cv_keys if k in fold_keys]
cv_model_keys = [k for k in cv_keys
if k in all_model_keys
and k not in cv_fold_assignment]
base_model_keys = [k for k in all_model_keys
if k not in cv_keys
and k not in metalearner_keys]
return dict(
all=mem_keys,
models_all=all_model_keys,
models_base=base_model_keys,
predictions=prediction_keys,
metrics=metrics_keys,
automl=automl_keys,
cv_all=cv_keys,
cv_models=cv_model_keys,
cv_predictions=cv_prediction_keys,
cv_metrics=cv_metrics_keys,
cv_fold_assignment=cv_fold_assignment,
metalearners=metalearner_keys,
)
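# Illustrative sketch only: the bucketing above depends on H2O's key naming
# conventions. The helper below applies the same regexes to a few hand-made,
# hypothetical key names (they are not real H2O keys) so the intent of
# list_keys_in_memory can be followed without a running cluster; the test
# suites never call it.
def _demo_key_bucketing():
    sample_keys = [
        'GBM_1_AutoML_20200101_000000',                 # hypothetical base model key
        'cv_3_GBM_1_AutoML_20200101_000000',            # hypothetical CV model key
        'prediction_GBM_1_AutoML_20200101_000000',      # hypothetical prediction key
        'modelmetrics_GBM_1_AutoML_20200101_000000',    # hypothetical metrics key
        'levelone_training_StackedEnsemble_AutoML_20200101_000000',
    ]
    return dict(
        automl=[k for k in sample_keys if re.search(r'_AutoML_', k)],
        cv=[k for k in sample_keys if re.search(r'(^|_)cv_', k)],
        predictions=[k for k in sample_keys if re.search(r'(^|_)prediction_', k)],
        metrics=[k for k in sample_keys if re.search(r'^modelmetrics_', k)],
        levelone_frames=[k for k in sample_keys if re.search(r'^levelone_', k)],
    )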
def test_suite_clean_cv_predictions():
kcvp = 'keep_cross_validation_predictions'
nfolds = 5
def setup_and_train(param_enabled=None):
h2o.remove_all()
ds = import_dataset()
state = 'enabled' if param_enabled is True else 'disabled' if param_enabled is False else 'default'
if param_enabled is None:
aml = H2OAutoML(project_name='keep_cross_validation_predictions_'+state,
nfolds=nfolds, max_models=3, seed=1)
else:
aml = H2OAutoML(project_name='keep_cross_validation_predictions_'+state,
nfolds=nfolds, max_models=8, seed=1,
keep_cross_validation_predictions=param_enabled)
aml.train(y=ds.target, training_frame=ds.train)
# print(aml.leaderboard)
return aml
def assert_cv_predictions_on_model(model_name, present=True):
model = h2o.get_model(model_name)
cv_predictions = model.cross_validation_predictions()
holdout_predictions = model.cross_validation_holdout_predictions()
# see last comment line below for ideal assertion if cv predictions could be returned as null,
# but this can't be done in a clean way for autoML right now due to StackedEnsemble
# assert not h2o.get_model(m).cross_validation_predictions(), "unexpected cv predictions for model "+m
for p in cv_predictions:
if present:
assert p is not None, "missing cv predictions for model "+model_name
else:
assert not p, "unexpected cv predictions for model "+model_name
if present:
assert holdout_predictions is not None, "missing holdout predictions for model "+model_name
else:
assert not holdout_predictions, "unexpected holdout predictions for model "+model_name
def test_default_behaviour():
print("\n=== "+kcvp+" default behaviour ===")
aml = setup_and_train()
models = get_partitioned_model_names(aml.leaderboard)
keys = list_keys_in_memory()
preds = len(keys['cv_predictions'])
assert preds == 0, "{preds} CV predictions were not cleaned from memory".format(preds=preds)
for m in models.base:
assert_cv_predictions_on_model(m, False)
for m in models.se:
assert not h2o.get_model(h2o.get_model(m).metalearner().model_id).cross_validation_predictions()
def test_param_enabled():
print("\n=== enabling "+kcvp+" ===")
aml = setup_and_train(True)
models = get_partitioned_model_names(aml.leaderboard)
keys = list_keys_in_memory()
preds = len(keys['cv_predictions'])
expected = len(models.all) * (nfolds + 1) # +1 for holdout prediction
assert preds == expected, "missing CV predictions in memory, got {actual}, expected {expected}".format(actual=preds, expected=expected)
for m in models.base:
assert_cv_predictions_on_model(m)
for m in models.se:
assert_cv_predictions_on_model(h2o.get_model(m).metalearner().model_id)
def test_param_disabled():
print("\n=== disabling "+kcvp+" ===")
aml = setup_and_train(False)
models = get_partitioned_model_names(aml.leaderboard)
keys = list_keys_in_memory()
preds = len(keys['cv_predictions'])
assert preds == 0, "{preds} CV predictions were not cleaned from memory".format(preds=preds)
for m in models.base:
assert_cv_predictions_on_model(m, False)
for m in models.se:
assert not h2o.get_model(h2o.get_model(m).metalearner().model_id).cross_validation_predictions()
def test_SE_retraining_fails_when_param_disabled():
print("\n=== disabling "+kcvp+" and retraining ===")
total_runs = 4
aml = setup_and_train(False) # first run
first_models = get_partitioned_model_names(aml.leaderboard)
first_bof = next(m for m in first_models.se if re.search(r'_BestOfFamily_', m))
ds = import_dataset()
for i in range(total_runs - 1):
aml.train(y=ds.target, training_frame=ds.train)
models = get_partitioned_model_names(aml.leaderboard)
first_se_all_models = [m for m in first_models.se if re.search(r'_AllModels_', m)]
se_all_models = [m for m in models.se if re.search(r'_AllModels_', m)]
se_best_of_family = [m for m in models.se if re.search(r'_BestOfFamily_', m)]
lb = aml.leaderboard
print(lb.head(lb.nrows))
assert len(models.se) == len(se_all_models) + len(se_best_of_family)
assert len(se_all_models) == len(first_se_all_models), \
"expecting only the {} first StackedEnsemble_AllModels, but got {}".format(len(first_se_all_models), len(se_all_models))
assert se_all_models[0] in first_models.se, "first StackedEnsemble_AllModels got replaced by new one"
if len(se_best_of_family) > 1:
assert first_bof in se_best_of_family, "first StackedEnsemble_BestOfFamily disappeared after multiple runs"
row_of = lambda id: lb[lb['model_id'] == id]
first_bof_row = row_of(first_bof)
assert all(all(row[i] == first_bof_row[i] for i in range(1, lb.ncols)) for row in [row_of(se) for se in se_best_of_family]), \
"expecting possibly 2+ similar StackedEnsemble_BestOfFamily (corner case), but managed to obtain 2 different ones!"
else:
assert len(se_best_of_family) == 1, "expecting only the first StackedEnsemble_BestOfFamily, but got {}".format(len(se_best_of_family))
            assert se_best_of_family[0] == first_bof, "first StackedEnsemble_BestOfFamily got replaced by new one"
def test_SE_retraining_works_when_param_enabled():
print("\n=== enabling "+kcvp+" and retraining ===")
total_runs = 4
aml = setup_and_train(True) # first run
ds = import_dataset()
for i in range(total_runs - 1):
aml.train(y=ds.target, training_frame=ds.train)
models = get_partitioned_model_names(aml.leaderboard)
se_all_models = [m for m in models.se if re.search(r'_AllModels_', m)]
se_best_of_family = [m for m in models.se if re.search(r'_BestOfFamily_', m)]
assert len(models.se) == len(se_all_models) + len(se_best_of_family)
assert len(se_best_of_family) + len(se_all_models) >= total_runs, "some StackedEnsembles are missing"
return [
test_default_behaviour,
test_param_enabled,
test_param_disabled,
test_SE_retraining_fails_when_param_disabled,
test_SE_retraining_works_when_param_enabled
]
def test_suite_clean_cv_models():
kcvm = 'keep_cross_validation_models'
nfolds = 5
def setup_and_train(param_enabled=None):
h2o.remove_all()
ds = import_dataset()
state = 'enabled' if param_enabled is True else 'disabled' if param_enabled is False else 'default'
if param_enabled is None:
            aml = H2OAutoML(project_name='keep_cross_validation_models_'+state,
nfolds=nfolds, max_models=3, seed=1)
else:
            aml = H2OAutoML(project_name='keep_cross_validation_models_'+state,
nfolds=nfolds, max_models=8, seed=1,
keep_cross_validation_models=param_enabled)
aml.train(y=ds.target, training_frame=ds.train)
# print(aml.leaderboard)
return aml
def test_default_behaviour():
print("\n=== "+kcvm+" default behaviour ===")
aml = setup_and_train()
models = get_partitioned_model_names(aml.leaderboard)
check_model_property(models.se, kcvm, False)
check_model_property(models.base, kcvm, True, False, True)
keys = list_keys_in_memory()
tot, cv = len(keys['models_all']), len(keys['cv_models'])
print("total models in memory = {tot}, among which {cv} CV models".format(tot=tot, cv=cv))
assert tot > 0, "no models left in memory"
assert cv == 0, "{cv} CV models were not cleaned from memory".format(cv=cv)
for m in models.base:
assert not h2o.get_model(m).cross_validation_models(), "unexpected cv models for model "+m
for m in models.se:
metal = h2o.get_model(h2o.get_model(m).metalearner().model_id)
assert not metal.cross_validation_models(), "unexpected cv models for metalearner of model "+m
def test_param_enabled():
print("\n=== enabling "+kcvm+" ===")
aml = setup_and_train(True)
models = get_partitioned_model_names(aml.leaderboard)
check_model_property(models.se, kcvm, False)
check_model_property(models.base, kcvm, True, True, True)
keys = list_keys_in_memory()
tot, cv = len(keys['models_all']), len(keys['cv_models'])
print("total models in memory = {tot}, among which {cv} CV models".format(tot=tot, cv=cv))
assert tot > 0, "no models left in memory"
expected = len(models.all) * nfolds
assert cv == expected, "missing CV models in memory, got {actual}, expected {expected}".format(actual=cv, expected=expected)
for m in models.base:
assert h2o.get_model(m).cross_validation_models(), "missing cv models for model "+m
for m in models.se:
metal = h2o.get_model(h2o.get_model(m).metalearner().model_id)
assert metal.cross_validation_models(), "missing cv models for metalearner of model "+m
def test_param_disabled():
print("\n=== disabling "+kcvm+" ===")
aml = setup_and_train(False)
models = get_partitioned_model_names(aml.leaderboard)
check_model_property(models.se, kcvm, False)
check_model_property(models.base, kcvm, True, False, True)
keys = list_keys_in_memory()
tot, cv = len(keys['models_all']), len(keys['cv_models'])
print("total models in memory = {tot}, among which {cv} CV models".format(tot=tot, cv=cv))
assert tot > 0, "no models left in memory"
assert cv == 0, "{cv} CV models were not cleaned from memory".format(cv=cv)
for m in models.base:
assert not h2o.get_model(m).cross_validation_models(), "unexpected cv models for model "+m
for m in models.se:
metal = h2o.get_model(h2o.get_model(m).metalearner().model_id)
assert not metal.cross_validation_models(), "unexpected cv models for metalearner of model "+m
return [
test_default_behaviour,
test_param_enabled,
test_param_disabled,
]
def test_suite_remove_automl():
def contains_leaderboard(project_name, keys):
return "Leaderboard_{}".format(project_name) in keys['all'].values
def contains_event_log(project_name, keys):
return "Events_{}".format(project_name) in keys['all'].values
def frame_in_cluster(frame):
# reload the first row of the frame to verify that no vec has been removed
return frame.key is not None and H2OFrame.get_frame(frame.key, rows=1) is not None
def test_remove_automl_with_xval():
ds = import_dataset()
project_name = 'aml_with_xval_remove_test'
max_models = 5
nfolds = 5
aml = H2OAutoML(project_name=project_name,
nfolds=nfolds,
max_models=max_models,
seed=1)
aml.train(y=ds.target, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
keys = list_keys_in_memory()
assert aml.key.startswith(project_name)
assert contains_leaderboard(aml.key, keys)
assert contains_event_log(aml.key, keys)
num_SEs = len(keys['metalearners'])
print({k: len(v) for k, v in keys.items()})
expectations = dict(
models_base=max_models + num_SEs,
cv_models=0,
predictions=0,
metrics=(max_models * 3 # for each non-SE model, 1 on training_frame, 1 on validation_frame, 1 on leaderboard_frame
+ (num_SEs * 2) # for each SE model, 1 on training frame, 1 on leaderboard frame
+ (num_SEs * 2) # for each SE metalearner, 1+1 on levelone training+validation
+ (1 if any(("DeepLearning" in x for x in keys["metrics"])) else 0) # DeepLearning has 2 training metrics (IDK why)
)
)
for k, v in expectations.items():
assert len(keys[k]) == v, "expected {} {}, but got {}".format(v, k, len(keys[k]))
h2o.remove(aml)
clean = list_keys_in_memory()
print(clean['all'].values)
assert not contains_leaderboard(aml.key, clean)
assert not contains_event_log(aml.key, clean)
assert len(clean['models_base']) == 0
assert len(clean['cv_models']) == 0
assert len(clean['models_all']) == 0
assert len(clean['predictions']) == 0
assert len(clean['metrics']) == 0
assert len(clean['automl']) == 0
for frame in [ds.train, ds.valid, ds.test]:
assert frame_in_cluster(frame), "frame {} has been removed from cluster".format(frame.frame_id)
def test_remove_automl_with_xval_when_keeping_all_cv_details():
ds = import_dataset()
project_name = 'aml_with_xval_remove_test'
max_models = 5
nfolds = 5
aml = H2OAutoML(project_name=project_name,
nfolds=nfolds,
max_models=max_models,
seed=1,
keep_cross_validation_predictions=True,
keep_cross_validation_fold_assignment=True,
keep_cross_validation_models=True)
aml.train(y=ds.target, training_frame=ds.train)
keys = list_keys_in_memory()
# print(keys['all'].values)
assert aml.key.startswith(project_name)
assert contains_leaderboard(aml.key, keys)
assert contains_event_log(aml.key, keys)
        num_SEs = len(keys['metalearners']) // (nfolds + 1)  # keeping cv models, so metalearners include cv models
print({k: len(v) for k, v in keys.items()})
expectations = dict(
models_base=max_models + num_SEs,
cv_models=(max_models+num_SEs) * nfolds, # 1 cv model per fold for all models, incl. SEs
predictions=(len(keys['cv_models']) # cv predictions
+ len(keys['models_base']) # cv holdout predictions
),
metrics=(len(keys['cv_models']) * 3 # for each cv model, 1 on training frame, 1 on validation frame (=training for cv), one on adapted frame (to be removed with PUBDEV-6638)
+ len(keys['models_base']) # for each model, 1 on training_frame
+ (num_SEs * 1) # for each SE, 1 on levelone training
+ (1 if any(("DeepLearning" in x for x in keys["metrics"])) else 0) # DeepLearning has 2 training metrics (IDK why)
)
)
for k, v in expectations.items():
assert len(keys[k]) == v, "expected {} {}, but got {}".format(v, k, len(keys[k]))
h2o.remove(aml)
clean = list_keys_in_memory()
print(clean['all'].values)
assert not contains_leaderboard(aml.key, clean)
assert not contains_event_log(aml.key, clean)
assert len(clean['models_base']) == 0
assert len(clean['cv_models']) == 0
assert len(clean['models_all']) == 0
assert len(clean['predictions']) == 0
assert len(clean['metrics']) == 0
assert len(clean['automl']) == 0
for frame in [ds.train, ds.valid, ds.test]:
assert frame_in_cluster(frame), "frame {} has been removed from cluster".format(frame.frame_id)
def test_remove_automl_no_xval():
ds = import_dataset()
project_name = 'aml_no_xval_remove_test'
max_models = 5
aml = H2OAutoML(project_name=project_name,
nfolds=0,
max_models=max_models,
seed=1)
aml.train(y=ds.target, training_frame=ds.train, blending_frame=ds.valid)
keys = list_keys_in_memory()
# print(keys['all'].values)
assert aml.key.startswith(project_name)
assert contains_leaderboard(aml.key, keys)
assert contains_event_log(aml.key, keys)
num_SEs = len(keys['metalearners'])
print({k: len(v) for k, v in keys.items()})
expectations = dict(
models_base=max_models + num_SEs,
cv_models=0,
predictions=0,
metrics=(2*len(keys['models_all'])) # for each model, 1 on training_frame, 1 on validation frame which is also the leaderboard frame
)
for k, v in expectations.items():
assert len(keys[k]) == v, "expected {} {}, but got {}".format(v, k, len(keys[k]))
h2o.remove(aml)
clean = list_keys_in_memory()
print(clean['all'].values)
assert not contains_leaderboard(aml.key, clean)
assert not contains_event_log(aml.key, clean)
assert len(clean['models_base']) == 0
assert len(clean['cv_models']) == 0
assert len(clean['models_all']) == 0
assert len(clean['metrics']) == 0
assert len(clean['predictions']) == 0
assert len(clean['automl']) == 0
for frame in [ds.train, ds.valid, ds.test]:
assert frame_in_cluster(frame), "frame {} has been removed from cluster".format(frame.frame_id)
def test_remove_automl_after_individual_manual_deletions():
ds = import_dataset()
project_name='aml_no_xval_remove_test'
max_models = 3
aml = H2OAutoML(project_name=project_name,
nfolds=0,
max_models=max_models,
seed=1)
aml.train(y=ds.target, training_frame=ds.train, blending_frame=ds.valid)
keys = list_keys_in_memory()
# manually remove the first item for each category to verify robustness of global automl deletion
# for example, to verify that exceptions (if any) are handled correctly when automl is trying to remove a base model that was already removed
for k, v in keys.items():
if k == 'all': continue
if len(v) > 0:
h2o.remove(v[0])
h2o.remove(aml)
clean = list_keys_in_memory()
print(clean['all'].values)
assert aml.key.startswith(project_name)
assert not contains_leaderboard(aml.key, clean)
assert not contains_event_log(aml.key, clean)
assert len(clean['models_base']) == 0
assert len(clean['cv_models']) == 0
assert len(clean['models_all']) == 0
assert len(clean['metrics']) == 0
assert len(clean['predictions']) == 0
assert len(clean['automl']) == 0
for frame in [ds.train, ds.valid, ds.test]:
assert frame_in_cluster(frame), "frame {} has been removed from cluster".format(frame.frame_id)
return [
test_remove_automl_with_xval,
test_remove_automl_with_xval_when_keeping_all_cv_details,
test_remove_automl_no_xval,
test_remove_automl_after_individual_manual_deletions
]
pu.run_tests([
test_suite_clean_cv_predictions(),
test_suite_clean_cv_models(),
test_suite_remove_automl()
])
|
|
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the fstabfailures module
"""
import sys
import unittest
import mock
import moduletests.src.fstabfailures
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
# builtins was named __builtin__ in Python 2 so accommodate the change for the purposes of mocking the open call
if sys.version_info >= (3,):
builtins_name = "builtins"
else:
builtins_name = "__builtin__"
class Testfstabfailures(unittest.TestCase):
config_file_path = "/etc/fstab"
def setUp(self):
self.output = StringIO()
def tearDown(self):
self.output.close()
@mock.patch("moduletests.src.fstabfailures.open", mock.mock_open(
read_data="LABEL=/ / ext4 defaults,noatime,nofail 0 0\n"))
def test_alami_defaultfstab(self):
self.assertTrue(moduletests.src.fstabfailures.write_default_fstab(self.config_file_path, "alami"))
@mock.patch("moduletests.src.fstabfailures.open", mock.mock_open(
read_data="LABEL=/ / xfs defaults,noatime,nofail 0 0\n"))
def test_alami2_defaultfstab(self):
self.assertTrue(moduletests.src.fstabfailures.write_default_fstab(self.config_file_path, "alami2"))
@mock.patch("moduletests.src.fstabfailures.open", mock.mock_open(
read_data="LABEL=/ / ext4 defaults,noatime,nofail 0 0\n"))
def test_suse_defaultfstab(self):
self.assertTrue(moduletests.src.fstabfailures.write_default_fstab(self.config_file_path, "suse"))
@mock.patch("moduletests.src.fstabfailures.open", mock.mock_open(
read_data="LABEL=/ / ext4 defaults,noatime,nofail 0 0\n"))
def test_rhel_defaultfstab(self):
self.assertTrue(moduletests.src.fstabfailures.write_default_fstab(self.config_file_path, "rhel"))
@mock.patch("moduletests.src.fstabfailures.open", mock.mock_open(
read_data="LABEL=/ / ext4 defaults,noatime,nofail 0 0\n"))
def test_ubuntu_defaultfstab(self):
self.assertTrue(moduletests.src.fstabfailures.write_default_fstab(self.config_file_path, "ubuntu"))
@mock.patch("moduletests.src.fstabfailures.open", mock.mock_open(
read_data="LABEL=/ / ext4 defaults,noatime,nofail 0 0\n"))
def test_nodistro_defaultfstab(self):
with self.assertRaises(ValueError) as ve:
moduletests.src.fstabfailures.write_default_fstab(self.config_file_path, "invalid distro string")
        self.assertEqual(str(ve.exception), "Invalid distribution. Unable to continue.")
@mock.patch("moduletests.src.fstabfailures.open", side_effect=IOError)
def test_exception_defaultfstab(self, open_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.fstabfailures.write_default_fstab,
self.config_file_path,
"alami")
self.assertEqual(self.output.getvalue(), "[WARN] Unable to write default /etc/fstab, aborting.\n")
self.assertTrue(open_mock.called)
def test_full_parse_fstab_with_blank_lines(self):
open_mock = mock.mock_open(read_data="LABEL=/ / ext4 defaults,noatime,nofail 0 0\n \n\t\n")
# mock_open does not have support for iteration so it must be added manually
# readline() until a blank line is reached (the sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.fstabfailures.open", open_mock):
with contextlib.redirect_stdout(self.output):
self.assertEqual(moduletests.src.fstabfailures.parse_fstab(self.config_file_path),
[{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}])
def test_parse_fstab_five_entry(self):
open_mock = mock.mock_open(read_data="LABEL=/ / ext4 defaults,noatime,nofail 0\n")
# mock_open does not have support for iteration so it must be added manually
# readline() until a blank line is reached (the sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.fstabfailures.open", open_mock):
with contextlib.redirect_stdout(self.output):
self.assertEqual(moduletests.src.fstabfailures.parse_fstab(self.config_file_path),
[{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}])
def test_parse_fstab_four_entry(self):
open_mock = mock.mock_open(read_data="LABEL=/ / ext4 defaults,noatime,nofail\n")
# mock_open does not have support for iteration so it must be added manually
# readline() until a blank line is reached (the sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.fstabfailures.open", open_mock):
with contextlib.redirect_stdout(self.output):
self.assertEqual(moduletests.src.fstabfailures.parse_fstab(self.config_file_path),
[{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}])
def test_comment_parse_fstab(self):
open_mock = mock.mock_open(read_data="#\n")
# mock_open does not have support for iteration so it must be added manually
# readline() until a blank line is reached (the sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.fstabfailures.open", open_mock):
self.assertEqual(moduletests.src.fstabfailures.parse_fstab(self.config_file_path), [])
@mock.patch("moduletests.src.fstabfailures.open", side_effect=IOError)
def test_exception_parse_fstab(self, open_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.fstabfailures.parse_fstab, self.config_file_path)
self.assertEqual(self.output.getvalue(), "Unable to open and parse /etc/fstab. Invalid fstab?\n")
self.assertTrue(open_mock.called)
def test_nofsck_check_fsck_true(self):
fstab = [{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "1"}]
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.fstabfailures.check_fsck(fstab))
self.assertEqual(self.output.getvalue(), "Checking for volumes with fsck enabled\n\tfsck enabled: 'LABEL=/'\n")
def test_nofsck_check_fsck_false(self):
fstab = [{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}]
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.fstabfailures.check_fsck(fstab))
self.assertEqual(self.output.getvalue(), "Checking for volumes with fsck enabled\n")
def test_nofail_check_nofail_true(self):
fstab = [{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime",
"Dump": "0", "fsck": "0"}]
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.fstabfailures.check_nofail(fstab))
self.assertEqual(self.output.getvalue(), "Checking for volumes without nofail\n\tMissing nofail: 'LABEL=/'\n")
def test_nofail_check_nofail_false(self):
fstab = [{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}]
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.fstabfailures.check_nofail(fstab))
self.assertEqual(self.output.getvalue(), "Checking for volumes without nofail\n")
@mock.patch("moduletests.src.fstabfailures.open", mock.mock_open(read_data="stuff"))
def test_success_fix(self):
fstab = [{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}]
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.fstabfailures.fix(fstab, self.config_file_path))
self.assertTrue(self.output.getvalue().endswith("aster/docs/modules/fstabfailures.md for further details\n"))
@mock.patch("moduletests.src.fstabfailures.open", side_effect=IOError)
def test_exception_fix(self, open_mock):
with contextlib.redirect_stdout(self.output):
fstab = [{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}]
self.assertRaises(IOError, moduletests.src.fstabfailures.fix, fstab, self.config_file_path)
self.assertEqual(self.output.getvalue(), "[WARN] Unable to write new /etc/fstab. "
"Please review logs to determine the cause of the issue.\n")
self.assertTrue(open_mock.called)
@mock.patch("moduletests.src.fstabfailures.get_config_dict")
@mock.patch("moduletests.src.fstabfailures.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.fstabfailures.backup", return_value=True)
@mock.patch("moduletests.src.fstabfailures.parse_fstab", return_value=[{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}])
@mock.patch("moduletests.src.fstabfailures.check_nofail", return_value=True)
@mock.patch("moduletests.src.fstabfailures.check_fsck", return_value=True)
@mock.patch("moduletests.src.fstabfailures.fix", return_value=True)
def test_run_rewrite(self,
fix_mock,
check_fsck_mock,
check_nofail_mock,
parse_fstab,
backup_mock,
isfile_mock,
get_config_dict_mock):
get_config_dict_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.fstabfailures.run())
self.assertEqual(self.output.getvalue(), "/etc/fstab found, continuing.\n")
self.assertTrue(check_fsck_mock.called)
self.assertTrue(check_nofail_mock.called)
self.assertTrue(fix_mock.called)
self.assertTrue(parse_fstab.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(get_config_dict_mock.called)
@mock.patch("moduletests.src.fstabfailures.get_config_dict")
@mock.patch("moduletests.src.fstabfailures.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.fstabfailures.backup", return_value=True)
@mock.patch("moduletests.src.fstabfailures.parse_fstab", return_value=[{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}])
@mock.patch("moduletests.src.fstabfailures.check_nofail", return_value=False)
@mock.patch("moduletests.src.fstabfailures.check_fsck", return_value=False)
def test_run_norewrite(self,
check_fsck_mock,
check_nofail_mock,
parse_fstab_mock,
backup_mock,
isfile_mock,
get_config_dict_mock):
get_config_dict_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.fstabfailures.run())
self.assertTrue(self.output.getvalue().endswith(
"[SUCCESS] /etc/fstab has nofail set and is not set to fsck.\n"))
self.assertTrue(check_fsck_mock.called)
self.assertTrue(check_nofail_mock.called)
self.assertTrue(parse_fstab_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(get_config_dict_mock.called)
@mock.patch("moduletests.src.fstabfailures.get_config_dict")
@mock.patch("moduletests.src.fstabfailures.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.fstabfailures.write_default_fstab", return_value=True)
@mock.patch("moduletests.src.fstabfailures.backup", return_value=True)
@mock.patch("moduletests.src.fstabfailures.parse_fstab", return_value=[{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}])
@mock.patch("moduletests.src.fstabfailures.check_nofail", return_value=False)
@mock.patch("moduletests.src.fstabfailures.check_fsck", return_value=False)
def test_run_default_fstab_norewrite(self,
check_fsck_mock,
check_nofail_mock,
parse_fstab_mock,
backup_mock,
write_default_fstab_mock,
isfile_mock,
get_config_dict_mock):
get_config_dict_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"DISTRO": "alami"}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.fstabfailures.run())
self.assertTrue(self.output.getvalue().endswith(
"[SUCCESS] /etc/fstab has nofail set and is not set to fsck.\n"))
self.assertTrue(check_fsck_mock.called)
self.assertTrue(check_nofail_mock.called)
self.assertTrue(parse_fstab_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(write_default_fstab_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(get_config_dict_mock.called)
@mock.patch("moduletests.src.fstabfailures.get_config_dict")
@mock.patch("moduletests.src.fstabfailures.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.fstabfailures.backup", return_value=True)
@mock.patch("moduletests.src.fstabfailures.parse_fstab", side_effect=[OSError,
[{"Filesystem": "LABEL=/",
"Mountpoint": "/",
"FSType": "ext4",
"Options": "defaults,noatime,nofail",
"Dump": "0", "fsck": "0"}]])
@mock.patch("moduletests.src.fstabfailures.write_default_fstab", return_value=True)
@mock.patch("moduletests.src.fstabfailures.check_nofail", return_value=False)
@mock.patch("moduletests.src.fstabfailures.check_fsck", return_value=False)
def test_run_parse_exception(self,
check_fsck_mock,
check_nofail_mock,
write_default_fstab_mock,
parse_fstab_mock,
backup_mock,
isfile_mock,
get_config_dict_mock):
get_config_dict_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"DISTRO": "alami"}
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.fstabfailures.run())
self.assertTrue(self.output.getvalue().endswith(
"[SUCCESS] /etc/fstab has nofail set and is not set to fsck.\n"))
self.assertTrue(check_fsck_mock.called)
self.assertTrue(check_nofail_mock.called)
self.assertTrue(write_default_fstab_mock.called)
self.assertTrue(parse_fstab_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(get_config_dict_mock.called)
@mock.patch("moduletests.src.fstabfailures.get_config_dict")
@mock.patch("moduletests.src.fstabfailures.os.path.isfile", side_effect=Exception)
@mock.patch("moduletests.src.fstabfailures.restore")
def test_run_exception(self, restore_mock, isfile_mock, get_config_dict_mock):
get_config_dict_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.fstabfailures.run())
self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
self.assertTrue(restore_mock.called)
self.assertTrue(isfile_mock.called)
@mock.patch("moduletests.src.fstabfailures.get_config_dict", side_effect=Exception)
def test_run_config_exception(self, get_config_dict_mock):
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.fstabfailures.run())
self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
self.assertTrue(get_config_dict_mock.called)
|
|
"""Tasks related to projects, including fetching repository code, cleaning
``conf.py`` files, and rebuilding documentation.
"""
import fnmatch
import os
import shutil
import json
import logging
import socket
import requests
import datetime
from celery import task
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from slumber.exceptions import HttpClientError
from builds.constants import LATEST
from builds.models import Build, Version
from core.utils import send_email, run_on_app_servers
from doc_builder.loader import get_builder_class
from doc_builder.base import restoring_chdir
from doc_builder.environments import DockerEnvironment
from projects.exceptions import ProjectImportError
from projects.models import ImportedFile, Project
from projects.utils import run, make_api_version, make_api_project
from projects.constants import LOG_TEMPLATE
from builds.constants import STABLE
from projects import symlinks
from privacy.loader import Syncer
from tastyapi import api, apiv2
from search.parse_json import process_all_json_files
from search.utils import process_mkdocs_json
from restapi.utils import index_search_request
from vcs_support import utils as vcs_support_utils
import tastyapi
try:
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
except ImportError:
from projects.signals import before_vcs, after_vcs, before_build, after_build
log = logging.getLogger(__name__)
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
@task(default_retry_delay=7 * 60, max_retries=5)
@restoring_chdir
def update_docs(pk, version_pk=None, build_pk=None, record=True, docker=False,
search=True, force=False, intersphinx=True, localmedia=True,
api=None, basic=False, **kwargs):
"""
The main entry point for updating documentation.
It handles all of the logic around whether a project is imported or we
created it. Then it will build the html docs and other requested parts.
`pk`
Primary key of the project to update
`record`
Whether or not to keep a record of the update in the database. Useful
for preventing changes visible to the end-user when running commands
from the shell, for example.
"""
# Dependency injection to allow for testing
if api is None:
api = tastyapi.api
apiv2 = tastyapi.apiv2
else:
apiv2 = api
start_time = datetime.datetime.utcnow()
try:
project_data = api.project(pk).get()
except HttpClientError:
        log.exception(LOG_TEMPLATE.format(project=pk, version='', msg='Failed to get project data on build. Erroring.'))
        raise
project = make_api_project(project_data)
# Don't build skipped projects
if project.skip:
log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Skipping'))
return
else:
log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Building'))
version = ensure_version(api, project, version_pk)
build = create_build(build_pk)
results = {}
    # Build server stuff
try:
record_build(api=api, build=build, record=record, results=results, state='cloning')
vcs_results = setup_vcs(version, build, api)
if vcs_results:
results.update(vcs_results)
if project.documentation_type == 'auto':
update_documentation_type(version, apiv2)
if docker or settings.DOCKER_ENABLE:
record_build(api=api, build=build, record=record, results=results, state='building')
docker = DockerEnvironment(version)
build_results = docker.build()
results.update(build_results)
else:
record_build(api=api, build=build, record=record, results=results, state='installing')
setup_results = setup_environment(version)
results.update(setup_results)
record_build(api=api, build=build, record=record, results=results, state='building')
build_results = build_docs(version, force, search, localmedia)
results.update(build_results)
except vcs_support_utils.LockTimeout, e:
        results['checkout'] = (423, "", "Version locked, retrying in 7 minutes.")
log.info(LOG_TEMPLATE.format(project=version.project.slug,
version=version.slug, msg="Unable to lock, will retry"))
# http://celery.readthedocs.org/en/3.0/userguide/tasks.html#retrying
# Should completely retry the task for us until max_retries is exceeded
update_docs.retry(exc=e, throw=False)
except ProjectImportError, e:
results['checkout'] = (404, "", 'Failed to import project; skipping build.\n\nError\n-----\n\n%s' % e.message)
# Close out build in finally with error.
pass
except Exception, e:
log.error(LOG_TEMPLATE.format(project=version.project.slug,
version=version.slug, msg="Top-level Build Failure"), exc_info=True)
results['checkout'] = (404, "", 'Top-level Build Failure: %s' % e.message)
finally:
record_build(api=api, build=build, record=record, results=results, state='finished', start_time=start_time)
record_pdf(api=api, record=record, results=results, state='finished', version=version)
log.info(LOG_TEMPLATE.format(project=version.project.slug, version='', msg='Build finished'))
build_id = build.get('id')
# Web Server Tasks
if build_id:
finish_build.delay(
version_pk=version.pk,
build_pk=build_id,
hostname=socket.gethostname(),
html=results.get('html', [404])[0] == 0,
localmedia=results.get('localmedia', [404])[0] == 0,
search=results.get('search', [404])[0] == 0,
pdf=version.project.enable_pdf_build,
epub=version.project.enable_epub_build,
)
def ensure_version(api, project, version_pk):
"""
Ensure we're using a sane version.
"""
if version_pk:
version_data = api.version(version_pk).get()
else:
version_data = api.version(project.slug).get(slug=LATEST)['objects'][0]
version = make_api_version(version_data)
return version
def update_documentation_type(version, api):
"""
Automatically determine the doc type for a user.
"""
checkout_path = version.project.checkout_path(version.slug)
os.chdir(checkout_path)
files = run('find .')[1].split('\n')
markdown = sphinx = 0
for filename in files:
if fnmatch.fnmatch(filename, '*.md') or fnmatch.fnmatch(filename, '*.markdown'):
markdown += 1
elif fnmatch.fnmatch(filename, '*.rst'):
sphinx += 1
ret = 'sphinx'
if markdown > sphinx:
ret = 'mkdocs'
project_data = api.project(version.project.pk).get()
project_data['documentation_type'] = ret
api.project(version.project.pk).put(project_data)
version.project.documentation_type = ret
def docker_build(version, search=True, force=False, intersphinx=True,
localmedia=True):
"""
The code that executes inside of docker
"""
environment_results = setup_environment(version)
results = build_docs(version=version, force=force, search=search,
localmedia=localmedia)
results.update(environment_results)
return results
def setup_vcs(version, build, api):
"""
Update the checkout of the repo to make sure it's the latest.
This also syncs versions in the DB.
"""
log.info(LOG_TEMPLATE.format(project=version.project.slug,
version=version.slug, msg='Updating docs from VCS'))
try:
update_output = update_imported_docs(version.pk, api)
commit = version.project.vcs_repo(version.slug).commit
if commit:
build['commit'] = commit
except ProjectImportError:
log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
msg='Failed to import project; skipping build'), exc_info=True)
raise
return update_output
@task()
def update_imported_docs(version_pk, api=None):
"""
Check out or update the given project's repository.
"""
if api is None:
api = tastyapi.api
version_data = api.version(version_pk).get()
version = make_api_version(version_data)
project = version.project
ret_dict = {}
# Make Dirs
if not os.path.exists(project.doc_path):
os.makedirs(project.doc_path)
if not project.vcs_repo():
raise ProjectImportError(("Repo type '{0}' unknown".format(project.repo_type)))
with project.repo_nonblockinglock(version=version,
max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
before_vcs.send(sender=version)
# Get the actual code on disk
if version:
log.info(
LOG_TEMPLATE.format(
project=project.slug,
version=version.slug,
msg='Checking out version {slug}: {identifier}'.format(
slug=version.slug,
identifier=version.identifier
)
)
)
version_slug = version.slug
version_repo = project.vcs_repo(version_slug)
ret_dict['checkout'] = version_repo.checkout(
version.identifier,
)
else:
# Does this ever get called?
log.info(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg='Updating to latest revision'))
version_slug = LATEST
version_repo = project.vcs_repo(version_slug)
ret_dict['checkout'] = version_repo.update()
after_vcs.send(sender=version)
# Update tags/version
version_post_data = {'repo': version_repo.repo_url}
if version_repo.supports_tags:
version_post_data['tags'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.tags
]
if version_repo.supports_branches:
version_post_data['branches'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.branches
]
try:
apiv2.project(project.pk).sync_versions.post(version_post_data)
except Exception, e:
print "Sync Versions Exception: %s" % e.message
return ret_dict
def setup_environment(version):
"""
Build the virtualenv and install the project into it.
Always build projects with a virtualenv.
"""
ret_dict = {}
project = version.project
build_dir = os.path.join(project.venv_path(version=version.slug), 'build')
if os.path.exists(build_dir):
log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='Removing existing build dir'))
shutil.rmtree(build_dir)
if project.use_system_packages:
site_packages = '--system-site-packages'
else:
site_packages = '--no-site-packages'
# Here the command has been modified to support different
# interpreters.
ret_dict['venv'] = run(
'{cmd} {site_packages} {path}'.format(
cmd='{interpreter} -m virtualenv'.format(
interpreter=project.python_interpreter),
site_packages=site_packages,
path=project.venv_path(version=version.slug)
)
)
# Other code expects sphinx-build to be installed inside the
# virtualenv. Using the -I option makes sure it gets installed
# even if it is already installed system-wide (and
# --system-site-packages is used)
if project.use_system_packages:
ignore_option = '-I'
else:
ignore_option = ''
requirements = ' '.join([
'sphinx==1.3.1',
'Pygments==2.0.2',
'virtualenv==13.1.0',
'setuptools==18.0.1',
'docutils==0.11',
'mkdocs==0.14.0',
'mock==1.0.1',
'pillow==2.6.1',
'readthedocs-sphinx-ext==0.5.4',
'sphinx-rtd-theme==0.1.8',
'alabaster>=0.7,<0.8,!=0.7.5',
'recommonmark==0.1.1',
])
wheeldir = os.path.join(settings.SITE_ROOT, 'deploy', 'wheels')
ret_dict['doc_builder'] = run(
(
'{cmd} install --use-wheel --find-links={wheeldir} -U '
'{ignore_option} {requirements}'
).format(
cmd=project.venv_bin(version=version.slug, bin='pip'),
ignore_option=ignore_option,
wheeldir=wheeldir,
requirements=requirements,
)
)
# Handle requirements
requirements_file_path = project.requirements_file
checkout_path = project.checkout_path(version.slug)
if not requirements_file_path:
builder_class = get_builder_class(project.documentation_type)
docs_dir = builder_class(version).docs_dir()
for path in [docs_dir, '']:
for req_file in ['pip_requirements.txt', 'requirements.txt']:
test_path = os.path.join(checkout_path, path, req_file)
print('Testing %s' % test_path)
if os.path.exists(test_path):
requirements_file_path = test_path
break
if requirements_file_path:
os.chdir(checkout_path)
ret_dict['requirements'] = run(
'{cmd} install --exists-action=w -r {requirements}'.format(
cmd=project.venv_bin(version=version.slug, bin='pip'),
requirements=requirements_file_path))
# Handle setup.py
os.chdir(project.checkout_path(version.slug))
if os.path.isfile("setup.py"):
if getattr(settings, 'USE_PIP_INSTALL', False):
ret_dict['install'] = run(
'{cmd} install --ignore-installed .'.format(
cmd=project.venv_bin(version=version.slug, bin='pip')))
else:
ret_dict['install'] = run(
'{cmd} setup.py install --force'.format(
cmd=project.venv_bin(version=version.slug,
bin='python')))
else:
ret_dict['install'] = (999, "", "No setup.py, skipping install")
return ret_dict
@task()
def build_docs(version, force, search, localmedia):
"""
This handles the actual building of the documentation
"""
project = version.project
results = {}
before_build.send(sender=version)
with project.repo_nonblockinglock(version=version,
max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
html_builder = get_builder_class(project.documentation_type)(version)
if force:
html_builder.force()
html_builder.append_conf()
results['html'] = html_builder.build()
if results['html'][0] == 0:
html_builder.move()
# Gracefully attempt to move files via task on web workers.
try:
move_files.delay(
version_pk=version.pk,
html=True,
hostname=socket.gethostname(),
)
except socket.error:
pass
fake_results = (999, "Project Skipped, Didn't build",
"Project Skipped, Didn't build")
if 'mkdocs' in project.documentation_type:
if search:
try:
search_builder = get_builder_class('mkdocs_json')(version)
results['search'] = search_builder.build()
if results['search'][0] == 0:
search_builder.move()
except:
log.error(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
if 'sphinx' in project.documentation_type:
# Search builder. Creates JSON from docs and sends it to the
# server.
if search:
try:
search_builder = get_builder_class('sphinx_search')(version)
results['search'] = search_builder.build()
if results['search'][0] == 0:
# Copy json for safe keeping
search_builder.move()
except:
log.error(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
# Local media builder for singlepage HTML download archive
if localmedia:
try:
localmedia_builder = get_builder_class('sphinx_singlehtmllocalmedia')(version)
results['localmedia'] = localmedia_builder.build()
if results['localmedia'][0] == 0:
localmedia_builder.move()
except:
log.error(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg="Local Media HTML Build Error"), exc_info=True)
# Optional build steps
if version.project.slug not in HTML_ONLY and not project.skip:
if project.enable_pdf_build:
pdf_builder = get_builder_class('sphinx_pdf')(version)
results['pdf'] = pdf_builder.build()
# Always move pdf results even when there's an error.
# if pdf_results[0] == 0:
pdf_builder.move()
else:
results['pdf'] = fake_results
if project.enable_epub_build:
epub_builder = get_builder_class('sphinx_epub')(version)
results['epub'] = epub_builder.build()
if results['epub'][0] == 0:
epub_builder.move()
else:
results['epub'] = fake_results
after_build.send(sender=version)
return results
def create_build(build_pk):
"""
Old placeholder for build creation. Now it just gets it from the database.
"""
if build_pk:
build = api.build(build_pk).get()
for key in ['project', 'version', 'resource_uri', 'absolute_uri']:
if key in build:
del build[key]
else:
build = {}
return build
def record_build(api, record, build, results, state, start_time=None):
"""
Record a build by hitting the API.
Returns nothing
"""
if not record:
return None
build['builder'] = socket.gethostname()
setup_steps = ['checkout', 'venv', 'doc_builder', 'requirements', 'install']
output_steps = ['html']
all_steps = setup_steps + output_steps
build['state'] = state
if 'html' in results:
build['success'] = results['html'][0] == 0
else:
build['success'] = False
# Set global state
# for step in all_steps:
# if results.get(step, False):
# if results.get(step)[0] != 0:
# results['success'] = False
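    # Overall exit code is the worst (highest) exit code across all recorded steps;
    # steps that did not run default to 0 (success).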
build['exit_code'] = max([results.get(step, [0])[0] for step in all_steps])
build['setup'] = build['setup_error'] = ""
build['output'] = build['error'] = ""
if start_time:
build['length'] = (datetime.datetime.utcnow() - start_time).total_seconds()
for step in setup_steps:
if step in results:
build['setup'] += "\n\n%s\n-----\n\n" % step
try:
build['setup'] += results.get(step)[1]
except (IndexError, TypeError):
pass
build['setup_error'] += "\n\n%s\n-----\n\n" % step
try:
build['setup_error'] += results.get(step)[2]
except (IndexError, TypeError):
pass
for step in output_steps:
if step in results:
build['output'] += "\n\n%s\n-----\n\n" % step
try:
build['output'] += results.get(step)[1]
except (IndexError, TypeError):
pass
build['error'] += "\n\n%s\n-----\n\n" % step
try:
build['error'] += results.get(step)[2]
except (IndexError, TypeError):
pass
# Attempt to stop unicode errors on build reporting
for key, val in build.items():
if isinstance(val, basestring):
build[key] = val.decode('utf-8', 'ignore')
try:
api.build(build['id']).put(build)
except Exception:
log.error("Unable to post a new build", exc_info=True)
def record_pdf(api, record, results, state, version):
if not record or 'sphinx' not in version.project.documentation_type:
return None
if not version.project.enable_pdf_build:
return None
try:
if 'pdf' in results:
pdf_exit = results['pdf'][0]
pdf_success = pdf_exit == 0
pdf_output = results['pdf'][1]
pdf_error = results['pdf'][2]
else:
pdf_exit = 999
pdf_success = False
pdf_output = pdf_error = "PDF Failed"
pdf_output = pdf_output.decode('utf-8', 'ignore')
pdf_error = pdf_error.decode('utf-8', 'ignore')
if 'Output written on' in pdf_output:
pdf_success = True
api.build.post(dict(
state=state,
project='/api/v1/project/%s/' % version.project.pk,
version='/api/v1/version/%s/' % version.pk,
success=pdf_success,
type='pdf',
output=pdf_output,
error=pdf_error,
exit_code=pdf_exit,
builder=socket.gethostname(),
))
except Exception:
log.error(LOG_TEMPLATE.format(project=version.project.slug,
version=version.slug, msg="Unable to post a new build"), exc_info=True)
###########
# Web tasks
###########
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False,
localmedia=False, search=False, pdf=False, epub=False):
"""
Build Finished, do house keeping bits
"""
version = Version.objects.get(pk=version_pk)
build = Build.objects.get(pk=build_pk)
if html:
version.active = True
version.built = True
version.save()
move_files(
version_pk=version_pk,
hostname=hostname,
html=html,
localmedia=localmedia,
search=search,
pdf=pdf,
epub=epub,
)
symlinks.symlink_cnames(version)
symlinks.symlink_translations(version)
symlinks.symlink_subprojects(version)
if version.project.single_version:
symlinks.symlink_single_version(version)
else:
symlinks.remove_symlink_single_version(version)
# Delayed tasks
update_static_metadata.delay(version.project.pk)
fileify.delay(version.pk, commit=build.commit)
update_search.delay(version.pk, commit=build.commit)
if not html and version.slug != STABLE and build.exit_code != 423:
send_notifications.delay(version.pk, build_pk=build.pk)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False, pdf=False, epub=False):
version = Version.objects.get(pk=version_pk)
if html:
from_path = version.project.artifact_path(version=version.slug, type=version.project.documentation_type)
target = version.project.rtd_build_path(version.slug)
Syncer.copy(from_path, target, host=hostname)
if 'sphinx' in version.project.documentation_type:
if localmedia:
from_path = version.project.artifact_path(version=version.slug, type='sphinx_localmedia')
to_path = version.project.get_production_media_path(type='htmlzip', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if search:
from_path = version.project.artifact_path(version=version.slug, type='sphinx_search')
to_path = version.project.get_production_media_path(type='json', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
# Always move PDF's because the return code lies.
if pdf:
from_path = version.project.artifact_path(version=version.slug, type='sphinx_pdf')
to_path = version.project.get_production_media_path(type='pdf', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if epub:
from_path = version.project.artifact_path(version=version.slug, type='sphinx_epub')
to_path = version.project.get_production_media_path(type='epub', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if 'mkdocs' in version.project.documentation_type:
if search:
from_path = version.project.artifact_path(version=version.slug, type='mkdocs_json')
to_path = version.project.get_production_media_path(type='json', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
@task(queue='web')
def update_search(version_pk, commit):
version = Version.objects.get(pk=version_pk)
if 'sphinx' in version.project.documentation_type:
page_list = process_all_json_files(version, build_dir=False)
elif 'mkdocs' in version.project.documentation_type:
page_list = process_mkdocs_json(version, build_dir=False)
else:
log.error('Unknown documentation type: %s' % version.project.documentation_type)
return
log_msg = ' '.join([page['path'] for page in page_list])
log.info("(Search Index) Sending Data: %s [%s]" % (version.project.slug, log_msg))
index_search_request(
version=version,
page_list=page_list,
commit=commit,
project_scale=0,
page_scale=0,
# Don't index sections to speed up indexing.
# They aren't currently exposed anywhere.
section=False,
)
@task(queue='web')
def fileify(version_pk, commit):
"""
Create ImportedFile objects for all of a version's files.
This is a prereq for indexing the docs for search.
It also causes celery-haystack to kick off an index of the file.
"""
version = Version.objects.get(pk=version_pk)
project = version.project
if not commit:
log.info(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg='Imported File not being built because no commit information'))
path = project.rtd_build_path(version.slug)
if path:
log.info(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg='Creating ImportedFiles'))
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
if fnmatch.fnmatch(filename, '*.html'):
dirpath = os.path.join(root.replace(path, '').lstrip('/'),
filename.lstrip('/'))
obj, created = ImportedFile.objects.get_or_create(
project=project,
version=version,
path=dirpath,
name=filename,
commit=commit,
)
if not created:
obj.save()
# Delete ImportedFiles from previous versions
ImportedFile.objects.filter(project=project, version=version).exclude(commit=commit).delete()
else:
log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='No ImportedFile files'))
@task(queue='web')
def send_notifications(version_pk, build_pk):
version = Version.objects.get(pk=version_pk)
build = Build.objects.get(pk=build_pk)
for hook in version.project.webhook_notifications.all():
webhook_notification(version, build, hook.url)
for email in version.project.emailhook_notifications.all().values_list('email', flat=True):
email_notification(version, build, email)
def email_notification(version, build, email):
log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
msg='sending email to: %s' % email))
context = {'version': version,
'project': version.project,
'build': build,
'build_url': 'https://{0}{1}'.format(
getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
build.get_absolute_url()),
'unsub_url': 'https://{0}{1}'.format(
getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
reverse('projects_notifications', args=[version.project.slug])),
}
if build.commit:
title = _('Failed: {project.name} ({commit})').format(commit=build.commit[:8], **context)
else:
title = _('Failed: {project.name} ({version.verbose_name})').format(**context)
send_email(
email,
title,
template='projects/email/build_failed.txt',
template_html='projects/email/build_failed.html',
context=context
)
def webhook_notification(version, build, hook_url):
    project = version.project
    data = json.dumps({
'name': project.name,
'slug': project.slug,
'build': {
'id': build.id,
'success': build.success,
'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
}
})
log.debug(LOG_TEMPLATE.format(project=project.slug, version='', msg='sending notification to: %s' % hook_url))
requests.post(hook_url, data=data)
@task(queue='web')
def update_static_metadata(project_pk, path=None):
"""Update static metadata JSON file
Metadata settings include the following project settings:
version
The default version for the project, default: `latest`
language
The default language for the project, default: `en`
languages
List of languages built by linked translation projects.
"""
project = Project.objects.get(pk=project_pk)
if not path:
path = project.static_metadata_path()
log.info(LOG_TEMPLATE.format(
project=project.slug,
version='',
msg='Updating static metadata',
))
translations = [trans.language for trans in project.translations.all()]
languages = set(translations)
# Convert to JSON safe types
metadata = {
'version': project.default_version,
'language': project.language,
'languages': list(languages),
'single_version': project.single_version,
}
try:
fh = open(path, 'w+')
json.dump(metadata, fh)
fh.close()
Syncer.copy(path, path, host=socket.gethostname(), file=True)
except (AttributeError, IOError) as e:
log.debug(LOG_TEMPLATE.format(
project=project.slug,
version='',
msg='Cannot write to metadata.json: {0}'.format(e)
))
#@periodic_task(run_every=crontab(hour="*", minute="*/5", day_of_week="*"))
def update_docs_pull(record=False, force=False):
"""
A high-level interface that will update all of the projects.
This is mainly used from a cronjob or management command.
"""
for version in Version.objects.filter(built=True):
try:
update_docs(
pk=version.project.pk, version_pk=version.pk, record=record)
except Exception, e:
log.error("update_docs_pull failed", exc_info=True)
##############
# Random Tasks
##############
@task()
def remove_dir(path):
"""
Remove a directory on the build/celery server.
This is mainly a wrapper around shutil.rmtree so that app servers
can kill things on the build server.
"""
log.info("Removing %s" % path)
shutil.rmtree(path)
@task(queue='web')
def clear_artifacts(version_pk):
""" Remove artifacts from the web servers. """
version = Version.objects.get(pk=version_pk)
run_on_app_servers('rm -rf %s' % version.project.get_production_media_path(type='pdf', version_slug=version.slug))
run_on_app_servers('rm -rf %s' % version.project.get_production_media_path(type='epub', version_slug=version.slug))
run_on_app_servers('rm -rf %s' % version.project.get_production_media_path(type='htmlzip', version_slug=version.slug))
run_on_app_servers('rm -rf %s' % version.project.rtd_build_path(version=version.slug))
|
|
"""Allow to set up simple automation rules via the config file."""
import asyncio
from functools import partial
import importlib
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, CONF_ID, CONF_PLATFORM,
EVENT_AUTOMATION_TRIGGERED, EVENT_HOMEASSISTANT_START, SERVICE_RELOAD,
SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ON)
from homeassistant.core import Context, CoreState
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition, extract_domain_configs, script
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
DOMAIN = 'automation'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_AUTOMATIONS = 'all automations'
CONF_ALIAS = 'alias'
CONF_HIDE_ENTITY = 'hide_entity'
CONF_CONDITION = 'condition'
CONF_ACTION = 'action'
CONF_TRIGGER = 'trigger'
CONF_CONDITION_TYPE = 'condition_type'
CONF_INITIAL_STATE = 'initial_state'
CONDITION_USE_TRIGGER_VALUES = 'use_trigger_values'
CONDITION_TYPE_AND = 'and'
CONDITION_TYPE_OR = 'or'
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_HIDE_ENTITY = False
DEFAULT_INITIAL_STATE = True
ATTR_LAST_TRIGGERED = 'last_triggered'
ATTR_VARIABLES = 'variables'
SERVICE_TRIGGER = 'trigger'
_LOGGER = logging.getLogger(__name__)
def _platform_validator(config):
"""Validate it is a valid platform."""
try:
platform = importlib.import_module('.{}'.format(config[CONF_PLATFORM]),
__name__)
except ImportError:
raise vol.Invalid('Invalid platform specified') from None
return platform.TRIGGER_SCHEMA(config)
_TRIGGER_SCHEMA = vol.All(
cv.ensure_list,
[
vol.All(
vol.Schema({
vol.Required(CONF_PLATFORM): str
}, extra=vol.ALLOW_EXTRA),
_platform_validator
),
]
)
_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])
PLATFORM_SCHEMA = vol.Schema({
# str on purpose
CONF_ID: str,
CONF_ALIAS: cv.string,
vol.Optional(CONF_INITIAL_STATE): cv.boolean,
vol.Optional(CONF_HIDE_ENTITY, default=DEFAULT_HIDE_ENTITY): cv.boolean,
vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
})
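# Illustrative example (not taken from this module) of one automation entry that
# PLATFORM_SCHEMA accepts, written as YAML; the 'sun' trigger platform and the
# 'light.turn_on' service are assumed to be available:
#
#   alias: Turn on light at sunset
#   trigger:
#     - platform: sun
#       event: sunset
#   action:
#     - service: light.turn_on
#       entity_id: light.living_room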
SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})
TRIGGER_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Optional(ATTR_VARIABLES, default={}): dict,
})
RELOAD_SERVICE_SCHEMA = vol.Schema({})
@bind_hass
def is_on(hass, entity_id):
"""
Return true if specified automation entity_id is on.
Async friendly.
"""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
"""Set up the automation."""
component = EntityComponent(_LOGGER, DOMAIN, hass,
group_name=GROUP_NAME_ALL_AUTOMATIONS)
await _async_process_config(hass, config, component)
async def trigger_service_handler(service_call):
"""Handle automation triggers."""
tasks = []
for entity in await component.async_extract_from_service(service_call):
tasks.append(entity.async_trigger(
service_call.data.get(ATTR_VARIABLES),
skip_condition=True,
context=service_call.context))
if tasks:
await asyncio.wait(tasks, loop=hass.loop)
async def turn_onoff_service_handler(service_call):
"""Handle automation turn on/off service calls."""
tasks = []
method = 'async_{}'.format(service_call.service)
for entity in await component.async_extract_from_service(service_call):
tasks.append(getattr(entity, method)())
if tasks:
await asyncio.wait(tasks, loop=hass.loop)
async def toggle_service_handler(service_call):
"""Handle automation toggle service calls."""
tasks = []
for entity in await component.async_extract_from_service(service_call):
if entity.is_on:
tasks.append(entity.async_turn_off())
else:
tasks.append(entity.async_turn_on())
if tasks:
await asyncio.wait(tasks, loop=hass.loop)
async def reload_service_handler(service_call):
"""Remove all automations and load new ones from config."""
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
hass.services.async_register(
DOMAIN, SERVICE_TRIGGER, trigger_service_handler,
schema=TRIGGER_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, toggle_service_handler,
schema=SERVICE_SCHEMA)
for service in (SERVICE_TURN_ON, SERVICE_TURN_OFF):
hass.services.async_register(
DOMAIN, service, turn_onoff_service_handler,
schema=SERVICE_SCHEMA)
return True
class AutomationEntity(ToggleEntity, RestoreEntity):
"""Entity to show status of entity."""
def __init__(self, automation_id, name, async_attach_triggers, cond_func,
async_action, hidden, initial_state):
"""Initialize an automation entity."""
self._id = automation_id
self._name = name
self._async_attach_triggers = async_attach_triggers
self._async_detach_triggers = None
self._cond_func = cond_func
self._async_action = async_action
self._last_triggered = None
self._hidden = hidden
self._initial_state = initial_state
@property
def name(self):
"""Name of the automation."""
return self._name
@property
def should_poll(self):
"""No polling needed for automation entities."""
return False
@property
def state_attributes(self):
"""Return the entity state attributes."""
return {
ATTR_LAST_TRIGGERED: self._last_triggered
}
@property
def hidden(self) -> bool:
"""Return True if the automation entity should be hidden from UIs."""
return self._hidden
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._async_detach_triggers is not None
async def async_added_to_hass(self) -> None:
"""Startup with initial state or previous state."""
await super().async_added_to_hass()
if self._initial_state is not None:
enable_automation = self._initial_state
_LOGGER.debug("Automation %s initial state %s from config "
"initial_state", self.entity_id, enable_automation)
else:
state = await self.async_get_last_state()
if state:
enable_automation = state.state == STATE_ON
self._last_triggered = state.attributes.get('last_triggered')
_LOGGER.debug("Automation %s initial state %s from recorder "
"last state %s", self.entity_id,
enable_automation, state)
else:
enable_automation = DEFAULT_INITIAL_STATE
_LOGGER.debug("Automation %s initial state %s from default "
"initial state", self.entity_id,
enable_automation)
if not enable_automation:
return
# HomeAssistant is starting up
if self.hass.state == CoreState.not_running:
async def async_enable_automation(event):
"""Start automation on startup."""
await self.async_enable()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_enable_automation)
# HomeAssistant is running
else:
await self.async_enable()
async def async_turn_on(self, **kwargs) -> None:
"""Turn the entity on and update the state."""
if self.is_on:
return
await self.async_enable()
async def async_turn_off(self, **kwargs) -> None:
"""Turn the entity off."""
if not self.is_on:
return
self._async_detach_triggers()
self._async_detach_triggers = None
await self.async_update_ha_state()
async def async_trigger(self, variables, skip_condition=False,
context=None):
"""Trigger automation.
This method is a coroutine.
"""
if not skip_condition and not self._cond_func(variables):
return
# Create a new context referring to the old context.
parent_id = None if context is None else context.id
trigger_context = Context(parent_id=parent_id)
self.async_set_context(trigger_context)
self.hass.bus.async_fire(EVENT_AUTOMATION_TRIGGERED, {
ATTR_NAME: self._name,
ATTR_ENTITY_ID: self.entity_id,
}, context=trigger_context)
await self._async_action(self.entity_id, variables, trigger_context)
self._last_triggered = utcnow()
await self.async_update_ha_state()
async def async_will_remove_from_hass(self):
"""Remove listeners when removing automation from HASS."""
await super().async_will_remove_from_hass()
await self.async_turn_off()
async def async_enable(self):
"""Enable this automation entity.
This method is a coroutine.
"""
if self.is_on:
return
self._async_detach_triggers = await self._async_attach_triggers(
self.async_trigger)
await self.async_update_ha_state()
@property
def device_state_attributes(self):
"""Return automation attributes."""
if self._id is None:
return None
return {
CONF_ID: self._id
}
async def _async_process_config(hass, config, component):
"""Process config and add automations.
This method is a coroutine.
"""
entities = []
for config_key in extract_domain_configs(config, DOMAIN):
conf = config[config_key]
for list_no, config_block in enumerate(conf):
automation_id = config_block.get(CONF_ID)
name = config_block.get(CONF_ALIAS) or "{} {}".format(config_key,
list_no)
hidden = config_block[CONF_HIDE_ENTITY]
initial_state = config_block.get(CONF_INITIAL_STATE)
action = _async_get_action(hass, config_block.get(CONF_ACTION, {}),
name)
if CONF_CONDITION in config_block:
cond_func = _async_process_if(hass, config, config_block)
if cond_func is None:
continue
else:
def cond_func(variables):
"""Condition will always pass."""
return True
async_attach_triggers = partial(
_async_process_trigger, hass, config,
config_block.get(CONF_TRIGGER, []), name
)
entity = AutomationEntity(
automation_id, name, async_attach_triggers, cond_func, action,
hidden, initial_state)
entities.append(entity)
if entities:
await component.async_add_entities(entities)
def _async_get_action(hass, config, name):
"""Return an action based on a configuration."""
script_obj = script.Script(hass, config, name)
async def action(entity_id, variables, context):
"""Execute an action."""
_LOGGER.info('Executing %s', name)
try:
await script_obj.async_run(variables, context)
except Exception as err: # pylint: disable=broad-except
script_obj.async_log_exception(
_LOGGER,
'Error while executing automation {}'.format(entity_id), err)
return action
def _async_process_if(hass, config, p_config):
"""Process if checks."""
if_configs = p_config.get(CONF_CONDITION)
checks = []
for if_config in if_configs:
try:
checks.append(condition.async_from_config(if_config, False))
except HomeAssistantError as ex:
_LOGGER.warning('Invalid condition: %s', ex)
return None
def if_action(variables=None):
"""AND all conditions."""
return all(check(hass, variables) for check in checks)
return if_action
async def _async_process_trigger(hass, config, trigger_configs, name, action):
"""Set up the triggers.
This method is a coroutine.
"""
removes = []
info = {
'name': name
}
for conf in trigger_configs:
platform = importlib.import_module('.{}'.format(conf[CONF_PLATFORM]),
__name__)
remove = await platform.async_trigger(hass, conf, action, info)
if not remove:
_LOGGER.error("Error setting up trigger %s", name)
continue
_LOGGER.info("Initialized trigger %s", name)
removes.append(remove)
if not removes:
return None
def remove_triggers():
"""Remove attached triggers."""
for remove in removes:
remove()
return remove_triggers
|
|
import logging
import re
import datetime
import json
from pytz.gae import pytz
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.datastore import entity_pb
from data_model import DeveloperRequest
def validateDevKey(devKey):
if devKey is None or devKey == "kiosk":
return None
else:
devKey = devKey.lower()
storeKey = memcache.get(devKey)
if storeKey is None:
#logging.error('Dev key - %s - cache miss')
q = db.GqlQuery("SELECT __key__ FROM DeveloperKeys WHERE developerKey = :1", devKey)
storeKey = q.get()
if storeKey is None:
# @todo we can create a black list for abusive keys to avoid the
# datastore query all together if this becomes a problem
logging.debug('API : illegal access using devkey %s' % devKey)
return None
else:
logging.debug('API : devkey cache miss!')
memcache.set(devKey, storeKey)
# we've validated the dev key at this point... start counting requests
total = memcache.incr(devKey + ':counter', initial_value=0)
return storeKey
## end validateDevKey()
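# Illustrative call site (handler and parameter names are assumptions, not part
# of this module):
#   devStoreKey = validateDevKey(self.request.get('key'))
#   if devStoreKey is None:
#       self.response.out.write(json.dumps(
#           buildErrorResponse('-1', 'Illegal developer key')))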
def conformStopID(stopID):
# we assume the stopID is four characters long. if we find it is
# less than that, pad the front-end of it with zeros.
if len(stopID) < 4:
if len(stopID) == 2:
stopID = "00" + stopID
elif len(stopID) == 1:
stopID = "000" + stopID
else:
stopID = "0" + stopID
return stopID
## end conformStopID()
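# Examples (illustrative): conformStopID('7') -> '0007', conformStopID('56') -> '0056',
# conformStopID('123') -> '0123'; IDs already four characters long pass through unchanged.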
def conformRouteID(routeID):
# routeID should be two digits
if len(routeID) == 1:
routeID = "0" + routeID
return routeID
## end conformRouteID
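# Example (illustrative): conformRouteID('4') -> '04'; two-digit IDs pass through unchanged.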
def inthepast(time):
return computeCountdownMinutes(time) < 0
## end inthepast
def getLocalDatetime():
# the server clock is assumed to be UTC (the App Engine default); utcnow() makes that explicit
utc_dt = datetime.datetime.utcnow()
central_tz = pytz.timezone('US/Central')
utc = pytz.utc
ltime = utc.localize(utc_dt).astimezone(central_tz)
return ltime
## end getLocalDatetime
def getLocalTimestamp():
ltime = getLocalDatetime()
ltime_stamp = ltime.strftime('%I:%M%p')
return(ltime_stamp)
## end getLocalTimestamp()
def computeCountdownMinutes(arrivalTime):
ltime = getLocalDatetime()
ltime_hour = ltime.hour
#ltime_hour += 24 if ltime_hour < 0 else 0
ltime_min = ltime_hour * 60 + ltime.minute
#logging.debug("local time: %s hours, or %s minutes" % (ltime_hour,ltime_min))
# pull out the arrival time
#logging.debug("API: parsing arrival time of %s" % arrivalTime)
m = re.search(r'(\d+):(\d+)\s(.*?)', arrivalTime)
btime_hour = arrival_hour = int(m.group(1))
btime_hour += 12 if arrivalTime.find('pm') > 0 and arrival_hour < 12 else 0
btime_min = btime_hour * 60 + int(m.group(2))
#logging.debug("computing countdown with %s. %s hours %s minutes" % (arrivalTime,btime_hour,btime_min))
delta_in_min = btime_min - ltime_min
#logging.debug('API: countdown is %s minutes'% delta_in_min)
return(delta_in_min)
## end computeCountdownMinutes()
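# Worked example (illustrative): with a local time of 5:10 PM, an arrivalTime of
# '5:25 pm' parses to hour 5 + 12 = 17, giving 17*60 + 25 = 1045 minutes versus
# 17*60 + 10 = 1030 for the current time, so the countdown is 15 minutes.
# Note the 12-hour correction keys off a lowercase 'pm' substring in arrivalTime.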
# Checks to see if the current time is during the hours
# in which the Metro doesn't operate
#
def afterHours():
ltime = getLocalDatetime()
hour = ltime.hour
weekday = ltime.weekday()
# late night service on Friday/Saturday
if weekday >= 5:
if hour > 2 and hour < 5:
return True
else:
return False
else:
if hour > 1 and hour < 5:
return True
else:
return False
## end afterHours()
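# Examples (illustrative): 2:30 AM on a Tuesday returns True (no service), while
# 2:30 AM on a Sunday returns False because weekend late-night service runs an
# hour longer (the after-hours window starts at hour > 2 instead of hour > 1).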
def buildErrorResponse(error,description):
# encapsulate response in json
response_dict = {'status':error,
'timestamp':getLocalTimestamp(),
'description':description,
}
return response_dict
## end buildErrorResponse()
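# Example result (illustrative, timestamp varies):
#   buildErrorResponse('-1', 'Illegal developer key')
#   -> {'status': '-1', 'timestamp': '05:25PM', 'description': 'Illegal developer key'}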
def getDirectionLabel(directionID):
directionLabel = memcache.get(directionID)
if directionLabel is None:
q = db.GqlQuery("SELECT * FROM DestinationListing WHERE id = :1", directionID)
directionQuery = q.fetch(1)
if len(directionQuery) > 0:
#logging.debug("Found destination ID mapping... %s :: %s" % (directionQuery[0].id,directionQuery[0].label))
directionLabel = directionQuery[0].label
memcache.set(directionID, directionLabel)
else:
logging.error("ERROR: We don't have a record of this direction ID!?! Impossible! %s" % directionID)
directionLabel = "unknown"
return directionLabel
## end getDirectionLabel()
def handle_500(request, response, exception):
logging.error('Server ERROR :: %s' % exception)
callback = request.get('callback')
if callback != '':
response.headers['Content-Type'] = 'application/javascript'
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET'
else:
response.headers['Content-Type'] = 'application/json'
response.out.write(json.dumps(buildErrorResponse("-1","Internal server error")))
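# Assumed wiring (webapp2-style, not shown in this module):
#   app = webapp2.WSGIApplication(routes)
#   app.error_handlers[500] = handle_500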
GETARRIVALS = "get arrivals"
GETVEHICLES = "get vehicles"
GETSTOPS = "get stops"
GETROUTES = "get routes"
GETNEARBYSTOPS = "get nearby stops"
GETSTOPLOCATION = "get stop location"
def recordDeveloperRequest(devKey,type,terms,ipaddr,error='success'):
# this is too damn expensive to store all of these on app engine
# so we're going to ignore these requests
if False:
req = DeveloperRequest()
req.developer = devKey
req.type = type
req.error = error
req.requestTerms = terms
req.remoteAddr = ipaddr
req.put()
## end recordDeveloperRequest()
def serialize_entities(models):
if models is None:
return None
elif isinstance(models, db.Model):
# Just one instance
return db.model_to_protobuf(models).Encode()
else:
# A list
return [db.model_to_protobuf(x).Encode() for x in models]
def deserialize_entities(data):
if data is None:
return None
elif isinstance(data, str):
# Just one instance
return db.model_from_protobuf(entity_pb.EntityProto(data))
else:
return [db.model_from_protobuf(entity_pb.EntityProto(x)) for x in data]
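# Typical round-trip (illustrative, the 'stops' key is an assumption): cache
# datastore entities with
#   memcache.set('stops', serialize_entities(stops))
# and restore them later with
#   stops = deserialize_entities(memcache.get('stops'))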
def get_time_from_text(text_with_time, findPos=0):
# takes unstructured text that contains a time substring and returns it as e.g. '5:25PM'
colon_index = text_with_time.find(':', findPos)
if colon_index > -1:
if colon_index == 1:
i_start = colon_index - 1
else:
i_start = colon_index - 2
i_end = colon_index + 6
return text_with_time[i_start:i_end].replace(' ', '')
else:
return ''
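# Example (illustrative): get_time_from_text('The bus arrives at 5:25 PM') -> '5:25PM'
# (two characters before the colon through five after it, with spaces stripped).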
|
|
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import utils
from cinder.tests.unit.volume.drivers.infortrend import test_infortrend_cli
from cinder.volume import configuration
from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli
SUCCEED = (0, '')
FAKE_ERROR_RETURN = (-1, '')
class InfortrendTestCass(test.TestCase):
def __init__(self, *args, **kwargs):
super(InfortrendTestCass, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendTestCass, self).setUp()
self.cli_data = test_infortrend_cli.InfortrendCLITestData()
self.configuration = configuration.Configuration(
[], config_group=configuration.SHARED_CONF_GROUP)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.safe_get = self._fake_safe_get
def _fake_safe_get(self, key):
return getattr(self.configuration, key)
def _driver_setup(self, mock_commands, configuration=None):
if configuration is None:
configuration = self.configuration
self.driver = self._get_driver(configuration)
mock_commands_execute = self._mock_command_execute(mock_commands)
mock_cli = mock.Mock(side_effect=mock_commands_execute)
self.driver._execute_command = mock_cli
def _get_driver(self, conf):
raise NotImplementedError
def _mock_command_execute(self, mock_commands):
def fake_execute_command(cli_type, *args, **kwargs):
if cli_type in mock_commands.keys():
if isinstance(mock_commands[cli_type], list):
ret = mock_commands[cli_type][0]
del mock_commands[cli_type][0]
return ret
elif isinstance(mock_commands[cli_type], tuple):
return mock_commands[cli_type]
else:
return mock_commands[cli_type](*args, **kwargs)
return FAKE_ERROR_RETURN
return fake_execute_command
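# Shapes accepted in mock_commands (illustrative):
#   {'CreateMap': SUCCEED,                    # single (rc, out) tuple returned on every call
#    'ShowReplica': [first_ret, second_ret],  # list entries consumed one per call
#    'ShowLV': self._mock_show_lv}            # callable invoked with the CLI arguments
# Any command not listed falls back to FAKE_ERROR_RETURN.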
def _mock_show_lv_for_migrate(self, *args, **kwargs):
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier_for_migration()
return self.cli_data.get_test_show_lv()
def _mock_show_lv(self, *args, **kwargs):
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier()
return self.cli_data.get_test_show_lv()
def _assert_cli_has_calls(self, expect_cli_cmd):
self.driver._execute_command.assert_has_calls(expect_cli_cmd)
class InfortrendFCCommonTestCase(InfortrendTestCass):
def __init__(self, *args, **kwargs):
super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendFCCommonTestCase, self).setUp()
self.configuration.volume_backend_name = 'infortrend_backend_1'
self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
self.configuration.san_password = '111111'
self.configuration.infortrend_provisioning = 'full'
self.configuration.infortrend_tiering = '0'
self.configuration.infortrend_pools_name = 'LV-1, LV-2'
self.configuration.infortrend_slots_a_channels_id = '0,5'
self.configuration.infortrend_slots_b_channels_id = '0,5'
self.configuration.infortrend_cli_timeout = 30
def _get_driver(self, conf):
return common_cli.InfortrendCommon('FC', configuration=conf)
def test_normal_channel(self):
test_map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(True)
self.assertDictEqual(test_map_dict, self.driver.map_dict)
self.assertDictEqual(test_target_dict, self.driver.target_dict)
def test_normal_channel_with_r_model(self):
test_map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {'0': [], '5': []},
}
test_target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {'0': '113', '5': '113'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(True)
self.assertDictEqual(test_map_dict, self.driver.map_dict)
self.assertDictEqual(test_target_dict, self.driver.target_dict)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictEqual(self.cli_data.test_fc_properties, properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_specific_channel(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '5'
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictEqual(
self.cli_data.test_fc_properties_with_specific_channel, properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_diff_target_id(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '5'
mock_commands = {
'ShowChannel':
self.cli_data.get_test_show_channel_with_diff_target_id(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[0]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictEqual(
self.cli_data.test_fc_properties_with_specific_channel, properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_multipath_with_r_model(self):
test_volume = self.cli_data.test_volume
test_connector = copy.deepcopy(self.cli_data.test_connector_fc)
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn(),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictEqual(
self.cli_data.test_fc_properties_multipath_r_model, properties)
def test_initialize_connection_with_get_wwn_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
test_lookup_map = self.cli_data.fake_lookup_map
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictEqual(
self.cli_data.test_fc_properties_zoning, properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning_r_model(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
test_lookup_map = self.cli_data.fake_lookup_map_r_model
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictEqual(
self.cli_data.test_fc_properties_zoning_r_model, properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_zoning_r_model_diff_target_id(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_fc
test_initiator_wwpns = test_connector['wwpns']
test_partition_id = self.cli_data.fake_partition_id[0]
test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
test_lookup_map = self.cli_data.fake_lookup_map_r_model
mock_commands = {
'ShowChannel':
self.cli_data.get_test_show_channel_r_model_diff_target_id(),
'ShowMap': self.cli_data.get_test_show_map(),
'CreateMap': SUCCEED,
'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(),
}
self._driver_setup(mock_commands)
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
properties = self.driver.initialize_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('ShowChannel'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
'wwn=%s' % test_initiator_wwpns[0]),
mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
'wwn=%s' % test_initiator_wwpns[1]),
mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
'wwn=%s' % test_initiator_wwpns[1]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictEqual(
self.cli_data.test_fc_properties_zoning_r_model, properties)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'ShowWWN': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.terminate_connection(test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection_with_zoning(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
test_lookup_map = self.cli_data.fake_lookup_map
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
}
self._driver_setup(mock_commands)
self.driver.map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
self.driver.fc_lookup_service = mock.Mock()
get_device_mapping_from_network = (
self.driver.fc_lookup_service.get_device_mapping_from_network
)
get_device_mapping_from_network.return_value = test_lookup_map
conn_info = self.driver.terminate_connection(
test_volume, test_connector)
get_device_mapping_from_network.assert_has_calls(
[mock.call(test_connector['wwpns'], test_all_target_wwpns)])
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
mock.call('ShowWWN'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictEqual(
self.cli_data.test_fc_terminate_conn_info, conn_info)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection_with_zoning_and_lun_map_exist(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_fc
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(),
}
self._driver_setup(mock_commands)
self.driver.map_dict = {
'slot_a': {'0': [], '5': []},
'slot_b': {},
}
self.driver.target_dict = {
'slot_a': {'0': '112', '5': '112'},
'slot_b': {},
}
self.driver.fc_lookup_service = mock.Mock()
conn_info = self.driver.terminate_connection(
test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
]
expect_conn_info = {'driver_volume_type': 'fibre_channel',
'data': {}}
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(expect_conn_info, conn_info)
class InfortrendiSCSICommonTestCase(InfortrendTestCass):
def __init__(self, *args, **kwargs):
super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendiSCSICommonTestCase, self).setUp()
self.configuration.volume_backend_name = 'infortrend_backend_1'
self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
self.configuration.san_password = '111111'
self.configuration.infortrend_provisioning = 'full'
self.configuration.infortrend_tiering = '0'
self.configuration.infortrend_pools_name = 'LV-1, LV-2'
self.configuration.infortrend_slots_a_channels_id = '1,2,4'
self.configuration.infortrend_slots_b_channels_id = '1,2,4'
def _get_driver(self, conf):
return common_cli.InfortrendCommon('iSCSI', configuration=conf)
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_iqn_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_has_map(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_no_such_name(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
def test_normal_channel(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info()
self.assertDictEqual(test_map_dict, self.driver.map_dict)
self.assertDictEqual(test_target_dict, self.driver.target_dict)
def test_normal_channel_with_multipath(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {'1': [], '2': [], '4': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {'1': '1', '2': '1', '4': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(multipath=True)
self.assertDictEqual(test_map_dict, self.driver.map_dict)
self.assertDictEqual(test_target_dict, self.driver.target_dict)
def test_specific_channel(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '2, 4'
test_map_dict = {
'slot_a': {'2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictEqual(test_map_dict, self.driver.map_dict)
self.assertDictEqual(test_target_dict, self.driver.target_dict)
def test_update_mcs_dict(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
test_mcs_dict = {
'slot_a': {'1': ['1', '2'], '2': ['4']},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictEqual(test_mcs_dict, self.driver.mcs_dict)
def test_mapping_info_with_mcs(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '2': ['4']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['1', '2'],
}
test_map_lun = ['2']
test_mcs_id = '0'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictEqual(test_map_chl, map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_mapping_info_with_mcs_multi_group(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {
'1': lun_list[2:],
'2': lun_list[:],
'3': lun_list[:],
'4': lun_list[1:],
'5': lun_list[:],
},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['3', '4'],
}
test_map_lun = ['1']
test_mcs_id = '1'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictEqual(test_map_chl, map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_specific_channel_with_multipath(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictEqual(test_map_dict, self.driver.map_dict)
self.assertDictEqual(test_target_dict, self.driver.target_dict)
def test_specific_channel_with_multipath_r_model(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
configuration.infortrend_slots_b_channels_id = '1'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {'1': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {'1': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictEqual(test_map_dict, self.driver.map_dict)
self.assertDictEqual(test_target_dict, self.driver.target_dict)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
self.cli_data.fake_partition_id[0],
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume(test_volume)
self.assertDictEqual(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_volume_with_create_fail(self):
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_volume,
test_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_snapshot_id = self.cli_data.fake_snapshot_id
test_pair_id = self.cli_data.fake_pair_id
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
expect_cli_cmd = [
mock.call('ShowPartition', '-l'),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id[0], '-y'),
mock.call('ShowSnapshot', 'part=%s' % test_partition_id),
mock.call('DeleteSnapshot', test_snapshot_id[0], '-y'),
mock.call('DeleteSnapshot', test_snapshot_id[1], '-y'),
mock.call('ShowMap', 'part=%s' % test_partition_id),
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('DeletePartition', test_partition_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_volume_with_sync_pair(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_sync_pair(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.delete_volume,
test_volume)
def test_delete_volume_with_delete_fail(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.delete_volume,
test_volume)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_volume_with_partition_not_found(self, log_warning):
test_volume = self.cli_data.test_volume
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_empty_list(),
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume_without_provider(self, log_info):
test_system_id = self.cli_data.fake_system_id[0]
test_volume = copy.deepcopy(self.cli_data.test_volume)
test_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(test_system_id, 16), 'None')
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_cloned_volume(self, log_info):
fake_partition_id = self.cli_data.fake_partition_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_src_volume = self.cli_data.test_volume
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
self.cli_data.fake_partition_id[1],
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
fake_partition_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_cloned_volume(
test_dst_volume, test_src_volume)
self.assertDictEqual(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_cloned_volume_different_size(self, log_info):
fake_partition_id = self.cli_data.fake_partition_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume['size'] = 10
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_src_volume = self.cli_data.test_volume
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
self.cli_data.fake_partition_id[1],
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
fake_partition_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_cloned_volume(
test_dst_volume, test_src_volume)
self.assertDictEqual(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
self.assertEqual(10, test_dst_volume['size'])
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_cloned_volume_with_create_replica_fail(self):
test_dst_volume = self.cli_data.test_dst_volume
test_src_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': FAKE_ERROR_RETURN,
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_cloned_volume,
test_dst_volume,
test_src_volume)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_export(self):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': test_volume['provider_location'],
}
self.driver = self._get_driver(self.configuration)
model_update = self.driver.create_export(None, test_volume)
self.assertDictEqual(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_get_volume_stats(self):
test_volume_states = self.cli_data.test_volume_states
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': self.cli_data.get_test_show_lv(),
'ShowPartition': self.cli_data.get_test_show_partition_detail(),
}
self._driver_setup(mock_commands)
self.driver.VERSION = '99.99'
volume_states = self.driver.get_volume_stats(True)
self.assertDictEqual(test_volume_states, volume_states)
def test_get_volume_stats_fail(self):
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.get_volume_stats)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
model_update = self.driver.create_snapshot(self.cli_data.test_snapshot)
self.assertEqual(fake_snapshot_id, model_update['provider_location'])
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot_without_partition_id(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_create_fail(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': FAKE_ERROR_RETURN,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_show_fail(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_snapshot(self, log_info):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteSnapshot': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_snapshot(test_snapshot)
self.assertEqual(1, log_info.call_count)
def test_delete_snapshot_without_provider_location(self):
test_snapshot = self.cli_data.test_snapshot
self.driver = self._get_driver(self.configuration)
self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
test_snapshot)
def test_delete_snapshot_with_fail(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteSnapshot': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.delete_snapshot,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_snapshot_with_sync_pair(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_si_sync_pair(),
'DeleteSnapshot': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.delete_snapshot,
test_snapshot)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot(self, log_info):
test_snapshot = self.cli_data.test_snapshot
test_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
self.cli_data.fake_partition_id[1],
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'ShowSnapshot':
self.cli_data.get_test_show_snapshot_detail_filled_block(),
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_snapshot_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume_from_snapshot(
test_dst_volume, test_snapshot)
self.assertDictEqual(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot_with_different_size(self, log_info):
test_snapshot = self.cli_data.test_snapshot
test_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume['size'] = 10
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
self.cli_data.fake_partition_id[1],
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'ShowSnapshot':
self.cli_data.get_test_show_snapshot_detail_filled_block(),
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_snapshot_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume_from_snapshot(
test_dst_volume, test_snapshot)
self.assertDictEqual(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
self.assertEqual(10, test_dst_volume['size'])
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot_without_filled_block(self, log_info):
test_snapshot = self.cli_data.test_snapshot
test_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_src_part_id = self.cli_data.fake_partition_id[0]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
self.cli_data.fake_partition_id[1],
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail(),
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica': [
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_dst_volume_id),
self.cli_data.get_test_show_replica_detail_for_migrate(
test_snapshot_id, test_dst_part_id, test_dst_volume_id),
],
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume_from_snapshot(
test_dst_volume, test_snapshot)
self.assertDictEqual(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
def test_create_volume_from_snapshot_without_provider_location(
self):
test_snapshot = self.cli_data.test_snapshot
test_dst_volume = self.cli_data.test_dst_volume
self.driver = self._get_driver(self.configuration)
self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
test_dst_volume,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictEqual(test_iscsi_properties, properties)
expect_cli_cmd = [
mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_iqn_not_exist(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1])
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
test_connector['initiator'] = test_initiator
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateIQN': SUCCEED,
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictEqual(test_iscsi_properties, properties)
expect_cli_cmd = [
mock.call('CreateIQN', test_initiator, test_initiator[-16:]),
mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_empty_map(self):
test_volume = self.cli_data.test_volume
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_empty_list(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictEqual(
self.cli_data.test_iscsi_properties_empty_map, properties)
def test_initialize_connection_with_create_map_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': FAKE_ERROR_RETURN,
'ShowNet': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
def test_initialize_connection_with_get_ip_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.initialize_connection,
test_volume,
test_connector)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_mcs(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs
test_target_portal = [test_iscsi_properties['data']['target_portal']]
test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
test_connector['multipath'] = False
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
'ShowMap': self.cli_data.get_test_show_map(),
'ShowIQN': self.cli_data.get_test_show_iqn(),
'CreateMap': SUCCEED,
'ShowNet': self.cli_data.get_test_show_net(),
'ExecuteCommand': self.cli_data.get_fake_discovery(
test_target_iqn, test_target_portal),
}
self._driver_setup(mock_commands, configuration)
properties = self.driver.initialize_connection(
test_volume, test_connector)
self.assertDictEqual(test_iscsi_properties, properties)
expect_cli_cmd = [
mock.call('CreateMap', 'part', test_partition_id, '1', '0', '2',
'iqn=%s' % test_connector['initiator']),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_new_size = 10
test_expand_size = test_new_size - test_volume['size']
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.extend_volume(test_volume, test_new_size)
expect_cli_cmd = [
mock.call('SetPartition', 'expand', test_partition_id,
'size=%sGB' % test_expand_size),
]
self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume_mb(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_new_size = 5.5
test_expand_size = round((test_new_size - test_volume['size']) * 1024)
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.extend_volume(test_volume, test_new_size)
expect_cli_cmd = [
mock.call('SetPartition', 'expand', test_partition_id,
'size=%sMB' % test_expand_size),
]
self._assert_cli_has_calls(expect_cli_cmd)
def test_extend_volume_fail(self):
test_volume = self.cli_data.test_volume
test_new_size = 10
mock_commands = {
'SetPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.extend_volume,
test_volume,
test_new_size)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'DeleteMap': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteIQN': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.terminate_connection(test_volume, test_connector)
expect_cli_cmd = [
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('ShowMap'),
mock.call('DeleteIQN', test_connector['initiator'][-16:]),
]
self._assert_cli_has_calls(expect_cli_cmd)
def test_terminate_connection_fail(self):
test_volume = self.cli_data.test_volume
test_connector = self.cli_data.test_connector_iscsi
mock_commands = {
'DeleteMap': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.terminate_connection,
test_volume,
test_connector)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
test_pair_id = self.cli_data.fake_pair_id[0]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
test_dst_part_id,
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id),
'DeleteReplica': SUCCEED,
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.migrate_volume(test_volume, test_host)
expect_cli_cmd = [
mock.call('CreatePartition',
fake_pool['pool_id'],
test_volume['id'].replace('-', ''),
'size=%s' % (test_volume['size'] * 1024),
''),
mock.call('ShowPartition'),
mock.call('CreateReplica',
'Cinder-Migrate',
'part', test_src_part_id,
'part', test_dst_part_id,
'type=mirror'),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id, '-y'),
mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
mock.call('DeletePartition', test_src_part_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertTrue(rc)
self.assertDictEqual(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'warning')
def test_migrate_volume_with_invalid_storage(self, log_warning):
fake_host = self.cli_data.fake_host
test_volume = self.cli_data.test_volume
mock_commands = {
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.migrate_volume(test_volume, fake_host)
self.assertFalse(rc)
self.assertIsNone(model_update)
self.assertEqual(1, log_warning.call_count)
def test_migrate_volume_with_get_part_id_fail(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'DeleteMap': SUCCEED,
'CreateReplica': SUCCEED,
'CreateMap': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.migrate_volume,
test_volume,
test_host)
def test_migrate_volume_with_create_replica_fail(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume['id'].replace('-', ''), fake_pool['pool_id']),
'DeleteMap': SUCCEED,
'CreateReplica': FAKE_ERROR_RETURN,
'CreateMap': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.migrate_volume,
test_volume,
test_host)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume_timeout(self):
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
configuration = copy.copy(self.configuration)
configuration.infortrend_cli_timeout = 0
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id,
'Copy'),
}
self._driver_setup(mock_commands, configuration)
self.assertRaises(
exception.VolumeDriverException,
self.driver.migrate_volume,
test_volume,
test_host)
def test_manage_existing_get_size(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
size = self.driver.manage_existing_get_size(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('ShowMap', 'part=%s' % test_partition_id),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, size)
def test_manage_existing_get_size_with_import(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume_with_import
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
test_ref_volume['source-name'], test_pool),
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
size = self.driver.manage_existing_get_size(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('ShowMap', 'part=%s' % test_partition_id),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, size)
def test_manage_existing_get_size_in_use(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': self.cli_data.get_test_show_map(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_no_source_id(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_dst_volume
self.driver = self._get_driver(self.configuration)
self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_show_part_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
mock_commands = {
'ShowPartition': FAKE_ERROR_RETURN,
'ShowMap': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
def test_manage_existing_get_size_show_map_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'ShowMap': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing_get_size,
test_volume,
test_ref_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing(self, log_info):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
test_partition_id,
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'SetPartition': SUCCEED,
'ShowDevice': self.cli_data.get_test_show_device(),
}
self._driver_setup(mock_commands)
model_update = self.driver.manage_existing(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('SetPartition', test_partition_id,
'name=%s' % test_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
self.assertDictEqual(test_model_update, model_update)
def test_manage_existing_rename_fail(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
test_pool = self.cli_data.fake_lv_id[0]
test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
'SetPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.manage_existing,
test_volume,
test_ref_volume)
def test_manage_existing_with_part_not_found(self):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail(),
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume,
test_ref_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing_with_import(self, log_info):
test_volume = self.cli_data.test_volume
test_ref_volume = self.cli_data.test_ref_volume_with_import
test_pool = self.cli_data.fake_lv_id[0]
test_partition_id = self.cli_data.fake_partition_id[2]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
test_partition_id,
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_partition_detail(
test_ref_volume['source-name'], test_pool),
'SetPartition': SUCCEED,
'ShowDevice': self.cli_data.get_test_show_device(),
}
self._driver_setup(mock_commands)
model_update = self.driver.manage_existing(
test_volume, test_ref_volume)
expect_cli_cmd = [
mock.call('SetPartition', test_partition_id,
'name=%s' % test_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
self.assertDictEqual(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'info')
def test_unmanage(self, log_info):
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.unmanage(test_volume)
expect_cli_cmd = [
mock.call(
'SetPartition',
test_partition_id,
'name=cinder-unmanaged-%s' % test_volume_id[:-17]),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_retype_without_change(self, log_info):
test_volume = self.cli_data.test_volume
test_new_type = self.cli_data.test_new_type
test_diff = {'extra_specs': {}}
test_host = self.cli_data.test_migrate_host_2
self.driver = self._get_driver(self.configuration)
rc = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
self.assertTrue(rc)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_retype_with_change_provision(self, log_warning):
test_volume = self.cli_data.test_volume
test_new_type = self.cli_data.test_new_type
test_diff = self.cli_data.test_diff
test_host = self.cli_data.test_migrate_host_2
self.driver = self._get_driver(self.configuration)
rc = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
self.assertFalse(rc)
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_retype_with_migrate(self):
fake_pool = copy.deepcopy(self.cli_data.fake_pool)
test_host = copy.deepcopy(self.cli_data.test_migrate_host)
test_volume = self.cli_data.test_volume
test_volume_id = test_volume['id'].replace('-', '')
test_new_type = self.cli_data.test_new_type
test_diff = self.cli_data.test_diff
test_src_part_id = self.cli_data.fake_partition_id[0]
test_dst_part_id = self.cli_data.fake_partition_id[2]
test_pair_id = self.cli_data.fake_pair_id[0]
test_model_update = {
'provider_location': 'partition_id^%s@system_id^%s' % (
test_dst_part_id,
int(self.cli_data.fake_system_id[0], 16))
}
mock_commands = {
'ShowSnapshot': SUCCEED,
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(
test_volume_id, fake_pool['pool_id']),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv_for_migrate,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
test_src_part_id, test_dst_part_id, test_volume_id),
'DeleteReplica': SUCCEED,
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
rc, model_update = self.driver.retype(
None, test_volume, test_new_type, test_diff, test_host)
min_size = int(test_volume['size'] * 1024 * 0.2)
create_params = 'init=disable min=%sMB' % min_size
expect_cli_cmd = [
mock.call('ShowSnapshot', 'part=%s' % test_src_part_id),
mock.call(
'CreatePartition',
fake_pool['pool_id'],
test_volume['id'].replace('-', ''),
'size=%s' % (test_volume['size'] * 1024),
create_params,
),
mock.call('ShowPartition'),
mock.call(
'CreateReplica',
'Cinder-Migrate',
'part', test_src_part_id,
'part', test_dst_part_id,
'type=mirror'
),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id, '-y'),
mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
mock.call('DeletePartition', test_src_part_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertTrue(rc)
self.assertDictEqual(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_update_migrated_volume(self):
src_volume = self.cli_data.test_volume
dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
test_dst_part_id = self.cli_data.fake_partition_id[1]
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
test_model_update = {
'_name_id': None,
'provider_location': dst_volume['provider_location'],
}
mock_commands = {
'SetPartition': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.update_migrated_volume(
None, src_volume, dst_volume, 'available')
expect_cli_cmd = [
mock.call('SetPartition', test_dst_part_id,
'name=%s' % src_volume['id'].replace('-', '')),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertDictEqual(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
def test_update_migrated_volume_rename_fail(self):
src_volume = self.cli_data.test_volume
dst_volume = self.cli_data.test_dst_volume
dst_volume['_name_id'] = 'fake_name_id'
test_dst_part_id = self.cli_data.fake_partition_id[1]
dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
mock_commands = {
'SetPartition': FAKE_ERROR_RETURN
}
self._driver_setup(mock_commands)
model_update = self.driver.update_migrated_volume(
None, src_volume, dst_volume, 'available')
self.assertEqual({'_name_id': 'fake_name_id'}, model_update)
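# A minimal standalone sketch (not part of the driver tests above) of the
# verification pattern those tests rely on: expected CLI invocations are
# listed as mock.call tuples and checked in order, which is presumably what
# _assert_cli_has_calls wraps.  The fake_cli name below is hypothetical.
if __name__ == '__main__':
    from unittest import mock as _mock
    fake_cli = _mock.Mock()
    fake_cli('ShowMap')
    fake_cli('DeleteMap', 'part', 'part-id-0', '-y')
    fake_cli('DeletePartition', 'part-id-0', '-y')
    expected = [
        _mock.call('DeleteMap', 'part', 'part-id-0', '-y'),
        _mock.call('DeletePartition', 'part-id-0', '-y'),
    ]
    # assert_has_calls requires the expected calls to appear consecutively and
    # in order, but tolerates unrelated calls before or after them, which is
    # why each test only lists the commands it cares about.
    fake_cli.assert_has_calls(expected)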
|
|
"""Definitions for the `Fallback` class."""
import os
import astropy.constants as c
import numpy as np
from scipy.interpolate import interp1d
from mosfit.constants import C_CGS, DAY_CGS, FOUR_PI, M_SUN_CGS
from mosfit.modules.engines.engine import Engine
CLASS_NAME = 'Fallback'
class Fallback(Engine):
"""A tde engine."""
def __init__(self, **kwargs):
"""Initialize module.
        Loads and interpolates TDE simulation data. The simulation data are
        from Guillochon (2013) and can be found on astrocrash.net.
        The files in the data directory have been converted from dm/dt space
to dm/de space.
"""
super(Fallback, self).__init__(**kwargs)
G = c.G.cgs.value # 6.67259e-8 cm3 g-1 s-2
Mhbase = 1.0e6 * M_SUN_CGS # this is the generic size of bh used
self.EXTRAPOLATE = True
# ------ DIRECTORY PARAMETERS -------
# It is assumed that there are different files for each beta
# (such as 2.500.dat for beta = 2.5)
# The first row is energy, the second is dmde.
self._gammas = ['4-3', '5-3']
# dictionaries with gamma's as keys.
self._beta_slope = {self._gammas[0]: [], self._gammas[1]: []}
self._beta_yinter = {self._gammas[0]: [], self._gammas[1]: []}
self._sim_beta = {self._gammas[0]: [], self._gammas[1]: []}
self._mapped_time = {self._gammas[0]: [], self._gammas[1]: []}
# for converting back from mapped time to actual times and doing
# interpolation in actual time
self._premaptime = {self._gammas[0]: [], self._gammas[1]: []}
self._premapdmdt = {self._gammas[0]: [], self._gammas[1]: []}
for g in self._gammas:
dmdedir = (os.path.dirname(__file__)[:-15] + 'models/tde/data/' +
g + '/')
# --------- GET SIMULATION BETAS -----------------
sim_beta_files = os.listdir(dmdedir)
simbeta = [float(b[:-4]) for b in sim_beta_files]
sortedindices = np.argsort(simbeta)
simbeta = [simbeta[i] for i in sortedindices]
sim_beta_files = [sim_beta_files[i] for i in sortedindices]
self._sim_beta[g].extend(simbeta)
# ----- CREATE INTERPOLATION FUNCTIONS; FIND SLOPES & YINTERs -----
time = {}
dmdt = {}
ipeak = {}
mapped_time = {}
# get dmdt and t for the lowest beta value
# energy & dmde (cgs)
e, d = np.loadtxt(dmdedir + sim_beta_files[0])
# only convert dm/de --> dm/dt for mass that is bound to BH (e < 0)
ebound = e[e < 0]
dmdebound = d[e < 0]
if min(dmdebound) < 0: # shouldn't happen, just a check
print('beta, gamma, negative dmde bound:', self._sim_beta[g],
g, dmdebound[dmdebound < 0])
# calculate de/dt, time and dm/dt arrays
            # de/dt in log(erg/s), time in log(seconds), dm/dt in log(g/s)
dedt = (1.0 / 3.0) * (-2.0 * ebound) ** (5.0 / 2.0) / \
(2.0 * np.pi * G * Mhbase)
time['lo'] = np.log10((2.0 * np.pi * G * Mhbase) *
(-2.0 * ebound) ** (-3.0 / 2.0))
dmdt['lo'] = np.log10(dmdebound * dedt)
ipeak['lo'] = np.argmax(dmdt['lo'])
# split time['lo'] & dmdt['lo'] into pre-peak and post-peak array
time['lo'] = np.array([
time['lo'][:ipeak['lo']],
time['lo'][ipeak['lo']:]]) # peak in array 2
dmdt['lo'] = np.array([
dmdt['lo'][:ipeak['lo']],
dmdt['lo'][ipeak['lo']:]]) # peak in array 2
# will contain time/dmdt arrays
# (split into pre & post peak times/dmdts)
# for each beta value
self._premaptime[g].append(np.copy(time['lo']))
self._premapdmdt[g].append(np.copy(dmdt['lo']))
for i in range(1, len(self._sim_beta[g])):
                # indexing this way because slopes and y-intercepts are
                # calculated BETWEEN each pair of adjacent simulation betas
e, d = np.loadtxt(dmdedir + sim_beta_files[i])
# only convert dm/de --> dm/dt for mass bound to BH (e < 0)
ebound = e[e < 0]
dmdebound = d[e < 0]
if min(dmdebound) < 0: # shouldn't happen, just a check
print('beta, gamma, negative dmde bound:',
self._sim_beta[g], g, dmdebound[dmdebound < 0])
# calculate de/dt, time and dm/dt arrays
# de/dt in log(erg/s), time in log(seconds), dm/dt in log(g/s)
dedt = (1.0 / 3.0) * (-2.0 * ebound) ** (5.0 / 2.0) / \
(2.0 * np.pi * G * Mhbase)
time['hi'] = np.log10((2.0 * np.pi * G * Mhbase) *
(-2.0 * ebound) ** (-3.0 / 2.0))
dmdt['hi'] = np.log10(dmdebound * dedt)
ipeak['hi'] = np.argmax(dmdt['hi'])
# split time_hi and dmdt_hi into pre-peak and post-peak array
# peak in 2nd array
time['hi'] = np.array([time['hi'][:ipeak['hi']],
time['hi'][ipeak['hi']:]])
dmdt['hi'] = np.array([dmdt['hi'][:ipeak['hi']],
dmdt['hi'][ipeak['hi']:]])
# will contain time/dmdt arrays
# (split into pre & post peak times/dmdts)
# for each beta value
self._premapdmdt[g].append(np.copy(dmdt['hi']))
self._premaptime[g].append(np.copy(time['hi']))
mapped_time['hi'] = []
mapped_time['lo'] = []
self._beta_slope[g].append([])
self._beta_yinter[g].append([])
self._mapped_time[g].append([])
for j in [0, 1]: # once before peak, once after peak
# choose more densely sampled curve to map times to 0-1
# less densely sampled curve will be interpolated to match
if len(time['lo'][j]) < len(time['hi'][j]):
# hi array more densely sampled
interp = 'lo'
nointerp = 'hi'
else:
# will also catch case where they have the same lengths
interp = 'hi'
nointerp = 'lo'
# map times from more densely sampled curves
# (both pre & post peak, might be from diff. dmdts)
# to 0 - 1
mapped_time[nointerp].append(
1. / (time[nointerp][j][-1] - time[nointerp][j][0]) *
(time[nointerp][j] - time[nointerp][j][0]))
mapped_time[interp].append(
1. / (time[interp][j][-1] - time[interp][j][0]) *
(time[interp][j] - time[interp][j][0]))
# ensure bounds are same for interp and nointerp
# before interpolation
# (should be 0 and 1 from above, but could be slightly off
# due to rounding errors in python)
mapped_time[interp][j][0] = 0
mapped_time[interp][j][-1] = 1
mapped_time[nointerp][j][0] = 0
mapped_time[nointerp][j][-1] = 1
func = interp1d(mapped_time[interp][j], dmdt[interp][j])
dmdtinterp = func(mapped_time[nointerp][j])
if interp == 'hi':
slope = ((dmdtinterp - dmdt['lo'][j]) /
(self._sim_beta[g][i] - self._sim_beta[g][
i - 1]))
else:
slope = ((dmdt['hi'][j] - dmdtinterp) /
(self._sim_beta[g][i] - self._sim_beta[g][
i - 1]))
self._beta_slope[g][-1].append(slope)
yinter1 = (dmdt[nointerp][j] - self._beta_slope[g][-1][j] *
self._sim_beta[g][i - 1])
yinter2 = (dmdtinterp - self._beta_slope[g][-1][j] *
self._sim_beta[g][i])
self._beta_yinter[g][-1].append((yinter1 + yinter2) / 2.0)
self._mapped_time[g][-1].append(
np.array(mapped_time[nointerp][j]))
time['lo'] = np.copy(time['hi'])
dmdt['lo'] = np.copy(dmdt['hi'])
def process(self, **kwargs):
"""Process module."""
beta_interp = True
beta_outside_range = False
Mhbase = 1.0e6 # in units of Msolar, this is generic Mh used
# in astrocrash sims
Mstarbase = 1.0 # in units of Msolar
Rstarbase = 1.0 # in units of Rsolar
# this is not beta, but rather a way to map beta_4-3 --> beta_5-3
# b = 0 --> min disruption, b = 1 --> full disruption,
# b = 2 --> max beta of sims
self._b = kwargs['b']
if 0 <= self._b < 1:
# 0.6 is min disruption beta for gamma = 4/3
# 1.85 is full disruption beta for gamma = 4/3
beta43 = 0.6 + 1.25 * self._b # 0.6 + (1.85 - 0.6)*b
# 0.5 is min disruption beta for gamma = 5/3
# 0.9 is full disruption beta for gamma = 5/3
beta53 = 0.5 + 0.4 * self._b # 0.5 + (0.9 - 0.5)*b
self._betas = {'4-3': beta43, '5-3': beta53}
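            # e.g. b = 0.5 gives beta43 = 0.6 + 1.25 * 0.5 = 1.225 and
            # beta53 = 0.5 + 0.4 * 0.5 = 0.70, i.e. halfway between the
            # minimum-disruption and full-disruption betas for each gamma.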
elif 1 <= self._b <= 2:
beta43 = 1.85 + 2.15 * (self._b - 1)
beta53 = 0.9 + 1.6 * (self._b - 1)
self._betas = {'4-3': beta43, '5-3': beta53}
else:
self._printer.prt(
'b outside range, bmin = 0; bmax = 2; b = {}'.format(
self._b))
self._b = 2.0 if self._b > 2 else 0.0
beta_outside_range = True
# GET GAMMA VALUE
gamma_interp = False
self._Mstar = kwargs.get(self.key('starmass'))
if self._Mstar <= 0.3 or self._Mstar >= 22:
gammas = [self._gammas[1]] # gamma = ['5-3']
self._beta = self._betas['5-3']
elif 1 <= self._Mstar <= 15:
gammas = [self._gammas[0]] # gamma = ['4-3']
self._beta = self._betas['4-3']
elif 0.3 < self._Mstar < 1:
# region going from gamma = 5/3 to gamma = 4/3 as mass increases
gamma_interp = True
gammas = self._gammas
# gfrac should == 0 for 4/3; == 1 for 5/3
gfrac = (self._Mstar - 1.) / (0.3 - 1.)
# beta_43 is always larger than beta_53
self._beta = self._betas['5-3'] + (
self._betas['4-3'] - self._betas['5-3']) * (1. - gfrac)
elif 15 < self._Mstar < 22:
# region going from gamma = 4/3 to gamma = 5/3 as mass increases
gamma_interp = True
gammas = self._gammas
# gfrac should == 0 for 4/3; == 1 for 5/3
gfrac = (self._Mstar - 15.) / (22. - 15.)
# beta_43 is always larger than beta_53
self._beta = self._betas['5-3'] + (
self._betas['4-3'] - self._betas['5-3']) * (1. - gfrac)
timedict = {} # will hold time arrays for each g in gammas
dmdtdict = {} # will hold dmdt arrays for each g in gammas
for g in gammas:
# find simulation betas to interpolate between
for i in range(len(self._sim_beta[g])):
if self._betas[g] == self._sim_beta[g][i]:
# no need to interp, already have dmdt & t for this beta
beta_interp = False
interp_index_low = i
break
if self._betas[g] < self._sim_beta[g][i]:
interp_index_high = i
interp_index_low = i - 1
beta_interp = True
break
if beta_interp:
# ----------- LINEAR BETA INTERPOLATION --------------
# get new dmdts (2 arrays, pre & post peak (peak in array 2))
# use interp_index_low bc of how slope and yintercept are saved
# (slope[0] corresponds to between beta[0] and beta[1] etc.)
dmdt = np.array([
self._beta_yinter[g][interp_index_low][0] +
self._beta_slope[g][interp_index_low][0] * self._betas[g],
self._beta_yinter[g][interp_index_low][1] +
self._beta_slope[g][interp_index_low][1] * self._betas[g]])
# map mapped_times back to actual times, requires interpolation
# in time
# first for pre peak times
time = []
for i in [0, 1]:
# interp_index_low indexes beta
# mapped time between beta low and beta high
time_betalo = (
self._mapped_time[g][interp_index_low][i] *
(self._premaptime[g][interp_index_low][i][-1] -
self._premaptime[g][interp_index_low][i][0]) +
self._premaptime[g][interp_index_low][i][0])
time_betahi = (
self._mapped_time[g][interp_index_low][i] *
(self._premaptime[g][interp_index_high][i][-1] -
self._premaptime[g][interp_index_high][i][0]) +
self._premaptime[g][interp_index_high][i][0])
time.append(
time_betalo + (time_betahi - time_betalo) *
(self._betas[g] -
self._sim_beta[g][interp_index_low]) /
(self._sim_beta[g][interp_index_high] -
self._sim_beta[g][interp_index_low]))
time = np.array(time)
timedict[g] = time
dmdtdict[g] = dmdt
elif not beta_interp:
timedict[g] = np.copy(self._premaptime[g][interp_index_low])
dmdtdict[g] = np.copy(self._premapdmdt[g][interp_index_low])
# ---------------- GAMMA INTERPOLATION -------------------
if gamma_interp:
mapped_time = {'4-3': [], '5-3': []}
time = []
dmdt = []
for j in [0, 1]: # once before peak, once after peak
# choose more densely sampled curve to map times to 0-1
# less densely sampled curve will be interpolated to match
if len(timedict['4-3'][j]) < len(timedict['5-3'][j]):
# gamma = 5/3 array more densely sampled
interp = '4-3'
nointerp = '5-3'
else:
# will also catch case where they have the same lengths
interp = '5-3'
nointerp = '4-3'
# map times from more densely sampled curves
# (both pre & post peak, might be from diff. dmdts)
# to 0 - 1
mapped_time[nointerp].append(
1. / (timedict[nointerp][j][-1] -
timedict[nointerp][j][0]) *
(timedict[nointerp][j] - timedict[nointerp][j][0]))
mapped_time[interp].append(
1. / (timedict[interp][j][-1] - timedict[interp][j][0]) *
(timedict[interp][j] - timedict[interp][j][0]))
# ensure bounds same for interp & nointerp before interpolation
# (they should be 0 and 1 from above, but could be slightly off
# due to rounding errors in python)
mapped_time[interp][j][0] = 0
mapped_time[interp][j][-1] = 1
mapped_time[nointerp][j][0] = 0
mapped_time[nointerp][j][-1] = 1
func = interp1d(mapped_time[interp][j], dmdtdict[interp][j])
dmdtdict[interp][j] = func(mapped_time[nointerp][j])
# recall gfrac = 0 --> gamma = 4/3, gfrac = 1 --> gamma 5/3
if interp == '5-3':
# then mapped_time = mapped_time[nointerp] =
# mapped_time['4-3']
time53 = (mapped_time['4-3'][j] * (timedict['5-3'][j][-1] -
timedict['5-3'][j][0]) +
timedict['5-3'][j][0])
# convert back from logspace before adding to time array
time.extend(10 ** (timedict['4-3'][j] +
(time53 - timedict['4-3'][j]) * gfrac))
else:
# interp == '4-3'
time43 = (mapped_time['5-3'][j] * (timedict['4-3'][j][-1] -
timedict['4-3'][j][0]) +
timedict['4-3'][j][0])
# convert back from logspace before adding to time array
time.extend(10 ** (time43 +
(timedict['5-3'][j] - time43) * gfrac))
# recall gfrac = 0 --> gamma = 4/3, gfrac = 1 --> gamma 5/3
# convert back from logspace before adding to dmdt array
dmdt.extend(10 ** (dmdtdict['4-3'][j] +
(dmdtdict['5-3'][j] -
dmdtdict['4-3'][j]) * gfrac))
else: # gamma_interp == False
# in this case, g will still be g from loop over gammas,
# but there was only one gamma (no interpolation),
# so g is the correct gamma
# note that timedict[g] is a list not an array
# no longer need a prepeak and postpeak array
time = np.concatenate((timedict[g][0], timedict[g][1]))
time = 10 ** time
dmdt = np.concatenate((dmdtdict[g][0], dmdtdict[g][1]))
dmdt = 10 ** dmdt
time = np.array(time)
dmdt = np.array(dmdt)
# ----------- SCALE dm/dt TO BH & STAR MASS & STAR RADIUS -------------
if 'dense_times' in kwargs:
self._times = kwargs['dense_times'] # time in days
else:
print('in fallback, dense_times NOT in kwargs')
self._times = kwargs['rest_times']
# bh mass for dmdt's in astrocrash is 1e6 solar masses
# dmdt ~ Mh^(-1/2)
self._Mh = kwargs['bhmass'] # in units of solar masses
# Assume that BDs below 0.1 solar masses are n=1 polytropes
if self._Mstar < 0.1:
Mstar_Tout = 0.1
else:
Mstar_Tout = self._Mstar
# calculate Rstar from Mstar (using Tout et. al. 1996),
        # the Tout et al. fit assumes Z = 0.02 (no longer the accepted
        # solar Z) and ZAMS stars
Z = 0.0134 # assume solar metallicity
log10_Z_02 = np.log10(Z / 0.02)
# Tout coefficients for calculating Rstar
Tout_theta = (1.71535900 + 0.62246212 * log10_Z_02 - 0.92557761 *
log10_Z_02 ** 2 - 1.16996966 * log10_Z_02 ** 3 -
0.30631491 *
log10_Z_02 ** 4)
Tout_l = (6.59778800 - 0.42450044 * log10_Z_02 - 12.13339427 *
log10_Z_02 ** 2 - 10.73509484 * log10_Z_02 ** 3 -
2.51487077 * log10_Z_02 ** 4)
Tout_kpa = (10.08855000 - 7.11727086 * log10_Z_02 - 31.67119479 *
log10_Z_02 ** 2 - 24.24848322 * log10_Z_02 ** 3 -
5.33608972 * log10_Z_02 ** 4)
Tout_lbda = (1.01249500 + 0.32699690 * log10_Z_02 - 0.00923418 *
log10_Z_02 ** 2 - 0.03876858 * log10_Z_02 ** 3 -
0.00412750 * log10_Z_02 ** 4)
Tout_mu = (0.07490166 + 0.02410413 * log10_Z_02 + 0.07233664 *
log10_Z_02 ** 2 + 0.03040467 * log10_Z_02 ** 3 +
0.00197741 * log10_Z_02 ** 4)
Tout_nu = 0.01077422
Tout_eps = (3.08223400 + 0.94472050 * log10_Z_02 - 2.15200882 *
log10_Z_02 ** 2 - 2.49219496 * log10_Z_02 ** 3 -
0.63848738 * log10_Z_02 ** 4)
Tout_o = (17.84778000 - 7.45345690 * log10_Z_02 - 48.9606685 *
log10_Z_02 ** 2 - 40.05386135 * log10_Z_02 ** 3 -
9.09331816 * log10_Z_02 ** 4)
Tout_pi = (0.00022582 - 0.00186899 * log10_Z_02 + 0.00388783 *
log10_Z_02 ** 2 + 0.00142402 * log10_Z_02 ** 3 -
0.00007671 * log10_Z_02 ** 4)
        # calculate Rstar in units of Rsolar
Rstar = ((Tout_theta * Mstar_Tout ** 2.5 + Tout_l *
Mstar_Tout ** 6.5 +
Tout_kpa * Mstar_Tout ** 11 + Tout_lbda *
Mstar_Tout ** 19 +
Tout_mu * Mstar_Tout ** 19.5) /
(Tout_nu + Tout_eps * Mstar_Tout ** 2 + Tout_o *
Mstar_Tout ** 8.5 + Mstar_Tout ** 18.5 + Tout_pi *
Mstar_Tout ** 19.5))
dmdt = (dmdt * np.sqrt(Mhbase / self._Mh) *
(self._Mstar / Mstarbase) ** 2.0 * (Rstarbase / Rstar) ** 1.5)
# tpeak ~ Mh^(1/2) * Mstar^(-1)
time = (time * np.sqrt(self._Mh / Mhbase) * (Mstarbase / self._Mstar) *
(Rstar / Rstarbase) ** 1.5)
time = time / DAY_CGS # time is now in days to match self._times
tfallback = np.copy(time[0])
self._rest_t_explosion = kwargs['resttexplosion'] # units = days
# ----------- EXTRAPOLATE dm/dt TO EARLY TIMES -------------
# use power law to fit : dmdt = b*t^xi
if self.EXTRAPOLATE and self._rest_t_explosion > self._times[0]:
dfloor = min(dmdt) # will be at late times if using James's
            # simulation data (which has already been extrapolated to late times)
# not within 1% of floor, extrapolate --> NECESSARY?
if dmdt[0] >= dfloor * 1.01:
# try shifting time before extrapolation to make power law drop
# off more suddenly around tfallback
time = time + 0.9 * tfallback
# this will ensure extrapolation will extend back to first
# transient time.
# requires self._rest_t_explosion > self._times[0]
# time = (time - tfallback + self._rest_t_explosion -
# self._times[0])
ipeak = np.argmax(dmdt) # index of peak
# the following makes sure there is enough prepeak sampling for
# good extrapolation
if ipeak < 1000:
prepeakfunc = interp1d(time[:ipeak], dmdt[:ipeak])
prepeaktimes = np.logspace(np.log10(time[0]),
np.log10(time[ipeak - 1]), 1000)
# prepeaktimes = np.linspace(time[0], time[ipeak - 1],
# num=1000)
if prepeaktimes[-1] > time[ipeak - 1]:
prepeaktimes[-1] = time[ipeak - 1]
if prepeaktimes[0] < time[0]:
prepeaktimes[0] = time[0]
prepeakdmdt = prepeakfunc(prepeaktimes)
else:
prepeaktimes = time[:ipeak]
prepeakdmdt = dmdt[:ipeak]
start = 0
# last index of first part of data used to get power law fit
index1 = int(len(prepeakdmdt) * 0.1)
# last index of second part of data used to get power law fit
index2 = int(len(prepeakdmdt) * 0.15)
t1 = prepeaktimes[start:index1]
d1 = prepeakdmdt[start:index1]
t2 = prepeaktimes[index2 - (index1 - start):index2]
d2 = prepeakdmdt[index2 - (index1 - start):index2]
# exponent for power law fit
xi = np.log(d1 / d2) / np.log(t1 / t2)
xiavg = np.mean(xi)
# multiplicative factor for power law fit
b1 = d1 / (t1 ** xiavg)
bavg = np.mean(b1)
tfloor = 0.01 + 0.9 * tfallback # want first time ~0 (0.01)
indexext = len(time[time < prepeaktimes[index1]])
textp = np.linspace(tfloor, time[int(indexext)], num=ipeak * 5)
dextp = bavg * (textp ** xiavg)
time = np.concatenate((textp, time[int(indexext) + 1:]))
time = time - 0.9 * tfallback # shift back to original times
dmdt = np.concatenate((dextp, dmdt[int(indexext) + 1:]))
# try aligning first fallback time of simulation
# (whatever first time is before early t extrapolation)
# with parameter texplosion
time = time - tfallback + self._rest_t_explosion
tpeak = time[np.argmax(dmdt)]
timeinterpfunc = interp1d(time, dmdt)
lengthpretimes = len(np.where(self._times < time[0])[0])
lengthposttimes = len(np.where(self._times > time[-1])[0])
# this removes all extrapolation by interp1d by setting dmdtnew = 0
# outside bounds of self._times
dmdt1 = np.zeros(lengthpretimes)
dmdt3 = np.zeros(lengthposttimes)
# include len(self._times) instead of just using -lengthposttimes
# for indexing in case lengthposttimes == 0
dmdt2 = timeinterpfunc(self._times[lengthpretimes:(len(self._times) -
lengthposttimes)])
dmdtnew = np.append(dmdt1, dmdt2)
dmdtnew = np.append(dmdtnew, dmdt3)
dmdtnew[dmdtnew < 0] = 0 # set floor for dmdt
self._efficiency = kwargs['efficiency']
# luminosities in erg/s
luminosities = (self._efficiency * dmdtnew *
c.c.cgs.value * c.c.cgs.value)
# -------------- EDDINGTON LUMINOSITY CUT -------------------
# Assume solar metallicity for now
# 0.2*(1 + X) = mean Thomson opacity
kappa_t = 0.2 * (1 + 0.74)
Ledd = (FOUR_PI * c.G.cgs.value * self._Mh * M_SUN_CGS *
C_CGS / kappa_t)
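        # e.g. for Mh = 1e6 Msun and this opacity, Ledd is roughly 1.4e44 erg/s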
        # two options for a soft Ledd cut were considered; the harmonic form
        # below is the one applied (see the standalone sketch after this class)
# luminosities = np.where(
# luminosities > Ledd, (1. + np.log10(luminosities/Ledd)) * Ledd,
# luminosities)
luminosities = (luminosities * Ledd / (luminosities + Ledd))
return {'dense_luminosities': luminosities, 'Rstar': Rstar,
'tpeak': tpeak, 'beta': self._beta, 'starmass': self._Mstar,
'dmdt': dmdtnew, 'Ledd': Ledd, 'tfallback': float(tfallback)}
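# ---------------------------------------------------------------------------
# A minimal standalone sketch (not part of the original engine) of the two
# rescalings applied in Fallback.process() above and of the soft Eddington
# cap.  The function names are illustrative only; dmdt_sim/time_sim stand for
# simulation curves computed at Mh = 1e6 Msun, Mstar = 1 Msun, Rstar = 1 Rsun.
def _scale_fallback_sketch(dmdt_sim, time_sim, mh6, mstar, rstar):
    """Rescale a simulated fallback curve to a new black hole / star:
    dmdt ~ Mh^(-1/2) * Mstar^2 * Rstar^(-3/2) and
    t    ~ Mh^(+1/2) * Mstar^(-1) * Rstar^(+3/2),
    with mh6 in units of 1e6 Msun, mstar in Msun and rstar in Rsun.
    """
    dmdt = dmdt_sim * mh6 ** -0.5 * mstar ** 2.0 * rstar ** -1.5
    time = time_sim * mh6 ** 0.5 * (1.0 / mstar) * rstar ** 1.5
    return dmdt, time
def _soft_eddington_cap_sketch(lum, ledd):
    """Harmonic soft cap used above: L_obs = L * Ledd / (L + Ledd)."""
    return lum * ledd / (lum + ledd)
if __name__ == '__main__':
    # a luminosity 10x above Ledd is capped to ~0.91 Ledd
    print(_soft_eddington_cap_sketch(10.0, 1.0))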
|
|
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as qdbapi
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
LOG = logging.getLogger(__name__)
class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
agent_scheduler.LbaasAgentSchedulerDbMixin):
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas",
"lbaas_agent_scheduler",
"service-type"]
# lbaas agent notifiers to handle agent update operations;
# can be updated by plugin drivers while loading;
# will be extracted by neutron manager when loading service plugins;
agent_notifiers = {}
def __init__(self):
"""Initialization for the loadbalancer service plugin."""
qdbapi.register_models()
self.service_type_manager = st_db.ServiceTypeManager.get_instance()
self._load_drivers()
def _load_drivers(self):
"""Loads plugin-drivers specified in configuration."""
self.drivers, self.default_provider = service_base.load_drivers(
constants.LOADBALANCER, self)
# we're at the point when extensions are not loaded yet
# so prevent policy from being loaded
ctx = context.get_admin_context(load_admin_roles=False)
# stop service in case provider was removed, but resources were not
self._check_orphan_pool_associations(ctx, self.drivers.keys())
def _check_orphan_pool_associations(self, context, provider_names):
"""Checks remaining associations between pools and providers.
If admin has not undeployed resources with provider that was deleted
from configuration, neutron service is stopped. Admin must delete
resources prior to removing providers from configuration.
"""
pools = self.get_pools(context)
lost_providers = set([pool['provider'] for pool in pools
if pool['provider'] not in provider_names])
# resources are left without provider - stop the service
if lost_providers:
msg = _("Delete associated loadbalancer pools before "
"removing providers %s") % list(lost_providers)
LOG.exception(msg)
raise SystemExit(msg)
def _get_driver_for_provider(self, provider):
if provider in self.drivers:
return self.drivers[provider]
# raise if not associated (should never be reached)
raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
provider)
def _get_driver_for_pool(self, context, pool_id):
pool = self.get_pool(context, pool_id)
try:
return self.drivers[pool['provider']]
except KeyError:
raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
pool_id)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Neutron LoadBalancer Service Plugin"
def create_vip(self, context, vip):
v = super(LoadBalancerPlugin, self).create_vip(context, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.create_vip(context, v)
return v
def update_vip(self, context, id, vip):
if 'status' not in vip['vip']:
vip['vip']['status'] = constants.PENDING_UPDATE
old_vip = self.get_vip(context, id)
v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.update_vip(context, old_vip, v)
return v
def _delete_db_vip(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_vip(context, id)
def delete_vip(self, context, id):
self.update_status(context, ldb.Vip,
id, constants.PENDING_DELETE)
v = self.get_vip(context, id)
driver = self._get_driver_for_pool(context, v['pool_id'])
driver.delete_vip(context, v)
def _get_provider_name(self, context, pool):
if ('provider' in pool and
pool['provider'] != attrs.ATTR_NOT_SPECIFIED):
provider_name = pconf.normalize_provider_name(pool['provider'])
self.validate_provider(provider_name)
return provider_name
else:
if not self.default_provider:
raise pconf.DefaultServiceProviderNotFound(
service_type=constants.LOADBALANCER)
return self.default_provider
def create_pool(self, context, pool):
provider_name = self._get_provider_name(context, pool['pool'])
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
self.service_type_manager.add_resource_association(
context,
constants.LOADBALANCER,
provider_name, p['id'])
#need to add provider name to pool dict,
#because provider was not known to db plugin at pool creation
p['provider'] = provider_name
driver = self.drivers[provider_name]
driver.create_pool(context, p)
return p
def update_pool(self, context, id, pool):
if 'status' not in pool['pool']:
pool['pool']['status'] = constants.PENDING_UPDATE
old_pool = self.get_pool(context, id)
p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
driver = self._get_driver_for_provider(p['provider'])
driver.update_pool(context, old_pool, p)
return p
def _delete_db_pool(self, context, id):
# proxy the call until plugin inherits from DBPlugin
# rely on uuid uniqueness:
with context.session.begin(subtransactions=True):
self.service_type_manager.del_resource_associations(context, [id])
super(LoadBalancerPlugin, self).delete_pool(context, id)
def delete_pool(self, context, id):
self.update_status(context, ldb.Pool,
id, constants.PENDING_DELETE)
p = self.get_pool(context, id)
driver = self._get_driver_for_provider(p['provider'])
driver.delete_pool(context, p)
def create_member(self, context, member):
m = super(LoadBalancerPlugin, self).create_member(context, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.create_member(context, m)
return m
def update_member(self, context, id, member):
if 'status' not in member['member']:
member['member']['status'] = constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(LoadBalancerPlugin, self).update_member(context, id, member)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.update_member(context, old_member, m)
return m
def _delete_db_member(self, context, id):
# proxy the call until plugin inherits from DBPlugin
super(LoadBalancerPlugin, self).delete_member(context, id)
def delete_member(self, context, id):
self.update_status(context, ldb.Member,
id, constants.PENDING_DELETE)
m = self.get_member(context, id)
driver = self._get_driver_for_pool(context, m['pool_id'])
driver.delete_member(context, m)
def create_health_monitor(self, context, health_monitor):
hm = super(LoadBalancerPlugin, self).create_health_monitor(
context,
health_monitor
)
return hm
def update_health_monitor(self, context, id, health_monitor):
old_hm = self.get_health_monitor(context, id)
hm = super(LoadBalancerPlugin, self).update_health_monitor(
context,
id,
health_monitor
)
with context.session.begin(subtransactions=True):
qry = context.session.query(
ldb.PoolMonitorAssociation
).filter_by(monitor_id=hm['id']).join(ldb.Pool)
for assoc in qry:
driver = self._get_driver_for_pool(context, assoc['pool_id'])
driver.update_health_monitor(context, old_hm,
hm, assoc['pool_id'])
return hm
def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
hm_id,
pool_id)
def _delete_db_health_monitor(self, context, id):
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def delete_health_monitor(self, context, id):
with context.session.begin(subtransactions=True):
hm = self.get_health_monitor(context, id)
qry = context.session.query(
ldb.PoolMonitorAssociation
).filter_by(monitor_id=id).join(ldb.Pool)
for assoc in qry:
driver = self._get_driver_for_pool(context, assoc['pool_id'])
driver.delete_pool_health_monitor(context,
hm,
assoc['pool_id'])
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
context,
health_monitor,
pool_id
)
monitor_id = health_monitor['health_monitor']['id']
hm = self.get_health_monitor(context, monitor_id)
driver = self._get_driver_for_pool(context, pool_id)
driver.create_pool_health_monitor(context, hm, pool_id)
return retval
def delete_pool_health_monitor(self, context, id, pool_id):
self.update_pool_health_monitor(context, id, pool_id,
constants.PENDING_DELETE)
hm = self.get_health_monitor(context, id)
driver = self._get_driver_for_pool(context, pool_id)
driver.delete_pool_health_monitor(context, hm, pool_id)
def stats(self, context, pool_id):
driver = self._get_driver_for_pool(context, pool_id)
stats_data = driver.stats(context, pool_id)
# if we get something from the driver -
# update the db and return the value from db
# else - return what we have in db
if stats_data:
super(LoadBalancerPlugin, self).update_pool_stats(
context,
pool_id,
stats_data
)
return super(LoadBalancerPlugin, self).stats(context,
pool_id)
def populate_vip_graph(self, context, vip):
"""Populate the vip with: pool, members, healthmonitors."""
pool = self.get_pool(context, vip['pool_id'])
vip['pool'] = pool
vip['members'] = [self.get_member(context, member_id)
for member_id in pool['members']]
vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
for hm_id in pool['health_monitors']]
return vip
def validate_provider(self, provider):
if provider not in self.drivers:
raise pconf.ServiceProviderNotFound(
provider=provider, service_type=constants.LOADBALANCER)
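# A minimal standalone sketch (not part of the plugin above) of the dispatch
# pattern it uses: every CRUD call persists through the DB mixin first, then
# the provider recorded on the pool selects which loaded driver receives the
# call.  The driver and provider names below are hypothetical.
if __name__ == '__main__':
    class _FakeDriver(object):
        def __init__(self, name):
            self.name = name
        def create_pool(self, context, pool):
            print('%s driver handles pool %s' % (self.name, pool['id']))
    _drivers = {'haproxy': _FakeDriver('haproxy'),
                'radware': _FakeDriver('radware')}
    def _get_driver_for_provider(provider):
        # mirrors LoadBalancerPlugin._get_driver_for_provider above
        if provider in _drivers:
            return _drivers[provider]
        raise KeyError('no driver loaded for provider %s' % provider)
    _pool = {'id': 'pool-1', 'provider': 'radware'}
    _get_driver_for_provider(_pool['provider']).create_pool(None, _pool)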
|
|
# antioch
# Copyright (c) 1999-2019 Phil Christensen
#
#
# See LICENSE for details
"""
Parse command strings sent by the client.
This parser can understand a variety of phrases, but they are all represented
by the (BNF?) form:
<verb>[[[<dobj spec> ]<direct-object> ]+[<prep> [<pobj spec> ]<object-of-the-preposition>]*]
There is a long list of supported prepositions, some of which are interchangeable.
"""
import sys, time, re, string, types, logging
from antioch.core import exchange, interface, errors
from antioch.core.errors import *
log = logging.getLogger(__name__)
URL_REGEXP = r'(?P<scheme>[+a-z0-9]+)\:(\/\/)?'
URL_REGEXP += r'((?P<user>\w+?)(\:(?P<passwd>\w+?))?\@)?'
URL_REGEXP += r'(?P<host>[\._\-a-z0-9]+)(\:(?P<port>\d+)?)?'
URL_REGEXP += r'(?P<path>/[^\s;?#]*)(;(?P<params>[^\s?#]*))?'
URL_REGEXP += r'(\?(?P<query>[^\s#]*))?(\#(?P<fragment>[^\s]*))?'
URL_RE = re.compile(URL_REGEXP, re.IGNORECASE)
class URL(dict):
def __init__(self, source):
match = URL_RE.match(source)
self.update(match.groupdict())
self.source = str(source)
def __str__(self):
return self.source
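# Illustrative example (an assumption about typical output, not taken from the
# original source): URL('http://user:[email protected]:8080/path?x=1#frag')
# yields scheme='http', user='user', passwd='pw', host='example.com',
# port='8080', path='/path', query='x=1' and fragment='frag'.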
#Here are all our supported prepositions
preps = [['with', 'using'],
['at', 'to'],
['in front of'],
['in', 'inside', 'into', 'within'],
['on top of', 'on', 'onto', 'upon', 'above'],
['out of', 'from inside', 'from'],
['over'],
['through'],
['under', 'underneath', 'beneath', 'below'],
['around', 'round'],
['between', 'among'],
['behind', 'past'],
['beside', 'by', 'near', 'next to', 'along'],
['for', 'about'],
#['is'],
['as'],
['off', 'off of']]
prepstring = ""
for item in preps:
prepstring += "|".join(item)
if(item != preps[len(preps) - 1]):
prepstring += "|"
PREP_SRC = r'(?:\b)(?P<prep>' + prepstring + r')(?:\b)'
SPEC = r"(?P<spec_str>my|the|a|an|\S+(?:\'s|s\'))"
PHRASE_SRC = r'(?:' + SPEC + r'\s)?(?P<obj_str>.+)'
PREP = re.compile(PREP_SRC)
PHRASE = re.compile(PHRASE_SRC)
POBJ_TEST = re.compile(PREP_SRC + r"\s" + PHRASE_SRC)
MULTI_WORD = re.compile(r'((\"|\').+?(?!\\).\2)|(\S+)')
def parse(caller, sentence, debug=False):
"""
For a given user, execute a command.
"""
t = dict(time=time.time())
def _profile(name):
if(debug):
log.debug("%s took %4f seconds" % (
name, time.time() - t['time']
))
t['time'] = time.time()
l = Lexer(sentence)
_profile('lexer')
p = TransactionParser(l, caller, caller.get_exchange())
_profile('parser')
v = p.get_verb()
_profile('verb search')
v.execute(p)
_profile('execution')
def get_default_parser(v):
"""
A default parser is used by Verbs to support __call__ usage
"""
x = v.get_exchange()
l = Lexer(v.name)
p = TransactionParser(l, x.get_context(), x)
p.verb = v
p.this = v.get_source()
return p
class Lexer(object):
"""
    An instance of this class will identify the various parts of an imperative
sentence. This may be of use to verb code, as well.
"""
def __init__(self, command):
self.command = command
self.dobj_str = None
self.dobj_spec_str = None
# First, find all words or double-quoted-strings in the text
iterator = re.finditer(MULTI_WORD, command)
self.words = []
qotd_matches = []
for item in iterator:
if(item.group(1)):
qotd_matches.append(item)
word = item.group().strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
self.words.append(word)
# Now, find all prepositions
iterator = re.finditer(PREP, command)
prep_matches = []
for item in iterator:
prep_matches.append(item)
#this method will be used to filter out prepositions inside quotes
def nonoverlap(item):
(start, end) = item.span()
for word in qotd_matches:
(word_start, word_end) = word.span()
if(start > word_start and start < word_end):
return False
elif(end > word_start and end < word_end):
return False
return True
#nonoverlap() will leave only true non-quoted prepositions
prep_matches = list(filter(nonoverlap, prep_matches))
#determine if there is anything after the verb
if(len(self.words) > 1):
#if there are prepositions, we only look for direct objects
#until the first preposition
if(prep_matches):
end = prep_matches[0].start()-1
else:
end = len(command)
#this is the phrase, which could be [[specifier ]object]
dobj_phrase = command[len(self.words[0]) + 1:end]
match = re.match(PHRASE, dobj_phrase)
if(match):
result = match.groupdict()
self.dobj_str = result['obj_str'].strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
if(result['spec_str']):
self.dobj_spec_str = result['spec_str'].strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
else:
self.dobj_spec_str = ''
self.prepositions = {}
#iterate through all the prepositional phrase matches
for index in range(len(prep_matches)):
start = prep_matches[index].start()
#if this is the last preposition, then look from here until the end
if(index == len(prep_matches) - 1):
end = len(command)
#otherwise, search until the next preposition starts
else:
end = prep_matches[index + 1].start() - 1
prep_phrase = command[start:end]
phrase_match = re.match(POBJ_TEST, prep_phrase)
if not(phrase_match):
continue
result = phrase_match.groupdict()
#if we get a quoted string here, strip the quotes
result['obj_str'] = result['obj_str'].strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
if(result['spec_str'] is None):
result['spec_str'] = ''
                #if there is already an entry for this preposition, we turn it into
#a list, and if it already is one, we append to it
if(result['prep'] in self.prepositions):
item = self.prepositions[result['prep']]
if not(isinstance(item[0], list)):
self.prepositions[result['prep']] = [[result['spec_str'], result['obj_str'], None], item]
else:
self.prepositions[result['prep']].append([result['spec_str'], result['obj_str'], None])
#if it's a new preposition, we just save it here.
else:
self.prepositions[result['prep']] = [result['spec_str'], result['obj_str'], None]
def get_details(self):
return dict(
command = self.command,
dobj_str = self.dobj_str,
dobj_spec_str = self.dobj_spec_str,
words = self.words,
prepositions = self.prepositions,
)
class TransactionParser(object):
"""
The parser instance is created by the avatar. A new instance is created
for each remote call to perspective_parse.
"""
def __init__(self, lexer, caller, exchange):
"""
Create a new parser object for the given command, as issued by
the given caller, using the registry.
"""
self.lexer = lexer
self.caller = caller
self.exchange = exchange
self.this = None
self.verb = None
if(self.lexer):
for key, value in list(self.lexer.get_details().items()):
self.__dict__[key] = value
for prep in self.prepositions:
prep_record_list = self.prepositions[prep]
if not(isinstance(prep_record_list[0], list)):
prep_record_list = [prep_record_list]
for record in prep_record_list:
#look for an object with this name/specifier
obj = self.find_object(record[0], record[1])
#try again (maybe it just looked like a specifier)
if(not obj and record[0]):
record[1] = record[0] + ' ' + record[1]
record[0] = ''
obj = self.find_object(record[0], record[1])
#one last shot for pronouns
if not(obj):
obj = self.get_pronoun_object(record[1])
record[2] = obj
if(hasattr(self, 'dobj_str') and self.dobj_str):
#look for an object with this name/specifier
self.dobj = self.find_object(self.dobj_spec_str, self.dobj_str)
#try again (maybe it just looked like a specifier)
if(not self.dobj and self.dobj_spec_str):
self.dobj_str = self.dobj_spec_str + ' ' + self.dobj_str
self.dobj_spec_str = ''
self.dobj = self.find_object(None, self.dobj_str)
#if there's nothing with this name, then we look for
#pronouns before giving up
if not(self.dobj):
self.dobj = self.get_pronoun_object(self.dobj_str)
else:
#didn't find anything, probably because nothing was there.
self.dobj = None
self.dobj_str = None
def get_environment(self):
"""
Return a dictionary of environment variables supplied by the parser results.
"""
return dict(
parser = self,
command = self.command,
caller = self.caller,
dobj = self.dobj,
dobj_str = self.dobj_str,
dobj_spec_str = self.dobj_spec_str,
words = self.words,
prepositions = self.prepositions,
this = self.this,
self = self.verb,
system = self.exchange.get_object(1),
here = self.caller.get_location() if self.caller else None,
get_dobj = self.get_dobj,
get_dobj_str = self.get_dobj_str,
has_dobj = self.has_dobj,
has_dobj_str = self.has_dobj_str,
get_pobj = self.get_pobj,
get_pobj_str = self.get_pobj_str,
has_pobj = self.has_pobj,
has_pobj_str = self.has_pobj_str,
)
def find_object(self, specifier, name, return_list=False):
"""
Look for an object, with the optional specifier, in the area
        around the person who entered this command. If the possessive
        form is used (e.g., "Bill's spoon") and that person is not
here, a NoSuchObjectError is thrown for that person.
"""
result = None
search = None
if(specifier == 'my'):
search = self.caller
elif(specifier and specifier.find("'") != -1):
person = specifier[0:specifier.index("'")]
location = self.caller.get_location()
if(location):
search = location.find(person)
else:
search = self.caller.get_location()
if(name and search):
result = search.find(name)
if(isinstance(result, interface.Object)):
return result
elif(return_list):
return result
elif(not result):
return None
else:
raise errors.AmbiguousObjectError(name, result)
def get_verb(self):
"""
Determine the most likely verb for this sentence. There is a search
order for verbs, as follows::
Caller->Caller's Contents->Location->Items in Location->
Direct Object->Objects of the Preposition
"""
if not(self.words):
raise NoSuchVerbError('parser: ' + self.command)
if(getattr(self, 'verb', None) is not None):
return self.verb
verb_str = self.words[0]
matches = []
ctx = self.caller
checks = [self.caller]
checks.extend(self.caller.get_contents())
location = self.caller.get_location()
if(location):
checks.append(location)
checks.extend(location.get_contents())
checks.append(self.dobj)
for key in self.prepositions:
# if there were multiple uses of a preposition
if(isinstance(self.prepositions[key][0], list)):
# then check each one for a verb
checks.extend([pobj[2] for pobj in self.prepositions[key] if pobj[2]])
else:
checks.append(self.prepositions[key][2])
matches = [x for x in checks if x and x.has_verb(verb_str)]
self.this = self.filter_matches(matches)
if(isinstance(self.this, list)):
if(len(self.this) > 1):
raise AmbiguousVerbError(verb_str, self.this)
elif(len(self.this) == 0):
self.this = None
else:
self.this = self.this[0]
if not(self.this):
raise NoSuchVerbError('parser: ' + verb_str)
#print "Verb found on: " + str(self.this)
self.verb = self.this.get_verb(self.words[0], recurse=True)
return self.verb
def filter_matches(self, possible):
result = []
# print "possble is " + str(possible)
if not(isinstance(possible, list)):
possible = [possible]
verb_str = self.words[0]
for item in possible:
if(item is None):
continue
if(item in result):
continue
verb = item.get_verb(verb_str)
if(not verb.performable_by(self.caller)):
continue
if(verb.is_ability() and item.get_id() != self.caller.get_id()):
continue
result.append(item)
# print "result is " + str(result)
return result
def get_pronoun_object(self, pronoun):
"""
Return the correct object for various pronouns.
        Also, an object number (starting with a #) will
return the object for that id.
"""
ctx = self.caller
if(pronoun == "me"):
return self.caller
elif(pronoun == "here"):
return self.caller.get_location()
# elif(pronoun == "this"):
# return self.caller.get_observing(ctx)
elif(pronoun[0] == "#"):
return self.exchange.get_object(pronoun)
else:
return None
def get_dobj(self):
"""
Get the direct object for this parser. If there was no
direct object found, raise a NoSuchObjectError
"""
if not(self.dobj):
raise NoSuchObjectError(self.dobj_str)
return self.dobj
def get_pobj(self, prep):
"""
Get the object for the given preposition. If there was no
object found, raise a NoSuchObjectError; if the preposition
was not found, raise a NoSuchPrepositionError.
"""
if not(prep in self.prepositions):
raise NoSuchPrepositionError(prep)
if(isinstance(self.prepositions[prep][0], list)):
matches = []
for item in self.prepositions[prep]:
if(item[2]):
matches.append(item[2])
if(len(matches) > 1):
raise AmbiguousObjectError(matches[0][1], matches)
elif not(matches):
raise NoSuchObjectError(self.prepositions[prep][0][1])
if not(self.prepositions[prep][2]):
raise NoSuchObjectError(self.prepositions[prep][1])
return self.prepositions[prep][2]
def get_dobj_str(self):
"""
Get the direct object **string** for this parser. If there was no
direct object **string** found, raise a NoSuchObjectError
"""
if not(self.dobj_str):
raise NoSuchObjectError('direct object')
return self.dobj_str
def get_pobj_str(self, prep, return_list=False):
"""
Get the object **string** for the given preposition. If there was no
object **string** found, raise a NoSuchObjectError; if the preposition
was not found, raise a NoSuchPrepositionError.
"""
if not(prep in self.prepositions):
raise NoSuchPrepositionError(prep)
if(isinstance(self.prepositions[prep][0], list)):
matches = []
for item in self.prepositions[prep]:
if(item[1]):
matches.append(item[1])
if(len(matches) > 1):
if(return_list):
return matches
else:
                    raise AmbiguousObjectError(matches[0], matches)
elif not(matches):
raise NoSuchObjectError(self.prepositions[prep][0][1])
return self.prepositions[prep][1]
def get_pobj_spec_str(self, prep, return_list=False):
"""
Get the object **specifier** for the given preposition. If there was no
object **specifier** found, return the empty string; if the preposition
was not found, raise a NoSuchPrepositionError.
"""
if not(prep in self.prepositions):
raise NoSuchPrepositionError(prep)
if(isinstance(self.prepositions[prep][0], list)):
matches = []
for item in self.prepositions[prep]:
matches.append(item[0])
if(len(matches) > 1):
if(return_list):
return matches
else:
return matches[0]
return self.prepositions[prep][0]
def has_dobj(self):
"""
Was a direct object found?
"""
return self.dobj is not None
def has_pobj(self, prep):
"""
Was an object for this preposition found?
"""
if(prep not in self.prepositions):
return False
found_prep = False
if(isinstance(self.prepositions[prep][0], list)):
for item in self.prepositions[prep]:
if(item[2]):
found_prep = True
break
else:
found_prep = bool(self.prepositions[prep][2])
return found_prep
def has_dobj_str(self):
"""
Was a direct object string found?
"""
return self.dobj_str != None
def has_pobj_str(self, prep):
"""
        Was an object string for this preposition found?
"""
if(prep not in self.prepositions):
return False
found_prep = False
if(isinstance(self.prepositions[prep][0], list)):
for item in self.prepositions[prep]:
if(item[1]):
found_prep = True
break
else:
found_prep = bool(self.prepositions[prep][1])
return found_prep
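# A minimal sketch (assumes this module is importable and is run as a script;
# not part of the original parser) of what the Lexer extracts from a command.
if __name__ == '__main__':
    _lex = Lexer('put the blue book on the table')
    # Expected details: words == ['put', 'the', 'blue', 'book', 'on', 'the',
    # 'table'], dobj_spec_str == 'the', dobj_str == 'blue book', and
    # prepositions == {'on': ['the', 'table', None]}.
    print(_lex.get_details())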
|
|
"""
Jonathan Reem
Implementation of Monads from Haskell in Python as a "copy" of
Control.Monad from the GHC libraries.
docstrings of functions heavily influenced by Control.Monad
"""
# pylint: disable=C0322, C0103, R0921, R0922, W0141, W0142
import func
import infix
def monadize(monad):
"Decorator for creating a monad."
monad.then = lambda s, se: s >= (lambda a: se)
monad.__ge__ = monad.bind # >= is Haskell's >>=
monad.__lshift__ = func.flip(monad.bind) # << is Haskell's =<<
monad.__rshift__ = monad.then # >> is Haskell's >>
return monad
@monadize
class Monad(object):
"""
Monad operators should have the following types:
bind :: Monad(a) -> (a -> Monad(b)) -> Monad(b)
then :: Monad(a) -> Monad(b) -> Monad(b)
return_m :: a -> Monad(a)
Monad laws:
return_m(a) >= f == f(a) -- Left Identity
m >= return_m == m -- Right Identity
(m >= f) >= g == m >= (lambda x: f(x) >= g) -- Associativity
For further info on Monad Laws, see:
http://www.haskell.org/haskellwiki/Monad_law
Using the |mcompl| and |mcompr| operators, we can write the
Monad Laws in a way that might be a bit clearer:
return_m |mcompr| g == g
f |mcompr| return_m == f
(f |mcompr| g) |mcompr| h == f |mcompr| (g |mcompr| h)
If these laws are satisfied, then the Monad forms a mathematical category
from Category theory, which makes lots of things convenient.
"""
def bind(self, bindee):
"Equivalent to Haskell's >>="
raise NotImplementedError(
"Your Monad must define its own bind.")
@classmethod
def return_m(cls, value):
"Equivalent to Haskell's return"
raise NotImplementedError(
"Your monad must implement return_m.")
class MonadPlus(object): # Monad
"""
MonadPlus laws:
mzero >= f == mzero
v >> mzero == mzero
"""
@classmethod
def mzero(cls):
"The mzero value."
raise NotImplementedError("mzero must be defined.")
def mplus(self, other):
"""An associative combined operation."""
raise NotImplementedError
def sequence(monad_t, monad_list):
"""Evaluates each action in sequence from left to right and
collects the results."""
def helper(monad, acc):
"Helper for sequence."
return monad >= (lambda x:
(acc >= (lambda xs:
(monad_t.return_m(xs + [x])))))
return func.foldr(helper, monad_t.return_m([]), list(reversed(monad_list)))
def sequence_(monad_t, monad_list):
"""Evaluates each action in sequence from
left to right and dumps the results."""
return func.foldr(monad_t.then, monad_t.return_m(func.Unit()), monad_list)
def map_m(monad_t, transform, from_list):
"""Creates a list of monad_ts, then evaluates
them and keeps the results."""
return sequence(monad_t, [transform(a) for a in from_list])
def map_m_(monad_t, transform, from_list):
"""Creates a list of monad_ts, then evaluates
them and dumps the results."""
return sequence_(monad_t, [transform(a) for a in from_list])
def guard(monad_t, predicate):
"return_m(Unit()) if the predicate is true, else mzero"
if predicate:
return monad_t.return_m(func.Unit())
else:
return monad_t.mzero()
def msum(monad_t, monad_list):
"Generalized concatenation."
    return func.foldr(monad_t.mplus, monad_t.mzero(), monad_list)
def filter_m(monad_t, predicate, filter_list):
"""Generalize the list filter for other monads."""
if filter_list == []:
return monad_t.return_m([])
else:
first, rest_orig = filter_list[0], filter_list[1:]
return predicate(first) >= (lambda flg:
filter_m(monad_t, predicate, rest_orig) >= (lambda rest:
                monad_t.return_m(([first] + rest) if flg else rest)))
def for_m(monad_t, from_list, transform):
"Flipped map_m"
return map_m(monad_t, transform, from_list)
def for_m_(monad_t, from_list, transform):
"Flipped map_m_"
return map_m_(monad_t, transform, from_list)
@infix.Infix
def mcompl(a_to_monad_b, b_to_monad_c):
"""Left-to-right Kleisli composition."""
return lambda a: (a_to_monad_b(a) >= b_to_monad_c)
mcompr = infix.Infix(func.flip(mcompl))
mcompr.__doc__ = "Flipped Kleisli composition."
def forever(monad_action):
"Repeats a monad action infinitely."
return monad_action >> forever(monad_action)
def join(monad_of_monads):
"Removes a level of monadic structure."
return monad_of_monads >= (lambda x: x)
def map_and_unzip_m(monad_t, map_function, from_list):
"""
Maps a pair-generating function over the from_list, then unzips the result
and returns a pair of lists.
"""
return sequence(monad_t, map(map_function, from_list)) >= \
(lambda r: monad_t.return_m(func.unzip(r)))
def zip_with_m(monad_t, zip_function, left, right):
"Generalizes zip_with over non-list monads."
return sequence(monad_t, func.zip_with(zip_function, left, right))
def zip_with_m_(monad_t, zip_function, left, right):
"Same as zip_with_m, but ignores the result."
return sequence_(monad_t, func.zip_with(zip_function, left, right))
def fold_m(monad_t, folder, acc, from_list, first=True):
"""Like foldl but the result is encapsulated in a monad.
Equivalent to:
folder acc1 from_list1 >=
lambda acc2: folder acc2 from_list2 >=
...
return folder accm from_listm
"""
if first:
from_list = list(reversed(from_list))
if from_list == []:
return monad_t.return_m(acc)
else:
return folder(acc, from_list.pop()) >= \
(lambda fld: fold_m(monad_t, folder, fld, from_list, False))
def fold_m_(monad_t, folder, acc, from_list):
"fold_m but the result is thrown away."
return fold_m(monad_t, folder, acc, from_list) >> \
monad_t.return_m(func.Unit())
def replicate_m(monad_t, replications, monad_item):
"Generalized replicate for monads. Preforms the action n times."
return sequence(monad_t, func.replicate(monad_item, replications))
def replicate_m_(monad_t, replications, monad_item):
"Like replicateM, but discards the result."
return sequence_(monad_t, func.replicate(monad_item, replications))
def when(monad_t, predicate, action):
"Conditional execution of monads."
return action if predicate else monad_t.return_m(func.Unit())
def unless(monad_t, predicate, action):
"The opposite of when."
return when(monad_t, not predicate, action)
# The liftM functions, as well as ap, are cumbersome and mostly unneeded in
# Python. However, using Python tuples and * unpacking, which has no direct
# counterpart in Haskell's type system, you can define a lift_m_n function like so:
def lift_m_n(monad_t, function, *monad_list):
"""
By using a variadic function, we have successfully
created a lift_m_n function. This would not be allowed in Haskell's
type system, which is why it does not exist there.
"""
    return sequence(monad_t, monad_list) >= (lambda args:
        monad_t.return_m(function(*args)))
def mfilter(monad_t, predicate, monad_action):
"MonadPlus equivalent of filter for lists."
return monad_action >= (lambda a:
        monad_t.return_m(a) if predicate(a) else monad_t.mzero())
|
|
"""
Utilities for working with numpy arrays.
"""
from datetime import datetime
from warnings import (
catch_warnings,
filterwarnings,
)
from numpy import (
broadcast,
busday_count,
datetime64,
dtype,
empty,
nan,
vectorize,
where
)
from numpy.lib.stride_tricks import as_strided
from toolz import flip
uint8_dtype = dtype('uint8')
bool_dtype = dtype('bool')
int64_dtype = dtype('int64')
float32_dtype = dtype('float32')
float64_dtype = dtype('float64')
complex128_dtype = dtype('complex128')
datetime64D_dtype = dtype('datetime64[D]')
datetime64ns_dtype = dtype('datetime64[ns]')
object_dtype = dtype('O')
# We use object arrays for strings.
categorical_dtype = object_dtype
make_datetime64ns = flip(datetime64, 'ns')
make_datetime64D = flip(datetime64, 'D')
NaTmap = {
dtype('datetime64[%s]' % unit): datetime64('NaT', unit)
for unit in ('ns', 'us', 'ms', 's', 'm', 'D')
}
NaT_for_dtype = NaTmap.__getitem__
NaTns = NaT_for_dtype(datetime64ns_dtype)
NaTD = NaT_for_dtype(datetime64D_dtype)
_FILLVALUE_DEFAULTS = {
bool_dtype: False,
float32_dtype: nan,
float64_dtype: nan,
datetime64ns_dtype: NaTns,
object_dtype: None,
}
INT_DTYPES_BY_SIZE_BYTES = {
1: dtype('int8'),
2: dtype('int16'),
4: dtype('int32'),
8: dtype('int64'),
}
def int_dtype_with_size_in_bytes(size):
try:
return INT_DTYPES_BY_SIZE_BYTES[size]
except KeyError:
raise ValueError("No integral dtype whose size is %d bytes." % size)
class NoDefaultMissingValue(Exception):
pass
def make_kind_check(python_types, numpy_kind):
"""
Make a function that checks whether a scalar or array is of a given kind
(e.g. float, int, datetime, timedelta).
"""
def check(value):
if hasattr(value, 'dtype'):
return value.dtype.kind == numpy_kind
return isinstance(value, python_types)
return check
is_float = make_kind_check(float, 'f')
is_int = make_kind_check(int, 'i')
is_datetime = make_kind_check(datetime, 'M')
is_object = make_kind_check(object, 'O')
def coerce_to_dtype(dtype, value):
"""
Make a value with the specified numpy dtype.
Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
"""
name = dtype.name
if name.startswith('datetime64'):
if name == 'datetime64[D]':
return make_datetime64D(value)
elif name == 'datetime64[ns]':
return make_datetime64ns(value)
else:
raise TypeError(
"Don't know how to coerce values of dtype %s" % dtype
)
return dtype.type(value)
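# Illustrative sketch (values are arbitrary examples; assumes the dtype
# constants defined above):
def _example_coerce_to_dtype():
    assert coerce_to_dtype(int64_dtype, '3') == 3
    assert coerce_to_dtype(datetime64D_dtype, '2014-01-01') == \
        datetime64('2014-01-01', 'D')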
def default_missing_value_for_dtype(dtype):
"""
Get the default fill value for `dtype`.
"""
try:
return _FILLVALUE_DEFAULTS[dtype]
except KeyError:
raise NoDefaultMissingValue(
"No default value registered for dtype %s." % dtype
)
def repeat_first_axis(array, count):
"""
Restride `array` to repeat `count` times along the first axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape (count,) + array.shape, composed of `array` repeated
`count` times along the first axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_first_axis(a, 2)
array([[0, 1, 2],
[0, 1, 2]])
>>> repeat_first_axis(a, 4)
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
Notes
----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, (count,) + array.shape, (0,) + array.strides)
def repeat_last_axis(array, count):
"""
Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
    repeat_first_axis
"""
return as_strided(array, array.shape + (count,), array.strides + (0,))
def rolling_window(array, length):
"""
    Restride an array of shape
        (X_0, ... X_N)
    into an array of shape
        (X_0 - length + 1, length, ... X_N)
    where each slice at index i along the first axis is equivalent to
        result[i] = array[i:i + length]
Parameters
----------
array : np.ndarray
The base array.
length : int
Length of the synthetic first axis to generate.
Returns
-------
out : np.ndarray
Example
-------
>>> from numpy import arange
>>> a = arange(25).reshape(5, 5)
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> rolling_window(a, 2)
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9]],
<BLANKLINE>
[[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
<BLANKLINE>
[[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
<BLANKLINE>
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]]])
"""
orig_shape = array.shape
if not orig_shape:
raise IndexError("Can't restride a scalar.")
elif orig_shape[0] <= length:
raise IndexError(
"Can't restride array of shape {shape} with"
" a window length of {len}".format(
shape=orig_shape,
len=length,
)
)
num_windows = (orig_shape[0] - length + 1)
new_shape = (num_windows, length) + orig_shape[1:]
new_strides = (array.strides[0],) + array.strides
return as_strided(array, new_shape, new_strides)
# Sentinel value that isn't NaT.
_notNaT = make_datetime64D(0)
def busday_count_mask_NaT(begindates, enddates, out=None):
"""
    Simple wrapper around numpy.busday_count that returns `float` arrays rather than int
arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`.
Doesn't support custom weekdays or calendars, but probably should in the
future.
See Also
--------
np.busday_count
"""
if out is None:
out = empty(broadcast(begindates, enddates).shape, dtype=float)
beginmask = (begindates == NaTD)
endmask = (enddates == NaTD)
out = busday_count(
# Temporarily fill in non-NaT values.
where(beginmask, _notNaT, begindates),
where(endmask, _notNaT, enddates),
out=out,
)
# Fill in entries where either comparison was NaT with nan in the output.
out[beginmask | endmask] = nan
return out
class WarningContext(object):
"""
Re-usable contextmanager for contextually managing warnings.
"""
def __init__(self, *warning_specs):
self._warning_specs = warning_specs
self._catchers = []
def __enter__(self):
catcher = catch_warnings()
catcher.__enter__()
self._catchers.append(catcher)
for args, kwargs in self._warning_specs:
filterwarnings(*args, **kwargs)
return self
def __exit__(self, *exc_info):
catcher = self._catchers.pop()
return catcher.__exit__(*exc_info)
def ignore_nanwarnings():
"""
Helper for building a WarningContext that ignores warnings from numpy's
nanfunctions.
"""
return WarningContext(
(
('ignore',),
{'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'},
)
)
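# Illustrative sketch (assumes numpy's standard nanmean; imported locally so
# the module-level imports stay unchanged): take a nanmean over an all-NaN
# slice without the RuntimeWarning reaching the caller.
def _example_ignore_nanwarnings():
    from numpy import array, nanmean
    with ignore_nanwarnings():
        return nanmean(array([nan, nan]))  # nan, with the warning filtered out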
def vectorized_is_element(array, choices):
"""
Check if each element of ``array`` is in choices.
Parameters
----------
array : np.ndarray
choices : object
Object implementing __contains__.
Returns
-------
was_element : np.ndarray[bool]
Array indicating whether each element of ``array`` was in ``choices``.
"""
return vectorize(choices.__contains__, otypes=[bool])(array)
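# Illustrative sketch (the data and choices are arbitrary examples):
def _example_vectorized_is_element():
    from numpy import array
    # -> array([ True, False,  True])
    return vectorized_is_element(array(['a', 'b', 'c']), {'a', 'c'})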
|
|
import collections
import json
import logging
logger = logging.getLogger('app.subscriber')
class Subscriber(object):
"""Subscribes to messages from WAMP Router on 'com.opentrons.browser_to_robot' and dispatches commands according to the :obj:`dispatcher` dictionary.
    The Subscriber class is intended to be instantiated into a subscriber object
to dispatch commands from the GUI and ProtocolRunner to the appropriate object(s)
for robot actions.
The subscriber object holds references to all the relevant objects such
as the head, queue objects etc.
:dispatcher:
* 'home' : lambda self, data: self.home(data),
* 'stop' : lambda self, data: self.head.theQueue.kill(data),
* 'reset' : lambda self: self.reset(),
* 'move' : lambda self, data: self.head.move(data),
* 'step' : lambda self, data: self.head.step(data),
* 'calibratePipette' : lambda self, data: self.calibrate_pipette(data),
* 'calibrateContainer' : lambda self, data: self.calibrate_container(data),
* 'getCalibrations' : lambda self: self.get_calibrations(),
* 'saveVolume' : lambda self, data: self.head.save_volume(data),
* 'movePipette' : lambda self, data: self.move_pipette(data),
* 'movePlunger' : lambda self, data: self.move_plunger(data),
* 'speed' : lambda self, data: self.speed(data),
* 'createDeck' : lambda self, data: self.create_deck(data),
* 'instructions' : lambda self, data: self.instructions(data),
* 'infinity' : lambda self, data: self.infinity(data),
* 'pauseJob' : lambda self: self.head.theQueue.pause_job(),
* 'resumeJob' : lambda self: self.head.theQueue.resume_job(),
* 'eraseJob' : lambda self: self.runner.insQueue.erase_job(),
* 'raw' : lambda self, data: self.head.raw(data),
* 'update' : lambda self, data: self.loop.create_task(self.update(data)),
* 'wifimode' : lambda self, data: self.wifi_mode(data),
* 'wifiscan' : lambda self, data: self.wifi_scan(data),
* 'hostname' : lambda self, data: self.change_hostname(data),
* 'poweroff' : lambda self: self.poweroff(),
* 'reboot' : lambda self: self.reboot(),
* 'shareinet': lambda self: self.loop.create_task(self.share_inet()),
* 'restart' : lambda self: self.restart()
:todo:
- clean up inclusion of head and runner objects -> referenced by dispatch
- move publishing into respective objects and have those objects use :class:`publisher` a la :meth:`get_calibrations` (:meth:`create_deck`, :meth:`wifi_scan`)
"""
def __init__(self, session,loop):
self.head = None
self.deck = None
self.runner = None
self.caller = session
self.loop = loop
def __str__(self):
return "Subscriber"
def home(self, data):
"""Intermediate step to start a homing sequence
"""
logger.debug('subscriber.home called')
self.runner.insQueue.infinity_data = None
self.runner.insQueue.erase_job()
self.head.home(data)
def list_ports(self):
if self.head:
temp_ports = self.head.smoothieAPI.list_serial_ports()
self.head.pubber.send_message('portsList',temp_ports)
def connect_port(self, portname):
if self.head:
self.head.smoothieAPI.connect(portname)
def reset(self):
"""Intermediate step to reset Smoothieboard
"""
logger.debug('subscriber.reset called')
self.runner.insQueue.infinity_data = None
self.head.theQueue.reset()
def set_head(self, head):
"""Set reference to :class:`head` object
"""
logger.debug('subscriber.set_head called')
self.head = head
def set_deck(self, deck):
self.deck = deck
def set_runner(self, runner):
"""Set reference to :class:`protocol_runner` object
"""
logger.debug('subscriber.set_runner called')
self.runner = runner
def dispatch_message(self, message):
"""The first point of contact for incoming messages.
"""
logger.debug('subscriber.dispatch_message called')
logger.debug('\nmessage: {}'.format(message))
try:
dictum = collections.OrderedDict(json.loads(message.strip(), object_pairs_hook=collections.OrderedDict))
logger.debug('\tdictum[type]: {}'.format(dictum['type']))
if 'data' in dictum:
logger.debug('\tdictum[data]: {}'.format(json.dumps(dictum['data'],sort_keys=True,indent=4,separators=(',',': '))))
self.dispatch(dictum['type'],dictum['data'])
else:
self.dispatch(dictum['type'],None)
except Exception as e:
logger.exception('*** error in subscriber.dispatch_message ***')
raise e
def dispatch(self, type_, data):
"""Dispatch commands according to :obj:`dispatcher` dictionary
"""
logger.debug('subscriber.dispatch called')
logger.debug('type_: {0}, data: {1}'.format(type_, data))
if data is not None:
self.dispatcher[type_](self,data)
else:
self.dispatcher[type_](self)
def calibrate_pipette(self, data):
"""Tell the :head:`head` to calibrate a :class:`pipette`
"""
logger.debug('subscriber.calibrate_pipette called')
logger.debug('\nargs: {}'.format(data))
if 'axis' in data and 'property' in data:
axis = data['axis']
property_ = data['property']
self.head.calibrate_pipette(axis, property_)
self.get_calibrations()
def calibrate_container(self, data):
"""Tell the :class:`head` to calibrate a container
"""
logger.debug('subscriber.calibrate_container called')
logger.debug('args: {}'.format(data))
if 'axis' in data and 'name' in data:
axis = data['axis']
container_ = data['name']
self.head.calibrate_container(axis, container_)
self.get_calibrations()
def container_depth_override(self, data):
logger.debug('subscriber.container_depth_override called')
container_name = data['name']
new_depth = data['depth']
self.deck.container_depth_override(container_name,new_depth)
def get_calibrations(self):
"""Tell the :class:`head` to publish calibrations
"""
logger.debug('subscriber.get_calibrations called')
self.head.publish_calibrations()
def get_containers(self):
self.deck.publish_containers()
def move_pipette(self, data):
"""Tell the :class:`head` to move a :class:`pipette`
"""
logger.debug('subscriber.move_pipette called')
axis = data['axis']
property_ = data['property']
self.head.move_pipette(axis, property_)
def move_plunger(self, data):
"""Tell the :class:`head` to move a :class:`pipette` to given location(s)
"""
logger.debug('subscriber.move_plunger called')
logger.debug('data: {}'.format(data))
self.head.move_plunger(data['axis'], data['locations'])
def speed(self, data):
"""Tell the :class:`head` to change speed
"""
logger.debug('subscriber.speed called')
logger.debug('data: {}'.format(data))
axis = data['axis']
value = data['value']
if axis=='ab':
self.head.set_speed('a', value)
self.head.set_speed('b', value)
else:
self.head.set_speed(axis, value)
def create_deck(self, data):
"""Intermediate step to have :class:`head` load deck data and return deck information back to Browser
:todo:
move publishing into respective objects and have those objects use :class:`publisher` a la :meth:`get_calibrations` (:meth:`create_deck`, :meth:`wifi_scan`)
"""
logger.debug('subscriber.create_deck called')
logger.debug('\targs: {}'.format(data))
msg = {
'type' : 'containerLocations',
'data' : self.head.create_deck(data)
}
self.caller._myAppSession.publish('com.opentrons.robot_to_browser',json.dumps(msg,sort_keys=True,indent=4,separators=(',',': ')))
def configure_head(self, data):
logger.debug('subscriber.configure_head called')
logger.debug('\targs: {}'.format(data))
self.head.configure_head(data)
def instructions(self, data):
"""Intermediate step to have :class:`prtocol_runner` and :class:`the_queue` start running a protocol
"""
logger.debug('subscriber.instructions called')
logger.debug('\targs: {}'.format(data))
if data and len(data):
            self.runner.insQueue.start_job(data, True)
def infinity(self, data):
"""Intermediate step to have :class:`protocol_runner` and :class:`the_queue` run a protocol to infinity and beyond
"""
logger.debug('subscriber.infinity called')
if data and len(data):
            self.runner.insQueue.start_infinity_job(data)
#instantiate/activate the dispatcher/router dictionary
#create Dispatcher dictionary object which is the equivalent of the
#previous socketHandlers object in js code
dispatcher = {'home' : lambda self, data: self.home(data),
'stop' : lambda self, data: self.head.theQueue.kill(data),
'reset' : lambda self: self.reset(),
'move' : lambda self, data: self.head.move(data),
'step' : lambda self, data: self.head.step(data),
'calibratePipette' : lambda self, data: self.calibrate_pipette(data), #needs xtra code
'calibrateContainer' : lambda self, data: self.calibrate_container(data),
'getCalibrations' : lambda self: self.get_calibrations(),
'saveVolume' : lambda self, data: self.head.save_volume(data),
'movePipette' : lambda self, data: self.move_pipette(data),#needs xtra code
'movePlunger' : lambda self, data: self.move_plunger(data),
'speed' : lambda self, data: self.speed(data), #needs xtra code
'getContainers' : lambda self: self.get_containers(),
'createDeck' : lambda self, data: self.create_deck(data),#needs xtra code
'configureHead' : lambda self, data: self.configure_head(data),
'relativeCoords' : lambda self: self.head.relative_coords(),
'instructions' : lambda self, data: self.instructions(data),#needs xtra code
'infinity' : lambda self, data: self.infinity(data),
'pauseJob' : lambda self: self.head.theQueue.pause_job(),
'resumeJob' : lambda self: self.head.theQueue.resume_job(),
'eraseJob' : lambda self: self.runner.insQueue.erase_job(),
'raw' : lambda self, data: self.head.raw(data),
'update' : lambda self, data: self.loop.create_task(self.update(data)),
'wifimode' : lambda self, data: self.wifi_mode(data),
'wifiscan' : lambda self, data: self.wifi_scan(data),
'hostname' : lambda self, data: self.change_hostname(data),
'poweroff' : lambda self: self.poweroff(),
'reboot' : lambda self: self.reboot(),
'shareinet': lambda self: self.loop.create_task(self.share_inet()),
'restart' : lambda self: self.restart(),
'containerDepthOverride': lambda self, data: self.container_depth_override(data),
'listPorts' : lambda self: self.list_ports(),
'connectPort' : lambda self, data: self.connect_port(data)
}
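# A minimal sketch of the dispatch flow (assumes `sub` is a fully wired
# Subscriber, i.e. set_head()/set_runner() were already called; the
# axis/value payload is an arbitrary example):
def _example_dispatch(sub):
    msg = json.dumps({'type': 'speed', 'data': {'axis': 'x', 'value': 3000}})
    # dispatch_message() decodes the JSON and routes it through
    # dispatcher['speed'], which ends up calling head.set_speed('x', 3000).
    sub.dispatch_message(msg)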
|
|
import cython
from Cython import __version__
import re, os, sys, time
try:
from glob import iglob
except ImportError:
# Py2.4
from glob import glob as iglob
try:
import gzip
gzip_open = gzip.open
gzip_ext = '.gz'
except ImportError:
gzip_open = open
gzip_ext = ''
import shutil
import subprocess
try:
import hashlib
except ImportError:
import md5 as hashlib
try:
from io import open as io_open
except ImportError:
from codecs import open as io_open
try:
from os.path import relpath as _relpath
except ImportError:
# Py<2.6
def _relpath(path, start=os.path.curdir):
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
from distutils.extension import Extension
from Cython import Utils
from Cython.Utils import cached_function, cached_method, path_exists, find_root_package_dir
from Cython.Compiler.Main import Context, CompilationOptions, default_options
join_path = cached_function(os.path.join)
if sys.version_info[0] < 3:
# stupid Py2 distutils enforces str type in list of sources
_fs_encoding = sys.getfilesystemencoding()
if _fs_encoding is None:
_fs_encoding = sys.getdefaultencoding()
def encode_filename_in_py2(filename):
if isinstance(filename, unicode):
return filename.encode(_fs_encoding)
return filename
else:
def encode_filename_in_py2(filename):
return filename
def extended_iglob(pattern):
if '**/' in pattern:
seen = set()
first, rest = pattern.split('**/', 1)
if first:
first = iglob(first+'/')
else:
first = ['']
for root in first:
for path in extended_iglob(join_path(root, rest)):
if path not in seen:
seen.add(path)
yield path
for path in extended_iglob(join_path(root, '*', '**/' + rest)):
if path not in seen:
seen.add(path)
yield path
else:
for path in iglob(pattern):
yield path
@cached_function
def file_hash(filename):
path = os.path.normpath(filename.encode("UTF-8"))
m = hashlib.md5(str(len(path)) + ":")
m.update(path)
f = open(filename, 'rb')
try:
data = f.read(65000)
while data:
m.update(data)
data = f.read(65000)
finally:
f.close()
return m.hexdigest()
def parse_list(s):
"""
>>> parse_list("a b c")
['a', 'b', 'c']
>>> parse_list("[a, b, c]")
['a', 'b', 'c']
>>> parse_list('a " " b')
['a', ' ', 'b']
>>> parse_list('[a, ",a", "a,", ",", ]')
['a', ',a', 'a,', ',']
"""
if s[0] == '[' and s[-1] == ']':
s = s[1:-1]
delimiter = ','
else:
delimiter = ' '
s, literals = strip_string_literals(s)
def unquote(literal):
literal = literal.strip()
if literal[0] in "'\"":
return literals[literal[1:-1]]
else:
return literal
return [unquote(item) for item in s.split(delimiter) if item.strip()]
transitive_str = object()
transitive_list = object()
distutils_settings = {
'name': str,
'sources': list,
'define_macros': list,
'undef_macros': list,
'libraries': transitive_list,
'library_dirs': transitive_list,
'runtime_library_dirs': transitive_list,
'include_dirs': transitive_list,
'extra_objects': list,
'extra_compile_args': transitive_list,
'extra_link_args': transitive_list,
'export_symbols': list,
'depends': transitive_list,
'language': transitive_str,
}
@cython.locals(start=long, end=long)
def line_iter(source):
start = 0
while True:
end = source.find('\n', start)
if end == -1:
yield source[start:]
return
yield source[start:end]
start = end+1
class DistutilsInfo(object):
def __init__(self, source=None, exn=None):
self.values = {}
if source is not None:
for line in line_iter(source):
line = line.strip()
if line != '' and line[0] != '#':
break
line = line[1:].strip()
if line[:10] == 'distutils:':
line = line[10:]
ix = line.index('=')
key = str(line[:ix].strip())
value = line[ix+1:].strip()
type = distutils_settings[key]
if type in (list, transitive_list):
value = parse_list(value)
if key == 'define_macros':
value = [tuple(macro.split('=')) for macro in value]
self.values[key] = value
elif exn is not None:
for key in distutils_settings:
if key in ('name', 'sources'):
continue
value = getattr(exn, key, None)
if value:
self.values[key] = value
def merge(self, other):
if other is None:
return self
for key, value in other.values.items():
type = distutils_settings[key]
if type is transitive_str and key not in self.values:
self.values[key] = value
elif type is transitive_list:
if key in self.values:
all = self.values[key]
for v in value:
if v not in all:
all.append(v)
else:
self.values[key] = value
return self
def subs(self, aliases):
if aliases is None:
return self
resolved = DistutilsInfo()
for key, value in self.values.items():
type = distutils_settings[key]
if type in [list, transitive_list]:
new_value_list = []
for v in value:
if v in aliases:
v = aliases[v]
if isinstance(v, list):
new_value_list += v
else:
new_value_list.append(v)
value = new_value_list
else:
if value in aliases:
value = aliases[value]
resolved.values[key] = value
return resolved
@cython.locals(start=long, q=long, single_q=long, double_q=long, hash_mark=long,
end=long, k=long, counter=long, quote_len=long)
def strip_string_literals(code, prefix='__Pyx_L'):
"""
Normalizes every string literal to be of the form '__Pyx_Lxxx',
returning the normalized code and a mapping of labels to
string literals.
"""
new_code = []
literals = {}
counter = 0
start = q = 0
in_quote = False
hash_mark = single_q = double_q = -1
code_len = len(code)
while True:
if hash_mark < q:
hash_mark = code.find('#', q)
if single_q < q:
single_q = code.find("'", q)
if double_q < q:
double_q = code.find('"', q)
q = min(single_q, double_q)
if q == -1: q = max(single_q, double_q)
# We're done.
if q == -1 and hash_mark == -1:
new_code.append(code[start:])
break
# Try to close the quote.
elif in_quote:
if code[q-1] == u'\\':
k = 2
while q >= k and code[q-k] == u'\\':
k += 1
if k % 2 == 0:
q += 1
continue
if code[q] == quote_type and (quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])):
counter += 1
label = "%s%s_" % (prefix, counter)
literals[label] = code[start+quote_len:q]
full_quote = code[q:q+quote_len]
new_code.append(full_quote)
new_code.append(label)
new_code.append(full_quote)
q += quote_len
in_quote = False
start = q
else:
q += 1
# Process comment.
elif -1 != hash_mark and (hash_mark < q or q == -1):
new_code.append(code[start:hash_mark+1])
end = code.find('\n', hash_mark)
counter += 1
label = "%s%s_" % (prefix, counter)
if end == -1:
end_or_none = None
else:
end_or_none = end
literals[label] = code[hash_mark+1:end_or_none]
new_code.append(label)
if end == -1:
break
start = q = end
# Open the quote.
else:
if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]):
quote_len = 3
else:
quote_len = 1
in_quote = True
quote_type = code[q]
new_code.append(code[start:q])
start = q
q += quote_len
return "".join(new_code), literals
dependancy_regex = re.compile(r"(?:^from +([0-9a-zA-Z_.]+) +cimport)|"
r"(?:^cimport +([0-9a-zA-Z_.]+)\b)|"
r"(?:^cdef +extern +from +['\"]([^'\"]+)['\"])|"
r"(?:^include +['\"]([^'\"]+)['\"])", re.M)
def normalize_existing(base_path, rel_paths):
return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))
@cached_function
def normalize_existing0(base_dir, rel_paths):
normalized = []
for rel in rel_paths:
path = join_path(base_dir, rel)
if path_exists(path):
normalized.append(os.path.normpath(path))
else:
normalized.append(rel)
return normalized
def resolve_depends(depends, include_dirs):
include_dirs = tuple(include_dirs)
resolved = []
for depend in depends:
path = resolve_depend(depend, include_dirs)
if path is not None:
resolved.append(path)
return resolved
@cached_function
def resolve_depend(depend, include_dirs):
if depend[0] == '<' and depend[-1] == '>':
return None
for dir in include_dirs:
path = join_path(dir, depend)
if path_exists(path):
return os.path.normpath(path)
return None
@cached_function
def package(filename):
dir = os.path.dirname(os.path.abspath(str(filename)))
if dir != filename and path_exists(join_path(dir, '__init__.py')):
return package(dir) + (os.path.basename(dir),)
else:
return ()
@cached_function
def fully_qualified_name(filename):
module = os.path.splitext(os.path.basename(filename))[0]
return '.'.join(package(filename) + (module,))
@cached_function
def parse_dependencies(source_filename):
    # Actual parsing is way too slow, so we use regular expressions.
# The only catch is that we must strip comments and string
# literals ahead of time.
fh = Utils.open_source_file(source_filename, "rU", error_handling='ignore')
try:
source = fh.read()
finally:
fh.close()
distutils_info = DistutilsInfo(source)
source, literals = strip_string_literals(source)
source = source.replace('\\\n', ' ').replace('\t', ' ')
# TODO: pure mode
cimports = []
includes = []
externs = []
for m in dependancy_regex.finditer(source):
cimport_from, cimport, extern, include = m.groups()
if cimport_from:
cimports.append(cimport_from)
elif cimport:
cimports.append(cimport)
elif extern:
externs.append(literals[extern])
else:
includes.append(literals[include])
return cimports, includes, externs, distutils_info
class DependencyTree(object):
def __init__(self, context, quiet=False):
self.context = context
self.quiet = quiet
self._transitive_cache = {}
def parse_dependencies(self, source_filename):
return parse_dependencies(source_filename)
@cached_method
def included_files(self, filename):
# This is messy because included files are textually included, resolving
# cimports (but not includes) relative to the including file.
all = set()
for include in self.parse_dependencies(filename)[1]:
include_path = join_path(os.path.dirname(filename), include)
if not path_exists(include_path):
include_path = self.context.find_include_file(include, None)
if include_path:
if '.' + os.path.sep in include_path:
include_path = os.path.normpath(include_path)
all.add(include_path)
all.update(self.included_files(include_path))
elif not self.quiet:
print("Unable to locate '%s' referenced from '%s'" % (filename, include))
return all
@cached_method
def cimports_and_externs(self, filename):
# This is really ugly. Nested cimports are resolved with respect to the
# includer, but includes are resolved with respect to the includee.
cimports, includes, externs = self.parse_dependencies(filename)[:3]
cimports = set(cimports)
externs = set(externs)
for include in self.included_files(filename):
included_cimports, included_externs = self.cimports_and_externs(include)
cimports.update(included_cimports)
externs.update(included_externs)
return tuple(cimports), normalize_existing(filename, externs)
def cimports(self, filename):
return self.cimports_and_externs(filename)[0]
def package(self, filename):
return package(filename)
def fully_qualified_name(self, filename):
return fully_qualified_name(filename)
@cached_method
def find_pxd(self, module, filename=None):
is_relative = module[0] == '.'
if is_relative and not filename:
raise NotImplementedError("New relative imports.")
if filename is not None:
module_path = module.split('.')
if is_relative:
module_path.pop(0) # just explicitly relative
package_path = list(self.package(filename))
while module_path and not module_path[0]:
try:
package_path.pop()
except IndexError:
return None # FIXME: error?
module_path.pop(0)
relative = '.'.join(package_path + module_path)
pxd = self.context.find_pxd_file(relative, None)
if pxd:
return pxd
if is_relative:
return None # FIXME: error?
return self.context.find_pxd_file(module, None)
@cached_method
def cimported_files(self, filename):
if filename[-4:] == '.pyx' and path_exists(filename[:-4] + '.pxd'):
pxd_list = [filename[:-4] + '.pxd']
else:
pxd_list = []
for module in self.cimports(filename):
if module[:7] == 'cython.' or module == 'cython':
continue
pxd_file = self.find_pxd(module, filename)
if pxd_file is not None:
pxd_list.append(pxd_file)
elif not self.quiet:
print("missing cimport in module '%s': %s" % (module, filename))
return tuple(pxd_list)
@cached_method
def immediate_dependencies(self, filename):
all = set([filename])
all.update(self.cimported_files(filename))
all.update(self.included_files(filename))
return all
def all_dependencies(self, filename):
return self.transitive_merge(filename, self.immediate_dependencies, set.union)
@cached_method
def timestamp(self, filename):
return os.path.getmtime(filename)
def extract_timestamp(self, filename):
return self.timestamp(filename), filename
def newest_dependency(self, filename):
return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])
def transitive_fingerprint(self, filename, extra=None):
try:
m = hashlib.md5(__version__)
m.update(file_hash(filename))
for x in sorted(self.all_dependencies(filename)):
if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'):
m.update(file_hash(x))
if extra is not None:
m.update(str(extra))
return m.hexdigest()
except IOError:
return None
def distutils_info0(self, filename):
info = self.parse_dependencies(filename)[3]
externs = self.cimports_and_externs(filename)[1]
if externs:
if 'depends' in info.values:
info.values['depends'] = list(set(info.values['depends']).union(externs))
else:
info.values['depends'] = list(externs)
return info
def distutils_info(self, filename, aliases=None, base=None):
return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
.subs(aliases)
.merge(base))
def transitive_merge(self, node, extract, merge):
try:
seen = self._transitive_cache[extract, merge]
except KeyError:
seen = self._transitive_cache[extract, merge] = {}
return self.transitive_merge_helper(
node, extract, merge, seen, {}, self.cimported_files)[0]
def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
if node in seen:
return seen[node], None
deps = extract(node)
if node in stack:
return deps, node
try:
stack[node] = len(stack)
loop = None
for next in outgoing(node):
sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)
if sub_loop is not None:
if loop is not None and stack[loop] < stack[sub_loop]:
pass
else:
loop = sub_loop
deps = merge(deps, sub_deps)
if loop == node:
loop = None
if loop is None:
seen[node] = deps
return deps, loop
finally:
del stack[node]
_dep_tree = None
def create_dependency_tree(ctx=None, quiet=False):
global _dep_tree
if _dep_tree is None:
if ctx is None:
ctx = Context(["."], CompilationOptions(default_options))
_dep_tree = DependencyTree(ctx, quiet=quiet)
return _dep_tree
# This may be useful for advanced users?
def create_extension_list(patterns, exclude=[], ctx=None, aliases=None, quiet=False, exclude_failures=False):
if not isinstance(patterns, (list, tuple)):
patterns = [patterns]
explicit_modules = set([m.name for m in patterns if isinstance(m, Extension)])
seen = set()
deps = create_dependency_tree(ctx, quiet=quiet)
to_exclude = set()
if not isinstance(exclude, list):
exclude = [exclude]
for pattern in exclude:
to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))
module_list = []
for pattern in patterns:
if isinstance(pattern, str):
filepattern = pattern
template = None
name = '*'
base = None
exn_type = Extension
elif isinstance(pattern, Extension):
filepattern = pattern.sources[0]
if os.path.splitext(filepattern)[1] not in ('.py', '.pyx'):
# ignore non-cython modules
module_list.append(pattern)
continue
template = pattern
name = template.name
base = DistutilsInfo(exn=template)
exn_type = template.__class__
else:
raise TypeError(pattern)
for file in extended_iglob(filepattern):
if os.path.abspath(file) in to_exclude:
continue
pkg = deps.package(file)
if '*' in name:
module_name = deps.fully_qualified_name(file)
if module_name in explicit_modules:
continue
else:
module_name = name
if module_name not in seen:
try:
kwds = deps.distutils_info(file, aliases, base).values
except Exception:
if exclude_failures:
continue
raise
if base is not None:
for key, value in base.values.items():
if key not in kwds:
kwds[key] = value
sources = [file]
if template is not None:
sources += template.sources[1:]
if 'sources' in kwds:
# allow users to add .c files etc.
for source in kwds['sources']:
source = encode_filename_in_py2(source)
if source not in sources:
sources.append(source)
del kwds['sources']
if 'depends' in kwds:
depends = resolve_depends(kwds['depends'], (kwds.get('include_dirs') or []) + [find_root_package_dir(file)])
if template is not None:
# Always include everything from the template.
depends = list(set(template.depends).union(set(depends)))
kwds['depends'] = depends
module_list.append(exn_type(
name=module_name,
sources=sources,
**kwds))
m = module_list[-1]
seen.add(name)
return module_list
# This is the user-exposed entry point.
def cythonize(module_list, exclude=[], nthreads=0, aliases=None, quiet=False, force=False,
exclude_failures=False, **options):
"""
Compile a set of source modules into C/C++ files and return a list of distutils
Extension objects for them.
As module list, pass either a glob pattern, a list of glob patterns or a list of
Extension objects. The latter allows you to configure the extensions separately
through the normal distutils options.
When using glob patterns, you can exclude certain module names explicitly
by passing them into the 'exclude' option.
For parallel compilation, set the 'nthreads' option to the number of
concurrent builds.
For a broad 'try to compile' mode that ignores compilation failures and
simply excludes the failed extensions, pass 'exclude_failures=True'. Note
that this only really makes sense for compiling .py files which can also
be used without compilation.
Additional compilation options can be passed as keyword arguments.
"""
if 'include_path' not in options:
options['include_path'] = ['.']
if 'common_utility_include_dir' in options:
if options.get('cache'):
raise NotImplementedError("common_utility_include_dir does not yet work with caching")
if not os.path.exists(options['common_utility_include_dir']):
os.makedirs(options['common_utility_include_dir'])
c_options = CompilationOptions(**options)
cpp_options = CompilationOptions(**options); cpp_options.cplus = True
ctx = c_options.create_context()
options = c_options
module_list = create_extension_list(
module_list,
exclude=exclude,
ctx=ctx,
quiet=quiet,
exclude_failures=exclude_failures,
aliases=aliases)
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
modules_by_cfile = {}
to_compile = []
for m in module_list:
if build_dir:
root = os.path.realpath(os.path.abspath(find_root_package_dir(m.sources[0])))
def copy_to_build_dir(filepath, root=root):
filepath_abs = os.path.realpath(os.path.abspath(filepath))
if os.path.isabs(filepath):
filepath = filepath_abs
if filepath_abs.startswith(root):
mod_dir = os.path.join(build_dir,
os.path.dirname(_relpath(filepath, root)))
if not os.path.isdir(mod_dir):
os.makedirs(mod_dir)
shutil.copy(filepath, mod_dir)
for dep in m.depends:
copy_to_build_dir(dep)
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'):
if m.language == 'c++':
c_file = base + '.cpp'
options = cpp_options
else:
c_file = base + '.c'
options = c_options
# setup for out of place build directory if enabled
if build_dir:
c_file = os.path.join(build_dir, c_file)
dir = os.path.dirname(c_file)
if not os.path.isdir(dir):
os.makedirs(dir)
if os.path.exists(c_file):
c_timestamp = os.path.getmtime(c_file)
else:
c_timestamp = -1
# Priority goes first to modified files, second to direct
# dependents, and finally to indirect dependents.
if c_timestamp < deps.timestamp(source):
dep_timestamp, dep = deps.timestamp(source), source
priority = 0
else:
dep_timestamp, dep = deps.newest_dependency(source)
priority = 2 - (dep in deps.immediate_dependencies(source))
if force or c_timestamp < dep_timestamp:
if not quiet:
if source == dep:
print("Compiling %s because it changed." % source)
else:
print("Compiling %s because it depends on %s." % (source, dep))
if not force and hasattr(options, 'cache'):
extra = m.language
fingerprint = deps.transitive_fingerprint(source, extra)
else:
fingerprint = None
to_compile.append((priority, source, c_file, fingerprint, quiet,
options, not exclude_failures))
new_sources.append(c_file)
if c_file not in modules_by_cfile:
modules_by_cfile[c_file] = [m]
else:
modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
copy_to_build_dir(source)
m.sources = new_sources
if hasattr(options, 'cache'):
if not os.path.exists(options.cache):
os.makedirs(options.cache)
to_compile.sort()
if nthreads:
# Requires multiprocessing (or Python >= 2.6)
try:
import multiprocessing
pool = multiprocessing.Pool(nthreads)
except (ImportError, OSError):
print("multiprocessing required for parallel cythonization")
nthreads = 0
else:
pool.map(cythonize_one_helper, to_compile)
if not nthreads:
for args in to_compile:
cythonize_one(*args[1:])
if exclude_failures:
failed_modules = set()
for c_file, modules in modules_by_cfile.iteritems():
if not os.path.exists(c_file):
failed_modules.update(modules)
elif os.path.getsize(c_file) < 200:
f = io_open(c_file, 'r', encoding='iso8859-1')
try:
if f.read(len('#error ')) == '#error ':
# dead compilation result
failed_modules.update(modules)
finally:
f.close()
if failed_modules:
for module in failed_modules:
module_list.remove(module)
print("Failed compilations: %s" % ', '.join(sorted([
module.name for module in failed_modules])))
if hasattr(options, 'cache'):
cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
# cythonize() is often followed by the (non-Python-buffered)
# compiler output, flush now to avoid interleaving output.
sys.stdout.flush()
return module_list
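# A minimal sketch of the call pattern described in cythonize()'s docstring,
# as it would appear in a setup.py (project name and glob patterns are
# placeholders):
def _example_setup_py():
    from distutils.core import setup
    setup(
        name='example_project',
        ext_modules=cythonize('*.pyx', exclude=['test_*.pyx'], nthreads=0),
    )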
if os.environ.get('XML_RESULTS'):
compile_result_dir = os.environ['XML_RESULTS']
def record_results(func):
def with_record(*args):
t = time.time()
success = True
try:
try:
func(*args)
except:
success = False
finally:
t = time.time() - t
module = fully_qualified_name(args[0])
name = "cythonize." + module
failures = 1 - success
if success:
failure_item = ""
else:
failure_item = "failure"
output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
output.write("""
<?xml version="1.0" ?>
<testsuite name="%(name)s" errors="0" failures="%(failures)s" tests="1" time="%(t)s">
<testcase classname="%(name)s" name="cythonize">
%(failure_item)s
</testcase>
</testsuite>
""".strip() % locals())
output.close()
return with_record
else:
record_results = lambda x: x
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, raise_on_failure=True):
from Cython.Compiler.Main import compile, default_options
from Cython.Compiler.Errors import CompileError, PyrexError
if fingerprint:
if not os.path.exists(options.cache):
try:
os.mkdir(options.cache)
except:
if not os.path.exists(options.cache):
raise
# Cython-generated c files are highly compressible.
# (E.g. a compression ratio of about 10 for Sage).
fingerprint_file = join_path(
options.cache, "%s-%s%s" % (os.path.basename(c_file), fingerprint, gzip_ext))
if os.path.exists(fingerprint_file):
if not quiet:
print("Found compiled %s in cache" % pyx_file)
os.utime(fingerprint_file, None)
g = gzip_open(fingerprint_file, 'rb')
try:
f = open(c_file, 'wb')
try:
shutil.copyfileobj(g, f)
finally:
f.close()
finally:
g.close()
return
if not quiet:
print("Cythonizing %s" % pyx_file)
if options is None:
options = CompilationOptions(default_options)
options.output_file = c_file
any_failures = 0
try:
result = compile([pyx_file], options)
if result.num_errors > 0:
any_failures = 1
except (EnvironmentError, PyrexError), e:
sys.stderr.write('%s\n' % e)
any_failures = 1
# XXX
import traceback
traceback.print_exc()
except Exception:
if raise_on_failure:
raise
import traceback
traceback.print_exc()
any_failures = 1
if any_failures:
if raise_on_failure:
raise CompileError(None, pyx_file)
elif os.path.exists(c_file):
os.remove(c_file)
elif fingerprint:
f = open(c_file, 'rb')
try:
g = gzip_open(fingerprint_file, 'wb')
try:
shutil.copyfileobj(f, g)
finally:
g.close()
finally:
f.close()
def cythonize_one_helper(m):
import traceback
try:
return cythonize_one(*m[1:])
except Exception:
traceback.print_exc()
raise
def cleanup_cache(cache, target_size, ratio=.85):
try:
p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
res = p.wait()
if res == 0:
total_size = 1024 * int(p.stdout.read().strip().split()[0])
if total_size < target_size:
return
except (OSError, ValueError):
pass
total_size = 0
all = []
for file in os.listdir(cache):
path = join_path(cache, file)
s = os.stat(path)
total_size += s.st_size
all.append((s.st_atime, s.st_size, path))
if total_size > target_size:
for time, size, file in reversed(sorted(all)):
os.unlink(file)
total_size -= size
if total_size < target_size * ratio:
break
|
|
#!/bin/python2.7
import re
import glob
import sys
import os
from fnmatch import fnmatch
################################################################
# lib_file_t
################################################################
class lib_file_t:
""" top class for lib file parser. 3 steps:
1. read_file: >> (line_t)ls_all_line
2. parse_syntax: >> (lib_obj_t) top_obj
3. parse_semantic: >> (library_t) (cell_t) (bus_t) (pin_t) (timing_t)
"""
def __init__(self, lib_fpath):
assert os.path.isfile(lib_fpath), 'lib file (%s) does not exist' % lib_fpath
self.lib_fpath = lib_fpath
print '** lib_parser: begin to parse %s' % lib_fpath
print '** lib_parser: 1. read_file'
self._read_file()
print '** lib_parser: 2. parse_syntax'
self._parse_syntax()
print '** lib_parser: 3. parse_semantic'
self._parse_semantic()
print '** lib_parser: end parsing %s' % lib_fpath
def _read_file(self):
'read LIB file >> (line_t) ls_all_line ' # {{{
self.ls_all_line = list()
f = open(self.lib_fpath, 'r')
is_continue = False
is_comment = False
for (i, string) in enumerate(f):
line_num = i+1
line = line_t(string, line_num, is_comment=is_comment)
line.is_continue = is_continue
if (hasattr(line, 'is_comment_begin') and line.is_comment_begin):
is_comment = True
elif (hasattr(line, 'is_comment_end') and line.is_comment_end):
is_comment = False
self.ls_all_line.append(line)
# deal with continue lines: concat to last !is_continue line
if (line.is_continue):
j = i-1
while(self.ls_all_line[j].is_continue):
j -= 1
self.ls_all_line[j].string += line.string
line.string = ''
line.is_empty = True
# if this line has "\" on tail, then the next line continues to this one
if (line.has_continue):
is_continue = True
else:
is_continue = False
f.close()
# end of read }}}
def _parse_syntax(self):
'>> (lib_obj_t) self.top_obj' # {{{
ls_pat = list()
ls_pat.append(dt_pat['multi_begin'])
ls_pat.append(dt_pat['multi_end'])
ls_pat.append(dt_pat['single_colon'])
ls_pat.append(dt_pat['single_paren'])
# find line match >> line.syntax_type/ls_syntax_match
for line in self.ls_all_line:
line.syntax_type = ''
line.ls_syntax_match = None
if line.is_empty:
continue
# matching
for pat in ls_pat:
m = pat.match(line.string)
if (m != None):
line.syntax_type = pat.name
line.ls_syntax_match = m
break
# if no match
assert (line.syntax_type != ''), 'syntax error @ line %d (%s)' % (line.num, line.string)
# create (lib_obj_t) top_obj: recursion start point
for line in self.ls_all_line:
if not line.is_empty:
break
self.top_obj = lib_obj_t(line.num, self.ls_all_line)
# end of parse_syntax }}}
def _parse_semantic(self):
'>> (library_t) self.library' # {{{
# create semantic instance: recursion start point
self.library = library_t(self.top_obj)
# end of parse_semantic }}}
def write_back(self, new_lib_fpath):
# {{{
f = open(new_lib_fpath, 'w')
for line in self.ls_all_line:
f.write(str(line))
f.close()
# }}}
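# A minimal usage sketch (paths are placeholders; assumes the library_t/cell_t
# classes defined below):
def _example_usage():
    lib = lib_file_t('my.lib')
    for cell in lib.library.ls_cell:
        print cell.name
    lib.write_back('my_copy.lib')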
#===========================================================
# pat_t
#===========================================================
class pat_t:
'compiled pattern object and their type names'
def __init__(self, name, re_str):
# {{{
self.name = name
self.re_str = re_str
self.re = re.compile(re_str, re.IGNORECASE)
self.sub = self.re.sub
# }}}
def is_found(self, line):
'>> True/False' # {{{
m = self.re.search(line)
if (m == None):
return False
else:
return True
# }}}
def match(self, line):
'>> None or ls_matched_string' # {{{
m = self.re.match(line)
if (m == None):
return None
else:
ls = list()
for s in m.groups():
ls.append(s.strip('"'))
return ls
# }}}
#===========================================================
# line_t
#===========================================================
pat_comment = pat_t('comment', r'\/\*.*\*\/')
pat_comment_begin = pat_t('comment_begin', r'\/\*.*$')
pat_comment_end = pat_t('comment_end', r'^.*\*\/')
class line_t:
""" content of each liberty file line
1. comment/space/\n removed
2. continuous line combined
3. open for future extension
"""
def __init__(self, raw_string, num, is_comment=False):
# self.string/is_empty/has_continue/is_continue {{{
self.raw_string = raw_string # original string with \n
self.num = num # line number, = idx+1
# remove \n
string = raw_string.strip()
# remove comment
self.is_comment_begin = False
self.is_comment_end = False
if (pat_comment.is_found(string)):
# normal comment line
string = pat_comment.re.sub('', string)
elif (pat_comment_begin.is_found(string)):
# multi-line comment begin
assert (is_comment == False), 'Error: found multi-line comment begin while last multi-line comment has not ended @ line %d' % self.num
self.is_comment_begin = True
string = pat_comment_begin.re.sub('', string).strip()
elif (pat_comment_end.is_found(string)):
# multi-line comment end
assert (is_comment == True), 'Error: found multi-line comment end without comment begin @ line %d' % self.num
self.is_comment_end = True
string = pat_comment_end.re.sub('', string).strip()
elif (is_comment):
string = ''
# # lowercase
# string = string.lower()
# remove unwanted space, which doesn't include space between " "
is_unwanted_space = True
new_string = ''
for c in string:
if (c == '"'):
is_unwanted_space = not is_unwanted_space
if ((c == ' ') and is_unwanted_space):
continue
new_string += c
self.string = new_string # content string without comment, space, \n
if (self.string == ''):
self.is_empty = True
else:
self.is_empty = False
self.has_continue = False
if (self.string.endswith('\\')):
self.has_continue = True
            self.string = self.string.rstrip('\\')
self.is_continue = False
# }}}
def append(self, new_raw_string):
'>> ls_append_string. add one new_raw_string after current line' # {{{
if not hasattr(self, 'ls_append_string'):
self.ls_append_string = list()
self.ls_append_string.append(new_raw_string)
# }}}
def replace(self, new_raw_string):
'>> replace_string. replace current line with given new_raw_string' # {{{
# self.replace_string = new_raw_string
self.raw_string = new_raw_string
# }}}
def remove(self):
'remove current line' # {{{
self.is_removed = True
# }}}
def __str__(self):
'for write back' # {{{
if hasattr(self, 'is_removed') and self.is_removed:
s = ''
else:
s = self.raw_string
# if hasattr(self, 'replace_string'):
# s = self.replace_string
if hasattr(self, 'ls_append_string'):
if (len(self.ls_append_string) == 1):
s += self.ls_append_string[0]
else:
for append in self.ls_append_string:
s += append
return s
# }}}
#===========================================================
# lib_obj_t
#===========================================================
dt_pat = dict()
# the begin line of multi-line object
dt_pat['multi_begin'] = pat_t('multi_begin', r'^(.+)\((.*)\){$')
# the end line of multi-line object
dt_pat['multi_end'] = pat_t('multi_end', '^}$')
# single line object defined with colon
dt_pat['single_colon'] = pat_t('single_colon', r'^(.+):(.+);$')
# single line object defined with paren
dt_pat['single_paren'] = pat_t('single_paren', r'^(.+)\((.+)\);$')
class lib_obj_t:
""" object of LIB
1. begin_line_num
2. end_line_num
3. ls_all_line/ls_line
4. ls_sub_obj
"""
def __init__(self, begin_line_num, ls_all_line):
# begin_line_nu/end_line_num, ls_all_line, ls_sub_obj {{{
self.begin_line_num = begin_line_num
self.end_line_num = 0
self.ls_all_line = ls_all_line # pointer to the ls_all_line, shared by all lib_obj in the same library
self.find_end_line()
self.ls_line = self.ls_all_line[self.begin_line_num-1:self.end_line_num]
self.ls_sub_obj = list() # list of sub lib_obj_t
self.find_sub_obj() # recursively find sub_obj
# }}}
def find_end_line(self):
# {{{
# single-line object
line = self.ls_all_line[self.begin_line_num-1]
if (line.syntax_type in ['single_colon', 'single_paren']):
self.end_line_num = line.num
return self.end_line_num
# multi-line object
level = 0
for line in self.ls_all_line[self.begin_line_num-1:]:
if (line.syntax_type == 'multi_begin'):
level += 1
elif (line.syntax_type == 'multi_end'):
level -= 1
if (level == 0):
self.end_line_num = line.num
return self.end_line_num
        # not found
        raise RuntimeError('cannot find end of lib_obj beginning @ line %d' % self.begin_line_num)
# }}}
def find_sub_obj(self):
# {{{
i = self.begin_line_num + 1 - 1 # start from the line after the begin_line
j = self.end_line_num - 1 - 1 # end at the line before the end_line
while (i <= j):
line = self.ls_all_line[i]
if (line.syntax_type in ['multi_begin', 'single_colon', 'single_paren']):
sub_obj = lib_obj_t(line.num, self.ls_all_line)
self.ls_sub_obj.append(sub_obj)
i = sub_obj.end_line_num - 1
i += 1
# }}}
def get_sub_obj(self, lib_obj_name):
'>> ls_lib_obj' # {{{
ls_lib_obj = list()
for lib_obj in self.ls_sub_obj:
if (lib_obj.begin_line_num != lib_obj.end_line_num):
line = lib_obj.ls_all_line[lib_obj.begin_line_num-1]
assert (line.syntax_type == 'multi_begin'), 'Syntax error: line %d (%s)' % (line.num, line.string)
if (fnmatch(line.ls_syntax_match[0], lib_obj_name)):
ls_lib_obj.append(lib_obj)
return ls_lib_obj
# }}}
#===========================================================
# instances: library/cell/bus/pin/timing/table
#===========================================================
class param_t:
""" liberty parameter
- name = string
- value = string
- lib_obj = lib_obj_t
- line = line_t
"""
def __init__(self, lib_obj):
# {{{
self.lib_obj = lib_obj
self.line = lib_obj.ls_all_line[lib_obj.begin_line_num-1]
assert (self.line.syntax_type in ['single_colon', 'single_paren']) and (len(self.line.ls_syntax_match) == 2), 'Syntax error: line %d (%s)' % (self.line.num, self.line.string)
self.name = self.line.ls_syntax_match[0]
self.value = self.line.ls_syntax_match[1]
# }}}
def __str__(self):
return '%s=%s' % (self.name, self.value)
    def __eq__(self, other):
        return (self.name == other.name) and (self.value == other.value)
    def __ne__(self, other):  # Python 2 does not derive != from __eq__
        return not self.__eq__(other)
class inst_t:
""" liberty instance
- type = string (library|cell|bus|pin|timing|table|operating_conditions|lu_table_template)
- name = string
- ls_param = list of param_t
- dt_param[name] = param_t
- ls_{sub_inst_type} = list of sub inst_t
- ls_sub_inst = a full list of all inst_t
- dt_{sub_inst_type}[sub_inst_name] = sub inst_t
- dt_sub_inst[sub_inst_type][sub_inst_name] = sub inst_t
- lib_obj = lib_obj_t
- title_line = line_t
"""
def __init__(self, lib_obj):
# {{{
self.lib_obj = lib_obj
assert (self.lib_obj.begin_line_num != self.lib_obj.end_line_num)
# type
self.type = ''
# name
self.title_line = self.lib_obj.ls_all_line[self.lib_obj.begin_line_num-1]
self.name = self.title_line.ls_syntax_match[1]
# parameter
self.ls_param = list()
self.dt_param = dict()
for lib_obj in self.lib_obj.ls_sub_obj:
if (lib_obj.begin_line_num == lib_obj.end_line_num):
p = param_t(lib_obj)
self.ls_param.append(p)
self.dt_param[p.name] = p
self.ls_param.sort(key=lambda x: x.name)
# sub instance: run by each instance type
self.ls_sub_inst = list()
self.dt_sub_inst = dict()
# }}}
def init_sub_inst(self, sub_inst_type, sub_inst_pat=''):
'use dynamic code to construct sub-instance members: ls_*, dt_*, ls_sub_inst, dt_sub_inst' # {{{
class_name = sub_inst_type + '_t'
if (sub_inst_pat == ''):
sub_inst_pat = sub_inst_type
self.dt_sub_inst[sub_inst_type] = dict()
self.__dict__['ls_' + sub_inst_type] = list()
self.__dict__['dt_' + sub_inst_type] = dict()
for lib_obj in self.lib_obj.get_sub_obj(sub_inst_pat):
n = eval(class_name + '(lib_obj)') # dynamic construct a new inst_t instance
n.type = sub_inst_type
# ls_*
self.__dict__['ls_' + sub_inst_type].append(n)
# dt_*
assert n.name not in self.__dict__['dt_' + sub_inst_type].keys()
self.__dict__['dt_' + sub_inst_type][n.name] = n
# dt_sub_inst
self.dt_sub_inst[sub_inst_type][n.name] = n
self.__dict__['ls_' + sub_inst_type].sort(key=lambda x: x.name)
# ls_sub_inst
self.ls_sub_inst += self.__dict__['ls_' + sub_inst_type]
# }}}
def __str__(self):
# {{{
return '%s(%s)' % (self.type, self.name)
# }}}
def __eq__(self, other):
# {{{
return (str(self) == str(other))
# }}}
#-------------------------------------------------------
class library_t(inst_t):
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
self.init_sub_inst('operating_conditions')
self.init_sub_inst('lu_table_template')
self.init_sub_inst('cell')
# }}}
#-------------------------------------------------------
class lu_table_template_t(inst_t):
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
# }}}
def __eq__(self, other):
# {{{
if (self.name != other.name):
return False
if (len(self.ls_param) != len(other.ls_param)):
return False
        for (self_param, other_param) in zip(self.ls_param, other.ls_param):
            if (self_param != other_param):
                return False
        return True
        # }}}
#-------------------------------------------------------
class operating_conditions_t(inst_t):
'name'
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
# }}}
def __eq__(self, other):
# {{{
        return (self.name == other.name) and (float(self.dt_param['voltage'].value) == float(other.dt_param['voltage'].value)) and (float(self.dt_param['temperature'].value) == float(other.dt_param['temperature'].value))
# }}}
#-------------------------------------------------------
class cell_t(inst_t):
'name/ls_bus/ls_pin/ls_all_pin/dt_all_pin'
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
self.init_sub_inst('bus')
self.init_sub_inst('pin')
self.ls_all_pin = list()
self.ls_all_pin += self.ls_pin
for bus in self.ls_bus:
self.ls_all_pin += bus.ls_pin
self.dt_all_pin = dict()
for pin in self.ls_all_pin:
self.dt_all_pin[pin.name] = pin
# }}}
#-------------------------------------------------------
class bus_t(inst_t):
'name/ls_pin'
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
self.init_sub_inst('pin')
self.init_sub_inst('timing')
self.ls_timing.sort(key=lambda x: x.name)
# }}}
#-------------------------------------------------------
class pin_t(inst_t):
'name/ls_timing'
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
self.init_sub_inst('timing')
self.ls_timing.sort(key=lambda x: x.name)
# }}}
#-------------------------------------------------------
class timing_t(inst_t):
'name/ls_table'
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
# distinguish name
self.name = self.dt_param['related_pin'].value + '.' + self.dt_param['timing_type'].value
if ('timing_sense' in self.dt_param.keys()):
self.name += '.' + self.dt_param['timing_sense'].value
self.init_sub_inst('table', sub_inst_pat='*')
# }}}
#-------------------------------------------------------
class table_t(inst_t):
'type/template'
def __init__(self, lib_obj):
# {{{
inst_t.__init__(self, lib_obj)
title_line = self.lib_obj.ls_all_line[lib_obj.begin_line_num-1]
self.type = title_line.ls_syntax_match[0]
self.template = title_line.ls_syntax_match[1]
self.name = self.type
# }}}
def __str__(self):
# {{{
return '%s(%s)' % (self.type, self.template)
# }}}
def __eq__(self, other):
# {{{
if (str(self) != str(other)):
return False
        if (self.dt_param['index_1'] != other.dt_param['index_1']):
            return False
        if ('index_2' in self.dt_param.keys()):
            if (self.dt_param['index_2'] != other.dt_param['index_2']):
                return False
return True
# }}}
################################################################
if __name__ == '__main__':
lib_test = lib_file_t('./test/test.lib')
# lib_test_err = lib_file_t('./test/test_error.lib')
# lib_full = lib_file_t('./test/full.lib')
lib_test.write_back('./new.lib')
    print('done')
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the "test" module for the validate_bom script.
It is responsible for coordinating and running the integration tests.
The test catalog is read from --test_profiles (default all_tests.yaml).
There are two parts to the catalog: "aliases" and "tests".
The "tests" is a dictionary of tests. Each entry is keyed by the name of the
test. A test has the following structure:
test_name:
requires:
configuration:
<commandline option>: <value>
services: [<microservice name>]
quota:
<resource>: <uses>
api: <primary microservice>
args:
alias: [<alias name>]
<command line flag>: <value>
The test_name.requires specifies the requirements in order to run the test.
If a requirement is not satisfied, the test will be skipped.
The test_name.requires.configuration specifies expected options and values.
These are the same names as parameters to the validate_bom__main executable.
Typically this is used to guard a test for a particular configuration (e.g.
don't test against a platform if the platform was not enabled in the
deployment).
The test_name.requires.services is a list of services that the test requires
either directly or indirectly. This is used to ensure that the services are
ready before running the test. If the service is alive but not healthy then
the test will be failed automatically without even running it (provided it
wouldn't have been skipped).
The test_name.api is used to specify the primary microservice that the test
uses. This is used to determine which port to pass to the test since the remote
ports are forwarded to unused local ports known only to this test controller.
The test_name.args are the commandline arguments to pass to the test.
The names of the arguments are the test's argument names without the
prefixed '--'. If the value begins with a '$' then the remaining value
refers to the name of an option whose value should become the argument.
A special argument "alias" is a list of aliases. These are names that
match the key of an entry in the "aliases" part of the file where all the
name/value pairs defined for the alias are bulk added as arguments.
The test_name.quota is used to rate-limit test execution where tests are
sensitive to resource costs. Arbitrary names can be limited using
--test_quota. The controller will use this as a semaphore to rate-limit
test execution for these resources. Unrestricted resources won't rate-limit.
If the cost is bigger than the total semaphore capacity then the test will
be given all the quota once all is available.
There is an overall rate-limiting semaphore on --test_concurrency for
how many tests can run at a time. This is enforced at the point of execution,
after all the setup and filtering has taken place.
"""
# pylint: disable=broad-except
from multiprocessing.pool import ThreadPool
import atexit
import collections
import logging
import math
import os
import re
import ssl
import subprocess
import socket
import threading
import time
import traceback
import yaml
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from buildtool import (
add_parser_argument,
determine_subprocess_outcome_labels,
check_subprocess,
check_subprocesses_to_logfile,
raise_and_log_error,
ConfigError,
ResponseError,
TimeoutError,
UnexpectedError)
from validate_bom__deploy import replace_ha_services
from iap_generate_google_auth_token import (
generate_auth_token,
get_service_account_email)
ForwardedPort = collections.namedtuple('ForwardedPort', ['child', 'port'])
def _unused_port():
"""Find a port that is not currently in use."""
# pylint: disable=unused-variable
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
addr, port = sock.getsockname()
sock.close()
return port
class QuotaTracker(object):
"""Manages quota for individual resources.
Note that this quota tracker is purely logical. It does not relate to the
real world. Others may be using the actual quota we have. This is only
regulating the test's use of the quota.
"""
MAX_QUOTA_METRIC_NAME = 'ResourceQuotaMax'
FREE_QUOTA_METRIC_NAME = 'ResourceQuotaAvailable'
INSUFFICIENT_QUOTA_METRIC_NAME = 'ResourceQuotaShortage'
def __init__(self, max_counts, metrics):
"""Constructor.
Args:
max_counts: [dict] The list of resources and quotas to manage.
"""
self.__counts = dict(max_counts)
self.__max_counts = dict(max_counts)
self.__condition_variable = threading.Condition()
self.__metrics = metrics
for name, value in max_counts.items():
labels = {'resource': name}
self.__metrics.set(self.MAX_QUOTA_METRIC_NAME, labels, value)
self.__metrics.set(self.FREE_QUOTA_METRIC_NAME, labels, value)
def acquire_all_safe(self, who, quota):
"""Acquire the desired quota, if any.
    This is thread-safe and will block until it can be satisfied.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired.
"""
got = None
with self.__condition_variable:
got = self.acquire_all_or_none_unsafe(who, quota)
while got is None:
logging.info('"%s" waiting on quota %s', who, quota)
self.__condition_variable.wait()
got = self.acquire_all_or_none_unsafe(who, quota)
return got
def acquire_all_or_none_safe(self, who, quota):
"""Acquire the desired quota, if any.
This is thread-safe, however will return None rather than block.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired if successful, or None if not.
"""
with self.__condition_variable:
return self.acquire_all_or_none_unsafe(who, quota)
def acquire_all_or_none_unsafe(self, who, quota):
"""Acquire the desired quota, if any.
This is not thread-safe so should be called while locked.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired if successful, or None if not.
"""
if not quota:
return {}
logging.info('"%s" attempting to acquire quota %s', who, quota)
acquired = {}
have_all = True
for key, value in quota.items():
got = self.__acquire_resource_or_none(key, value)
if not got:
have_all = False # Be lazy so we can record all the missing quota
else:
acquired[key] = got
if have_all:
return acquired
self.release_all_unsafe(who, acquired)
return None
def release_all_safe(self, who, quota):
"""Release all the resource quota.
Args:
who: [string] Who is releasing, for logging purposes.
quota: [dict] The non-None result from an acquire_all* method.
"""
with self.__condition_variable:
self.release_all_unsafe(who, quota)
self.__condition_variable.notify_all()
def release_all_unsafe(self, who, quota):
"""Release all the resource quota.
This is not thread-safe so should be called while locked.
Args:
who: [string] Who is releasing, for logging purposes.
quota: [dict] The non-None result from an acquire_all* method.
"""
if not quota:
return
logging.debug('"%s" releasing quota %s', who, quota)
for key, value in quota.items():
self.__release_resource(key, value)
def __acquire_resource_or_none(self, name, count):
"""Attempt to acquire some amount of quota.
Args:
name: [string] The name of the resource we're acquiring.
count: [int] The amount of the resource
Returns:
The amount we were given. This is either all or none. If non-zero
but less than we asked for, then it gave us the max quota it has.
In order for this to be the case, it must have all the quota available.
Otherwise it will return 0.
"""
have = self.__counts.get(name)
if have is None:
return count
if have >= count:
self.__counts[name] = have - count
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, self.__counts[name])
return count
max_count = self.__max_counts[name]
if have == max_count:
logging.warning('Quota %s has a max of %d but %d is desired.'
' Acquiring all the quota as a best effort.',
name, max_count, count)
self.__counts[name] = 0
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, 0)
return have
logging.warning('Quota %s has %d remaining, but %d are needed.'
' Rejecting the request for now.',
name, have, count)
self.__metrics.inc_counter(
self.INSUFFICIENT_QUOTA_METRIC_NAME, {'resource': name},
amount=count - have)
return 0
def __release_resource(self, name, count):
"""Restores previously acquired resource quota."""
have = self.__counts.get(name, None)
if have is not None:
self.__counts[name] = have + count
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, self.__counts[name])
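# Illustrative QuotaTracker usage (a sketch with made-up resource names and
# counts; 'metrics' stands for an object like deployer.metrics that exposes
# set() and inc_counter()):
#
#   tracker = QuotaTracker({'gce_region_CPUS': 16}, metrics)
#   got = tracker.acquire_all_safe('my_test', {'gce_region_CPUS': 4})
#   try:
#     ...  # run the test
#   finally:
#     tracker.release_all_safe('my_test', got)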
class ValidateBomTestController(object):
"""The test controller runs integration tests against a deployment."""
@property
def test_suite(self):
"""Returns the main test suite loaded from --test_suite."""
return self.__test_suite
@property
def options(self):
"""The configuration options."""
return self.__deployer.options
@property
def passed(self):
"""Returns the passed tests and reasons."""
return self.__passed
@property
def failed(self):
"""Returns the failed tests and reasons."""
return self.__failed
@property
def skipped(self):
"""Returns the skipped tests and reasons."""
return self.__skipped
@property
def exit_code(self):
"""Determine final exit code for all tests."""
return -1 if self.failed else 0
def __close_forwarded_ports(self):
for forwarding in self.__forwarded_ports.values():
try:
forwarding[0].kill()
except Exception as ex:
logging.error('Error terminating child: %s', ex)
def __collect_gce_quota(self, project, region,
project_percent=100.0, region_percent=100.0):
project_info_json = check_subprocess('gcloud compute project-info describe'
' --format yaml'
' --project %s' % project)
project_info = yaml.safe_load(project_info_json)
    # Sometimes GCE returns entries and leaves out the "metric" it was for.
# We'll ignore those and stick them in 'UNKNOWN' for simplicity.
project_quota = {'gce_global_%s' % info.get('metric', 'UNKNOWN'):
int(max(1, math.floor(
project_percent * (info['limit'] - info['usage']))))
for info in project_info['quotas']}
region_info_json = check_subprocess('gcloud compute regions describe'
' --format yaml'
' --project %s'
' %s' % (project, region))
region_info = yaml.safe_load(region_info_json)
region_quota = {
'gce_region_%s' % info.get('metric', 'UNKNOWN'): int(max(
1, math.floor(region_percent * (info['limit'] - info['usage']))))
for info in region_info['quotas']
}
return project_quota, region_quota
def __init__(self, deployer):
options = deployer.options
quota_spec = {}
if options.google_account_project:
project_quota, region_quota = self.__collect_gce_quota(
options.google_account_project, options.test_gce_quota_region,
project_percent=options.test_gce_project_quota_factor,
region_percent=options.test_gce_region_quota_factor)
quota_spec.update(project_quota)
quota_spec.update(region_quota)
if options.test_default_quota:
quota_spec.update({
parts[0].strip(): int(parts[1])
for parts in [entry.split('=')
for entry in options.test_default_quota.split(',')]
})
if options.test_quota:
quota_spec.update(
{parts[0].strip(): int(parts[1])
for parts in [entry.split('=')
for entry in options.test_quota.split(',')]})
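    # Illustrative (made-up) values: --test_default_quota='jenkins=1' followed by
    # --test_quota='gce_region_CPUS=8,jenkins=2' leaves quota_spec with
    # {'gce_region_CPUS': 8, 'jenkins': 2} on top of any GCE-derived quota above,
    # because the --test_quota entries are applied last and override the defaults.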
self.__quota_tracker = QuotaTracker(quota_spec, deployer.metrics)
self.__deployer = deployer
self.__lock = threading.Lock()
self.__passed = [] # Resulted in success
self.__failed = [] # Resulted in failure
self.__skipped = [] # Will not run at all
with open(options.test_profiles, 'r') as fd:
self.__test_suite = yaml.safe_load(fd)
self.__extra_test_bindings = (
self.__load_bindings(options.test_extra_profile_bindings)
if options.test_extra_profile_bindings
else {}
)
    num_concurrent = len(self.__test_suite.get('tests', {})) or 1
num_concurrent = int(min(num_concurrent,
options.test_concurrency or num_concurrent))
self.__semaphore = threading.Semaphore(num_concurrent)
# dictionary of service -> ForwardedPort
self.__forwarded_ports = {}
atexit.register(self.__close_forwarded_ports)
# Map of service names to native ports.
self.__service_port_map = {
# These are critical to most tests.
'clouddriver': 7002,
'clouddriver-caching': 7002,
'clouddriver-rw': 7002,
'clouddriver-ro': 7002,
'clouddriver-ro-deck': 7002,
'gate': 8084,
'front50': 8080,
# Some tests needed these too.
'orca': 8083,
'rosco': 8087,
'igor': 8088,
'echo': 8089,
'echo-scheduler': 8089,
'echo-worker': 8089
}
# Map of services with provided endpoints and credentials.
self.__public_service_configs = {}
self.__add_gate_service_config(self.__public_service_configs)
def __add_gate_service_config(self, configs):
if not self.options.test_gate_service_base_url:
return
service_config = {
'base_url': self.options.test_gate_service_base_url,
}
credentials_path = self.options.test_gate_iap_credentials # This can be None, which would mean we use the Application Default Credentials
client_id = self.options.test_gate_iap_client_id
impersonated_service_account = self.options.test_gate_iap_impersonated_service_account
if client_id:
service_config['service_account_email'] = impersonated_service_account or get_service_account_email(credentials_path)
service_config['bearer_auth_token'] = generate_auth_token(client_id,
service_account_file=credentials_path,
impersonate_service_account_email=impersonated_service_account)
configs['gate'] = service_config
def __bearer_auth_token_or_none(self, service_name, client_id, credentials_path=None):
return generate_auth_token(client_id, credentials_path)
def __replace_ha_api_service(self, service, options):
transform_map = {}
if options.ha_clouddriver_enabled:
transform_map['clouddriver'] = 'clouddriver-rw'
if options.ha_echo_enabled:
      transform_map['echo'] = 'echo-worker'
return transform_map.get(service, service)
def __load_bindings(self, path):
with open(path, 'r') as stream:
content = stream.read()
    result = {}
    for line in content.split('\n'):
      match = re.match('^([a-zA-Z][^=]*)=(.*)', line)
      if match:
        result[match.group(1).strip()] = match.group(2).strip()
    return result
def __forward_port_to_service(self, service_name):
"""Forward ports to the deployed service.
This is private to ensure that it is called with the lock.
The lock is needed to mitigate a race condition. See the
inline comment around the Popen call.
"""
local_port = _unused_port()
remote_port = self.__service_port_map[service_name]
command = self.__deployer.make_port_forward_command(
service_name, local_port, remote_port)
logging.info('Establishing connection to %s with port %d',
service_name, local_port)
# There seems to be an intermittent race condition here.
# Not sure if it is gcloud or python.
# Locking the individual calls seems to work around it.
#
    # We don't need to lock because this function is called from within
# the lock already.
logging.debug('RUNNING %s', ' '.join(command))
# Redirect stdout to prevent buffer overflows (at least in k8s)
# but keep errors for failures.
class KeepAlive(threading.Thread):
def run(self):
while True:
try:
logging.info('KeepAlive %s polling', service_name)
got = urlopen('http://localhost:{port}/health'
.format(port=local_port))
logging.info('KeepAlive %s -> %s', service_name, got.getcode())
except Exception as ex:
logging.info('KeepAlive %s -> %s', service_name, ex)
time.sleep(20)
if self.options.deploy_spinnaker_type == 'distributed':
# For now, distributed deployments are k8s
# and K8s port forwarding with kubectl requires keep alive.
hack = KeepAlive()
hack.setDaemon(True)
hack.start()
logfile = os.path.join(
self.options.output_dir,
'port_forward_%s-%d.log' % (service_name, os.getpid()))
stream = open(logfile, 'w')
stream.write(str(command) + '\n\n')
logging.debug('Logging "%s" port forwarding to %s', service_name, logfile)
child = subprocess.Popen(
command,
stderr=subprocess.STDOUT,
stdout=stream)
return ForwardedPort(child, local_port)
def build_summary(self):
"""Return a summary of all the test results."""
def append_list_summary(summary, name, entries):
"""Write out all the names from the test results."""
if not entries:
return
summary.append('{0}:'.format(name))
for entry in entries:
summary.append(' * {0}'.format(entry[0]))
options = self.options
if not options.testing_enabled:
return 'No test output: testing was disabled.', 0
summary = ['\nSummary:']
append_list_summary(summary, 'SKIPPED', self.skipped)
append_list_summary(summary, 'PASSED', self.passed)
append_list_summary(summary, 'FAILED', self.failed)
num_skipped = len(self.skipped)
num_passed = len(self.passed)
num_failed = len(self.failed)
summary.append('')
if num_failed:
summary.append(
'FAILED {0} of {1}, skipped {2}'.format(
num_failed, (num_failed + num_passed), num_skipped))
else:
summary.append('PASSED {0}, skipped {1}'.format(num_passed, num_skipped))
return '\n'.join(summary)
def wait_on_service(self, service_name, port=None, timeout=None):
"""Wait for the given service to be available on the specified port.
Args:
      service_name: [string] The service name we are waiting on.
port: [int] The remote port the service is at.
timeout: [int] How much time to wait before giving up.
Returns:
The ForwardedPort entry for this service.
"""
try:
with self.__lock:
forwarding = self.__forwarded_ports.get(service_name)
if forwarding is None:
forwarding = self.__forward_port_to_service(service_name)
self.__forwarded_ports[service_name] = forwarding
except Exception:
logging.exception('Exception while attempting to forward ports to "%s"',
service_name)
raise
timeout = timeout or self.options.test_service_startup_timeout
end_time = time.time() + timeout
logging.info('Waiting on "%s"...', service_name)
if port is None:
port = self.__service_port_map[service_name]
# It seems we have a race condition in the poll
# where it thinks the jobs have terminated.
# I've only seen this happen once.
time.sleep(1)
threadid = hex(threading.current_thread().ident)
logging.info('WaitOn polling %s from thread %s', service_name, threadid)
while forwarding.child.poll() is None:
try:
# localhost is hardcoded here because we are port forwarding.
# timeout=20 is to appease kubectl port forwarding, which will close
# if left idle for 30s
urlopen('http://localhost:{port}/health'
.format(port=forwarding.port),
timeout=20)
logging.info('"%s" is ready on port %d | %s',
service_name, forwarding.port, threadid)
return forwarding
except HTTPError as error:
logging.warning('%s got %s. Ignoring that for now.',
service_name, error)
return forwarding
except (URLError, Exception) as error:
if time.time() >= end_time:
logging.error(
'Timing out waiting for %s | %s', service_name, threadid)
raise_and_log_error(TimeoutError(service_name, cause=service_name))
time.sleep(2.0)
logging.error('It appears %s is no longer available.'
' Perhaps the tunnel closed.',
service_name)
raise_and_log_error(
ResponseError('It appears that {0} failed'.format(service_name),
server='tunnel'))
def __validate_service_base_url(self, service_name, timeout=None):
service_config = self.__public_service_configs[service_name]
base_url = service_config['base_url']
timeout = timeout or self.options.test_service_startup_timeout
end_time = time.time() + timeout
logging.info('Validating base URL of "%s"...', service_name)
try:
url = '{base_url}/health'.format(base_url=base_url)
request = Request(url=url)
if 'bearer_auth_token' in service_config:
request.add_header('Authorization', 'Bearer {}'.format(service_config['bearer_auth_token']))
context = None
if self.options.test_ignore_ssl_cert_verification:
context = ssl._create_unverified_context()
urlopen(request, context=context)
logging.info('"%s" is ready on service endpoint %s',
service_name, base_url)
return
    except HTTPError as error:
      logging.error('%s service endpoint got %s.',
                    service_name, error)
      raise_and_log_error(
          ResponseError('{0} service endpoint got {1}'.format(service_name, error),
                        server=base_url))
    except URLError as error:
      # Handle URLError before the generic Exception handler; otherwise this
      # branch would be unreachable.
      if time.time() >= end_time:
        logging.error(
            'Timing out waiting for %s', service_name)
        raise_and_log_error(TimeoutError(service_name, cause=service_name))
      raise
    except Exception as error:
      raise_and_log_error(
          ResponseError('{0} service endpoint got {1}'.format(service_name, error),
                        server=base_url))
def run_tests(self):
"""The actual controller that coordinates and runs the tests.
This attempts to process all the tests concurrently across
    separate threads, where each test will:
(1) Determine whether or not the test is a candidate
(passes the --test_include / --test_exclude criteria)
(2) Evaluate the test's requirements.
If the configuration requirements are not met then SKIP the test.
(a) Attempt to tunnel each of the service tests, sharing existing
tunnels used by other tests. The tunnels allocate unused local
ports to avoid potential conflict within the local machine.
(b) Wait for the service to be ready. Ideally this means it is
healthy, however we'll allow unhealthy services to proceed
as well and let those tests run and fail in case they are
testing unhealthy service situations.
(c) If there is an error or the service takes too long then
outright FAIL the test.
(3) Acquire the quota that the test requires.
* If the quota is not currently available, then block the
thread until it is. Since each test is in its own thread, this
will not impact other tests.
* Quota are only internal resources within the controller.
This is used for purposes of rate limiting, etc. It does not
coordinate with the underlying platforms.
* Quota is governed with --test_quota. If a test requests
a resource without a known quota, then the quota is assumed
to be infinite.
(4) Grab a semaphore used to rate limit running tests.
This is controlled by --test_concurrency, which defaults to all.
(5) Run the test.
(6) Release the quota and semaphore to unblock other tests.
(7) Record the outcome as PASS or FAIL
If an exception is thrown along the way, the test will automatically
be recorded as a FAILURE.
Returns:
(#passed, #failed, #skipped)
"""
options = self.options
if not options.testing_enabled:
logging.info('--testing_enabled=false skips test phase entirely.')
return 0, 0, 0
all_test_profiles = self.test_suite['tests']
logging.info(
'Running tests (concurrency=%s).',
options.test_concurrency or 'infinite')
thread_pool = ThreadPool(len(all_test_profiles))
thread_pool.map(self.__run_or_skip_test_profile_entry_wrapper,
all_test_profiles.items())
thread_pool.terminate()
logging.info('Finished running tests.')
return len(self.__passed), len(self.__failed), len(self.__skipped)
def __run_or_skip_test_profile_entry_wrapper(self, args):
"""Outer wrapper for running tests
Args:
args: [dict entry] The name and spec tuple from the mapped element.
"""
test_name = args[0]
spec = args[1]
metric_labels = {'test_name': test_name, 'skipped': ''}
try:
self.__run_or_skip_test_profile_entry(test_name, spec, metric_labels)
except Exception as ex:
logging.error('%s threw an exception:\n%s',
test_name, traceback.format_exc())
with self.__lock:
self.__failed.append((test_name, 'Caught exception {0}'.format(ex)))
def __record_skip_test(self, test_name, reason, skip_code, metric_labels):
logging.warning(reason)
self.__skipped.append((test_name, reason))
copy_labels = dict(metric_labels)
copy_labels['skipped'] = skip_code
copy_labels['success'] = 'Skipped'
self.__deployer.metrics.observe_timer(
'RunTestScript' + '_Outcome', copy_labels, 0.0)
def __run_or_skip_test_profile_entry(self, test_name, spec, metric_labels):
"""Runs a test from within the thread-pool map() function.
Args:
test_name: [string] The name of the test.
spec: [dict] The test profile specification.
"""
options = self.options
if not re.search(options.test_include, test_name):
reason = ('Skipped test "{name}" because it does not match explicit'
' --test_include criteria "{criteria}".'
.format(name=test_name, criteria=options.test_include))
self.__record_skip_test(test_name, reason,
'NotExplicitInclude', metric_labels)
return
if options.test_exclude and re.search(options.test_exclude, test_name):
reason = ('Skipped test "{name}" because it matches explicit'
' --test_exclude criteria "{criteria}".'
.format(name=test_name, criteria=options.test_exclude))
self.__record_skip_test(test_name, reason,
'ExplicitExclude', metric_labels)
return
# This can raise an exception
self.run_test_profile_helper(test_name, spec, metric_labels)
def validate_test_requirements(self, test_name, spec, metric_labels):
"""Determine whether or not the test requirements are satisfied.
If not, record the reason a skip or failure.
This may throw exceptions, which are immediate failure.
Args:
test_name: [string] The name of the test.
spec: [dict] The profile specification containing requirements.
This argument will be pruned as values are consumed from it.
Returns:
True if requirements are satisfied, False if not.
"""
if not 'api' in spec:
raise_and_log_error(
UnexpectedError('Test "{name}" is missing an "api" spec.'.format(
name=test_name)))
requires = spec.pop('requires', {})
configuration = requires.pop('configuration', {})
our_config = vars(self.options)
for key, value in configuration.items():
if key not in our_config:
message = ('Unknown configuration key "{0}" for test "{1}"'
.format(key, test_name))
raise_and_log_error(ConfigError(message))
if value != our_config[key]:
reason = ('Skipped test {name} because {key}={want} != {have}'
.format(name=test_name, key=key,
want=value, have=our_config[key]))
with self.__lock:
self.__record_skip_test(test_name, reason,
'IncompatableConfig', metric_labels)
return False
services = set(replace_ha_services(
requires.pop('services', []), self.options))
services.add(self.__replace_ha_api_service(
spec.pop('api'), self.options))
if requires:
raise_and_log_error(
ConfigError('Unexpected fields in {name}.requires: {remaining}'
.format(name=test_name, remaining=requires)))
if spec:
raise_and_log_error(
ConfigError('Unexpected fields in {name} specification: {remaining}'
.format(name=test_name, remaining=spec)))
for service in self.__public_service_configs:
self.__validate_service_base_url(service)
if self.options.test_wait_on_services:
def wait_on_services(services):
thread_pool = ThreadPool(len(services))
thread_pool.map(self.wait_on_service, services)
thread_pool.terminate()
self.__deployer.metrics.track_and_time_call(
'WaitingOnServiceAvailability',
metric_labels, self.__deployer.metrics.default_determine_outcome_labels,
wait_on_services, services)
else:
logging.warning('Skipping waiting for services')
return True
def add_extra_arguments(self, test_name, args, commandline):
"""Add extra arguments to the commandline.
Args:
test_name: [string] Name of test specifying the options.
      args: [dict] Specification of additional arguments to pass.
        Each key is the name of the argument, the value is the value to pass.
        If the value is preceded with a '$' then it refers to the value of
an option. If the value is None then just add the key without an arg.
commandline: [list] The list of command line arguments to append to.
"""
option_dict = vars(self.options)
aliases_dict = self.test_suite.get('aliases', {})
for key, value in args.items():
if isinstance(value, (int, bool)):
value = str(value)
if key == 'alias':
for alias_name in value:
if not alias_name in aliases_dict:
raise_and_log_error(ConfigError(
'Unknown alias "{name}" referenced in args for "{test}"'
.format(name=alias_name, test=test_name)))
self.add_extra_arguments(
test_name, aliases_dict[alias_name], commandline)
continue
elif value is None:
pass
elif value.startswith('$'):
option_name = value[1:]
if option_name in option_dict:
value = option_dict[option_name] or '""'
elif option_name in self.__extra_test_bindings:
value = self.__extra_test_bindings[option_name] or '""'
elif option_name in os.environ:
value = os.environ[option_name]
else:
raise_and_log_error(ConfigError(
'Unknown option "{name}" referenced in args for "{test}"'
.format(name=option_name, test=test_name)))
if value is None:
commandline.append('--' + key)
else:
commandline.extend(['--' + key, value])
def make_test_command_or_none(self, test_name, spec, metric_labels):
"""Returns the command to run the test, or None to skip.
Args:
test_name: The test to run.
spec: The test specification profile.
This argument will be pruned as values are consumed from it.
Returns:
The command line argument list, or None to skip.
This may throw an exception if the spec is invalid.
This does not consider quota, which is checked later.
"""
options = self.options
microservice_api = self.__replace_ha_api_service(spec.get('api'), options)
test_rel_path = spec.pop('path', None) or os.path.join(
'citest', 'tests', '{0}.py'.format(test_name))
args = spec.pop('args', {})
if not self.validate_test_requirements(test_name, spec, metric_labels):
return None
testing_root_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'testing'))
test_path = os.path.join(testing_root_dir, test_rel_path)
citest_log_dir = os.path.join(options.log_dir, 'citest_logs')
if not os.path.exists(citest_log_dir):
try:
os.makedirs(citest_log_dir)
except:
# check for race condition
if not os.path.exists(citest_log_dir):
raise
command = [
'python', test_path,
'--log_dir', citest_log_dir,
'--log_filebase', test_name,
'--ignore_ssl_cert_verification', str(options.test_ignore_ssl_cert_verification)
]
if microservice_api in self.__public_service_configs:
service_config = self.__public_service_configs[microservice_api]
command.extend([
'--native_base_url', service_config['base_url']
])
if 'bearer_auth_token' in service_config:
command.extend([
'--bearer_auth_token', service_config['bearer_auth_token']
])
if 'service_account_email' in service_config:
command.extend([
'--test_user', service_config['service_account_email']
])
else:
command.extend([
'--native_host', 'localhost',
'--native_port', str(self.__forwarded_ports[microservice_api].port)
])
if options.test_stack:
command.extend(['--test_stack', str(options.test_stack)])
self.add_extra_arguments(test_name, args, command)
return command
def __execute_test_command(self, test_name, command, metric_labels):
metrics = self.__deployer.metrics
logging.debug('Running %s', ' '.join(command))
def run_and_log_test_script(command):
logfile = os.path.join(self.options.output_dir, 'citest_logs',
'%s-%s.console.log' % (test_name, os.getpid()))
logging.info('Logging test "%s" to %s', test_name, logfile)
try:
check_subprocesses_to_logfile('running test', logfile, [command])
retcode = 0
logging.info('Test %s PASSED -- see %s', test_name, logfile)
except:
retcode = -1
logging.info('Test %s FAILED -- see %s', test_name, logfile)
return retcode, logfile
return metrics.track_and_time_call(
'RunTestScript',
metric_labels, determine_subprocess_outcome_labels,
run_and_log_test_script, ' '.join(command))
def run_test_profile_helper(self, test_name, spec, metric_labels):
"""Helper function for running an individual test.
The caller wraps this to trap and handle exceptions.
Args:
test_name: The test being run.
spec: The test specification profile.
This argument will be pruned as values are consumed from it.
"""
quota = spec.pop('quota', {})
command = self.make_test_command_or_none(test_name, spec, metric_labels)
if command is None:
return
logging.info('Acquiring quota for test "%s"...', test_name)
quota_tracker = self.__quota_tracker
metrics = self.__deployer.metrics
acquired_quota = metrics.track_and_time_call(
'ResourceQuotaWait',
metric_labels, metrics.default_determine_outcome_labels,
quota_tracker.acquire_all_safe, test_name, quota)
if acquired_quota:
logging.info('"%s" acquired quota %s', test_name, acquired_quota)
execute_time = None
start_time = time.time()
try:
logging.info('Scheduling "%s"...', test_name)
# This will block. Note that we already acquired quota, thus
# we are blocking holding onto that quota. However since we are
# blocked awaiting a thread, nobody else can execute either,
      # so it doesn't matter that we might be starving them of quota.
self.__semaphore.acquire(True)
execute_time = time.time()
wait_time = int(execute_time - start_time + 0.5)
if wait_time > 1:
logging.info('"%s" had a semaphore contention for %d secs.',
test_name, wait_time)
logging.info('Executing "%s"...', test_name)
retcode, logfile_path = self.__execute_test_command(
test_name, command, metric_labels)
finally:
logging.info('Finished executing "%s"...', test_name)
self.__semaphore.release()
if acquired_quota:
quota_tracker.release_all_safe(test_name, acquired_quota)
end_time = time.time()
delta_time = int(end_time - execute_time + 0.5)
with self.__lock:
if not retcode:
logging.info('%s PASSED after %d secs', test_name, delta_time)
self.__passed.append((test_name, logfile_path))
else:
logging.info('FAILED %s after %d secs', test_name, delta_time)
self.__failed.append((test_name, logfile_path))
def init_argument_parser(parser, defaults):
"""Add testing related command-line parameters."""
add_parser_argument(
parser, 'test_profiles',
defaults, os.path.join(os.path.dirname(__file__), 'all_tests.yaml'),
help='The path to the set of test profiles.')
add_parser_argument(
parser, 'test_extra_profile_bindings', defaults, None,
help='Path to a file with additional bindings that the --test_profiles'
' file may reference.')
add_parser_argument(
parser, 'test_concurrency', defaults, None, type=int,
help='Limits how many tests to run at a time. Default is unbounded')
add_parser_argument(
parser, 'test_service_startup_timeout', defaults, 600, type=int,
help='Number of seconds to permit services to startup before giving up.')
add_parser_argument(
parser, 'test_gce_project_quota_factor', defaults, 1.0, type=float,
help='Default percentage of available project quota to make available'
' for tests.')
add_parser_argument(
parser, 'test_gce_region_quota_factor', defaults, 1.0, type=float,
help='Default percentage of available region quota to make available'
' for tests.')
add_parser_argument(
parser, 'test_gce_quota_region', defaults, 'us-central1',
help='GCE Compute Region to gather region quota limits from.')
add_parser_argument(
parser, 'test_default_quota',
defaults, '',
help='Default quota parameters for values used in the --test_profiles.'
' This does not include GCE quota values, which are determined'
           ' at runtime. These values can be further overridden by --test_quota.'
           ' These are meant as built-in defaults, whereas --test_quota is the'
           ' per-execution override.')
add_parser_argument(
parser, 'test_quota', defaults, '',
help='Comma-delimited name=value list of quota overrides.')
add_parser_argument(
parser, 'testing_enabled', defaults, True, type=bool,
help='If false then do not run the testing phase.')
add_parser_argument(
parser, 'test_disable', defaults, False, action='store_true',
dest='testing_enabled',
help='DEPRECATED: Use --testing_enabled=false.')
add_parser_argument(
parser, 'test_wait_on_services', defaults, True, type=bool,
help='If false then do not wait on services to be ready during'
' testing phase.')
add_parser_argument(
parser, 'test_include', defaults, '.*',
help='Regular expression of tests to run or None for all.')
add_parser_argument(
parser, 'test_exclude', defaults, None,
help='Regular expression of otherwise runnable tests to skip.')
add_parser_argument(
parser, 'test_stack', defaults, None,
help='The --test_stack to pass through to tests indicating which'
' Spinnaker application "stack" to use. This is typically'
' to help trace the source of resources created within the'
' tests.')
add_parser_argument(
parser, 'test_jenkins_job_name', defaults, 'TriggerBake',
help='The Jenkins job name to use in tests.')
add_parser_argument(
parser, 'test_gate_service_base_url', defaults, None,
help='Gate base URL (including protocol, host, and port) to use'
' rather than port-forwarding.')
add_parser_argument(
parser, 'test_gate_iap_client_id', defaults, None,
help='IAP client ID used to authenticate requests to an'
' IAP-protected Spinnaker. The inclusion of this flag'
' indicates that the Gate service is IAP-protected.')
add_parser_argument(
parser, 'test_gate_iap_credentials', defaults, None,
help='Path to google credentials file to authenticate requests'
' to an IAP-protected Spinnaker. This must be used with the'
' test_gate_iap_client_id flag.'
' If left empty then use Application Default Credentials.')
add_parser_argument(
parser, 'test_gate_iap_impersonated_service_account', defaults, None,
help='Service account to impersonate to receive the credentials'
' to make authenticated requests to an IAP-protected Spinnaker.'
' If test_gate_iap_credentials is provided, the service account'
' specified by test_gate_iap_credentials will impersonate this'
' service account. If test_gate_iap_credentials is not provided,'
' the Application Default Credentials will be used to impersonate'
' this service account. This must be used with the'
' test_gate_iap_client_id flag.'
' If left empty then no service account will be impersonated.')
add_parser_argument(
parser, 'test_ignore_ssl_cert_verification', defaults, False, type=bool,
help='Whether or not to ignore SSL certificate verification when making'
' requests to Spinnaker. This is False by default.')
add_parser_argument(
parser, 'test_appengine_region', defaults, 'us-central',
help='Region to use for AppEngine tests.')
def validate_options(options):
"""Validate testing related command-line parameters."""
if not os.path.exists(options.test_profiles):
raise_and_log_error(
ConfigError('--test_profiles "{0}" does not exist.'.format(
options.test_profiles)))
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
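# After 'git archive' (with the export-subst attribute), the $Format$ placeholders
# above are expanded; e.g. git_refnames might become ' (HEAD -> master, tag: v1.2.3)'
# and git_full the full commit hash (illustrative values).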
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "git-describe"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "xym/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
from nbt import nbt
from nbt.nbt import (TAG_Short, TAG_Compound, TAG_String, TAG_List, TAG_Float,
TAG_Byte, TAG_Long, TAG_Double, TAG_Int)
import mc_objects
import struct
import uuid
def get_tag_for_type(tag_type):
if tag_type == 'Byte':
return TAG_Byte
elif tag_type == 'Int':
return TAG_Int
elif tag_type == 'Float':
return TAG_Float
elif tag_type == 'Short':
return TAG_Short
elif tag_type == 'Long':
return TAG_Long
elif tag_type == 'String':
return TAG_String
elif tag_type == 'List':
return TAG_List
elif tag_type == 'Compound':
return TAG_Compound
else:
raise NotImplementedError(tag_type)
def load_spawn_potential_from_tag(tag):
return SpawnPotential(load_mob_from_tag(tag['Entity']),
weight=tag['Weight'].value)
def load_spawner_from_tag(tag):
potentials = []
delay = tag['Delay'].value
min_delay = tag['MinSpawnDelay'].value
max_delay = tag['MaxSpawnDelay'].value
spawn_count = tag['SpawnCount'].value
spawn_range = tag['SpawnRange'].value
if 'SpawnPotentials' in tag:
potentials_subtag = tag['SpawnPotentials']
for potent in potentials_subtag:
potentials.append(load_spawn_potential_from_tag(potent))
if 'MaxNearbyEntities' in tag:
max_nearby_entities = tag['MaxNearbyEntities'].value
else:
max_nearby_entities = None
if 'RequiredPlayerRange' in tag:
required_player_range = tag['RequiredPlayerRange'].value
else:
required_player_range = None
return Spawner(potentials, spawn_count=spawn_count,
spawn_range=spawn_range, delay=delay,
min_spawn_delay=min_delay, max_spawn_delay=max_delay,
required_player_range=required_player_range,
max_nearby_entities=max_nearby_entities)
def load_mob_from_tag(tag):
attributes = []
effects = []
passengers = []
mob_id = tag['id'].value
if 'Attributes' in tag:
attribute_tags = tag['Attributes']
for attr in attribute_tags:
attributes.append(Attribute(
attr['AttributeName'].value, attr['Amount'].value,
op_choice=attr['Operation'].value,
attr_uuid=(attr['UUIDLeast'].value, attr['UUIDMost'].value)))
if 'ActiveEffects' in tag:
effect_tags = tag['ActiveEffects']
for pot in effect_tags:
effects.append(Effect(
pot['id'].value, pot['Amplifier'].value,
pot['Duration'].value,
ambient=pot['Ambient'].value,
show_particles=pot['ShowParticles'].value
))
if 'Passengers' in tag:
passenger_tags = tag['Passengers']
for passenger in passenger_tags:
passengers.append(load_mob_from_tag(passenger))
if 'Glowing' in tag:
glowing = tag['Glowing'].value
else:
glowing = False
if 'LeftHanded' in tag:
left_handed = tag['LeftHanded'].value
else:
left_handed = False
if 'Silent' in tag:
silent = tag['Silent'].value
else:
silent = False
if 'CanPickUpLoot' in tag:
can_pickup_loot = tag['CanPickUpLoot'].value
else:
can_pickup_loot = False
if 'CustomNameVisible' in tag:
custom_name_visible = tag['CustomNameVisible'].value
else:
custom_name_visible = False
if 'CustomName' in tag:
custom_name = tag['CustomName'].value
else:
custom_name = None
if 'AbsorptionAmount' in tag:
absorbtion_amount = tag['AbsorptionAmount'].value
else:
absorbtion_amount = 0.0
if 'Health' in tag:
health = tag['Health'].value
else:
health = 10.0
if 'Fire' in tag:
fire = tag['Fire'].value
else:
fire = -20.0
if 'HandItems' in tag:
hand_subtag = tag['HandItems']
if len(hand_subtag[0]) <= 0:
main_hand = None
else:
main_hand = load_item_from_tag(hand_subtag[0])
if len(hand_subtag[1]) <= 0:
off_hand = None
else:
off_hand = load_item_from_tag(hand_subtag[1])
else:
main_hand = None
off_hand = None
hand_items = {'MainHand': main_hand, 'OffHand': off_hand}
if 'ArmorItems' in tag:
armor_subtag = tag['ArmorItems']
if len(armor_subtag[0]) <= 0:
feet = None
else:
feet = load_item_from_tag(armor_subtag[0])
if len(armor_subtag[1]) <= 0:
legs = None
else:
legs = load_item_from_tag(armor_subtag[1])
if len(armor_subtag[2]) <= 0:
chest = None
else:
chest = load_item_from_tag(armor_subtag[2])
if len(armor_subtag[3]) <= 0:
head = None
else:
head = load_item_from_tag(armor_subtag[3])
else:
feet = None
legs = None
chest = None
head = None
    armor_items = {'Feet': feet, 'Legs': legs, 'Chest': chest, 'Head': head}
if 'HandDropChances' in tag:
hand_drops = tag['HandDropChances']
hand_drop_chances = {'MainHand': hand_drops[0].value,
'OffHand': hand_drops[1].value}
else:
hand_drop_chances = {'MainHand': 0.0,
'OffHand': 0.0}
if 'ArmorDropChances' in tag:
armor_drops = tag['ArmorDropChances']
armor_drop_chances = {'Feet': armor_drops[0].value,
'Legs': armor_drops[1].value,
'Chest': armor_drops[2].value,
'Head': armor_drops[3].value}
else:
armor_drop_chances = {'Feet': 0.0,
'Legs': 0.0,
'Chest': 0.0,
'Head': 0.0}
mc_mob = mc_objects.MOBS[mob_id]
options = mc_mob.options
additional_settings = {}
for tag_name, option_type, tag_data, tag_type in options:
if tag_name in tag:
tag_value = tag[tag_name].value
else:
tag_value = 0
additional_settings[tag_name] = AdditionalMobOption(tag_type, tag_name,
tag_value)
return Mob(mob_id, attributes=attributes, passengers=passengers,
effects=effects, custom_name=custom_name,
custom_name_visible=custom_name_visible, glowing=glowing,
fire_ticks=fire, health=health, absorbtion_amount=absorbtion_amount,
hand_items=hand_items, hand_drop_chances=hand_drop_chances,
armor_items=armor_items, armor_drop_chances=armor_drop_chances,
can_pickup_loot=can_pickup_loot, left_handed=left_handed,
additional_settings=additional_settings, silent=silent
)
def load_spawner(spawner_file):
tag = nbt.NBTFile(spawner_file, 'rb')
return load_spawner_from_tag(tag)
def load_mob(mob_file):
tag = nbt.NBTFile(mob_file, 'rb')
return load_mob_from_tag(tag)
def load_item_from_tag(nbtf):
enchants = []
name = None
lore = []
attributes = []
if 'tag' in nbtf:
tag = nbtf['tag']
if 'ench' in tag:
enchant_tags = tag['ench']
for ench_tag in enchant_tags:
enchants.append(Enchant(ench_tag['id'].value,
level=ench_tag['lvl'].value))
if 'display' in tag:
display_tags = tag['display']
if 'Name' in display_tags:
name = display_tags['Name'].value
if 'Lore' in display_tags:
lore_tags = display_tags['Lore']
for lore_tag in lore_tags:
lore.append(lore_tag.value)
if 'AttributeModifiers' in tag:
attribute_modifiers = tag['AttributeModifiers']
for attr_mod in attribute_modifiers:
attributes.append(Attribute(
attr_mod['AttributeName'].value,
attr_mod['Amount'].value,
slot=attr_mod['Slot'].value,
op_choice=attr_mod['Operation'].value,
attr_uuid=(attr_mod['UUIDLeast'].value,
attr_mod['UUIDMost'].value)))
return Item(nbtf['id'].value, damage=nbtf['Damage'].value,
enchants=enchants, name=name, lore=lore, attributes=attributes)
def load_item(item_file):
nbtf = nbt.NBTFile(item_file, 'rb')
return load_item_from_tag(nbtf)
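# Hedged round-trip sketch, not part of the original module: 'creeper.nbt' is a
# hypothetical file previously written with Mob.getNBTFile().write_file(), and
# the lookup assumes mc_objects.MOBS knows the mob id stored in that file.
def _example_inspect_mob(path='creeper.nbt'):
    mob = load_mob(path)
    return mob.mob_id, mob.health, [effect.effect_id for effect in mob.effects]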
def has_value_not_none(collection_to_check):
    """Return True if any value in the mapping is not None."""
    return any(value is not None
               for value in collection_to_check.values())
class AdditionalMobOption():
def __init__(self, tag_type, tag_name, value):
self.tag_type = tag_type
self.tag_name = tag_name
self.value = value
def getNBTTag(self):
if self.tag_type in ['Int', 'Short', 'Byte']:
value = int(self.value)
else:
value = self.value
return get_tag_for_type(self.tag_type)(name=self.tag_name,
value=value)
class Mob():
    def __init__(self, mob_id, attributes=None, passengers=None, effects=None,
                 custom_name=None, custom_name_visible=False, glowing=False,
                 fire_ticks=-20, health=10, absorbtion_amount=0,
                 hand_items=None, armor_items=None,
                 hand_drop_chances=None, armor_drop_chances=None,
                 can_pickup_loot=False, left_handed=False,
                 additional_settings=None, silent=False):
        # None is used for the mutable defaults so that separate Mob instances
        # never share the same list/dict object.
        self.mob_id = mob_id
        self.custom_name = custom_name
        self.custom_name_visible = custom_name_visible
        self.glowing = glowing
        self.attributes = attributes if attributes is not None else []
        self.passengers = passengers if passengers is not None else []
        self.effects = effects if effects is not None else []
        self.fire_ticks = int(fire_ticks)
        self.health = health
        self.absorbtion_amount = absorbtion_amount
        self.can_pickup_loot = can_pickup_loot
        self.left_handed = left_handed
        self.silent = silent
        self.hand_items = hand_items if hand_items is not None else {
            'MainHand': None, 'OffHand': None}
        self.hand_drop_chances = (hand_drop_chances
                                  if hand_drop_chances is not None
                                  else {'MainHand': 0.0, 'OffHand': 0.0})
        self.armor_drop_chances = (armor_drop_chances
                                   if armor_drop_chances is not None
                                   else {'Feet': 0.0, 'Legs': 0.0,
                                         'Chest': 0.0, 'Head': 0.0})
        self.armor_items = armor_items if armor_items is not None else {
            'Feet': None, 'Legs': None, 'Chest': None, 'Head': None}
        self.additional_settings = (additional_settings
                                    if additional_settings is not None else {})
def getNBTTag(self):
tag = TAG_Compound()
self.add_data_to_tag(tag)
return tag
def add_data_to_tag(self, tag):
tag.tags.append(TAG_String(name='id', value=self.mob_id))
tag.tags.append(TAG_Float(name='Health', value=self.health))
tag.tags.append(TAG_Byte(name="Glowing", value=int(self.glowing)))
tag.tags.append(TAG_Byte(name="Silent", value=int(self.silent)))
tag.tags.append(TAG_Byte(name="LeftHanded",
value=int(self.left_handed)))
tag.tags.append(TAG_Byte(name="CanPickUpLoot",
value=int(self.can_pickup_loot)))
tag.tags.append(TAG_Byte(name="CustomNameVisible",
value=int(self.custom_name_visible)))
tag.tags.append(TAG_Float(name="AbsorptionAmount",
value=self.absorbtion_amount))
tag.tags.append(TAG_Short(name='Fire', value=int(self.fire_ticks)))
if self.custom_name is not None:
tag.tags.append(
TAG_String(name='CustomName', value=self.custom_name))
if len(self.attributes) > 0:
attribute_subtag = TAG_List(name="Attributes", type=TAG_Compound)
for attribute in self.attributes:
attribute_subtag.tags.append(attribute.getNBTTag())
tag.tags.append(attribute_subtag)
if len(self.effects) > 0:
effects_subtag = TAG_List(name="ActiveEffects", type=TAG_Compound)
for effect in self.effects:
effects_subtag.tags.append(effect.getNBTTag())
tag.tags.append(effects_subtag)
if len(self.passengers) > 0:
passenger_subtag = TAG_List(name="Passengers", type=TAG_Compound)
for passenger in self.passengers:
passenger_subtag.tags.append(passenger.getNBTTag())
tag.tags.append(passenger_subtag)
if has_value_not_none(self.hand_items):
hand_items_subtag = TAG_List(name='HandItems', type=TAG_Compound)
if self.hand_items['MainHand'] is not None:
hand_items_subtag.append(
self.hand_items['MainHand'].getNBTTag())
else:
hand_items_subtag.append(TAG_Compound())
if self.hand_items['OffHand'] is not None:
hand_items_subtag.append(
self.hand_items['OffHand'].getNBTTag())
else:
hand_items_subtag.append(TAG_Compound())
hand_drops = TAG_List(name="HandDropChances", type=TAG_Float)
hand_drops.append(
TAG_Float(value=self.hand_drop_chances['MainHand']))
hand_drops.append(
TAG_Float(value=self.hand_drop_chances['OffHand']))
tag.tags.append(hand_items_subtag)
tag.tags.append(hand_drops)
if has_value_not_none(self.armor_items):
armor_items_subtag = TAG_List(name='ArmorItems', type=TAG_Compound)
if self.armor_items['Feet'] is not None:
armor_items_subtag.append(
self.armor_items['Feet'].getNBTTag())
else:
armor_items_subtag.append(TAG_Compound())
if self.armor_items['Legs'] is not None:
armor_items_subtag.append(
self.armor_items['Legs'].getNBTTag())
else:
armor_items_subtag.append(TAG_Compound())
if self.armor_items['Chest'] is not None:
armor_items_subtag.append(
self.armor_items['Chest'].getNBTTag())
else:
armor_items_subtag.append(TAG_Compound())
if self.armor_items['Head'] is not None:
armor_items_subtag.append(
self.armor_items['Head'].getNBTTag())
else:
armor_items_subtag.append(TAG_Compound())
armor_drops = TAG_List(name="ArmorDropChances", type=TAG_Float)
armor_drops.append(
TAG_Float(value=self.armor_drop_chances['Feet']))
armor_drops.append(
TAG_Float(value=self.armor_drop_chances['Legs']))
armor_drops.append(
TAG_Float(value=self.armor_drop_chances['Chest']))
armor_drops.append(
TAG_Float(value=self.armor_drop_chances['Head']))
tag.tags.append(armor_items_subtag)
tag.tags.append(armor_drops)
for additional in self.additional_settings:
option = self.additional_settings[additional]
tag.tags.append(option.getNBTTag())
def getNBTFile(self):
tag = nbt.NBTFile()
tag.name = 'root'
self.add_data_to_tag(tag)
return tag
class Attribute():
def __init__(self, attribute_id, value, slot=None, op_choice=0,
attr_uuid=None):
self.attribute_id = attribute_id
self.value = value
self.slot = slot
self.op_choice = op_choice
if attr_uuid is None:
low, high = struct.unpack("qq", uuid.uuid4().bytes)
else:
low, high = attr_uuid
self.low = low
self.high = high
def getNBTTag(self):
tag = TAG_Compound()
tag.tags.append(TAG_String(name="AttributeName",
value=self.attribute_id))
tag.tags.append(TAG_String(name="Name",
value=self.attribute_id))
tag.tags.append(TAG_Double(name="Amount", value=self.value))
tag.tags.append(TAG_Int(name="Operation", value=int(self.op_choice)))
tag.tags.append(TAG_Long(name="UUIDLeast", value=self.low))
tag.tags.append(TAG_Long(name="UUIDMost", value=self.high))
if self.slot is not None:
tag.tags.append(TAG_String(name="Slot", value=self.slot))
return tag
class Item():
    def __init__(self, i_id, damage=0, enchants=None, name=None, lore=None,
                 attributes=None):
        # None is used for the mutable defaults so that separate Item
        # instances never share the same list object.
        self.i_id = i_id
        self.damage = damage
        self.enchants = enchants if enchants is not None else []
        self.attributes = attributes if attributes is not None else []
        self.name = name
        self.lore = lore if lore is not None else []
def getNBTTag(self):
tag = TAG_Compound()
self.add_data_to_tag(tag)
return tag
def add_data_to_tag(self, tag):
tag.tags.append(TAG_String(name='id', value=self.i_id))
tag.tags.append(TAG_Short(name='Damage', value=int(self.damage)))
tag.tags.append(TAG_Byte(name='Count', value=1))
subtag = TAG_Compound()
subtag.name = 'tag'
did_append = False
if len(self.enchants) > 0:
enchant = TAG_List(name='ench', type=TAG_Compound)
for ench in self.enchants:
enchant.tags.append(ench.getNBTTag())
subtag.tags.append(enchant)
did_append = True
if len(self.attributes) > 0:
attributes = TAG_List(name='AttributeModifiers', type=TAG_Compound)
for attribute in self.attributes:
attributes.tags.append(attribute.getNBTTag())
subtag.tags.append(attributes)
did_append = True
if self.name is not None or len(self.lore) > 0:
display = TAG_Compound()
display.name = 'display'
if self.name is not None:
display.tags.append(TAG_String(name='Name', value=self.name))
if len(self.lore) > 0:
lore_tag = TAG_List(name='Lore', type=TAG_String)
for lore in self.lore:
lore_tag.tags.append(TAG_String(value=lore))
display.tags.append(lore_tag)
subtag.tags.append(display)
did_append = True
if did_append:
tag.tags.append(subtag)
def getNBTFile(self):
tag = nbt.NBTFile()
tag.name = 'root'
self.add_data_to_tag(tag)
return tag
class Effect():
def __init__(self, effect_id, amplifier, duration, ambient=False,
show_particles=False):
self.effect_id = effect_id
self.amplifier = amplifier
self.duration = int(duration)
self.ambient = ambient
self.show_particles = show_particles
def getNBTTag(self):
tag = TAG_Compound()
tag.tags.append(TAG_Byte(name="id", value=int(self.effect_id)))
tag.tags.append(TAG_Byte(name="Amplifier", value=int(self.amplifier)))
tag.tags.append(TAG_Int(name="Duration", value=int(self.duration)))
tag.tags.append(TAG_Byte(name="Ambient", value=int(self.ambient)))
tag.tags.append(
TAG_Byte(name="ShowParticles", value=int(self.show_particles)))
return tag
class SpawnPotential():
def __init__(self, mob, weight=1):
self.spawn_type = mob.mob_id
self.weight = weight
self.mob = mob
class Spawner():
def __init__(self, spawn_potentials, spawn_count=1, spawn_range=4,
required_player_range=None, max_nearby_entities=None, delay=0,
min_spawn_delay=100, max_spawn_delay=300):
self.spawn_potentials = spawn_potentials
self.spawn_count = spawn_count
self.spawn_range = spawn_range
self.required_player_range = required_player_range
self.max_nearby_entities = max_nearby_entities
self.delay = delay
self.min_spawn_delay = min_spawn_delay
self.max_spawn_delay = max_spawn_delay
def getNBTFile(self):
tag = nbt.NBTFile()
tag.name = 'root'
self.add_data_to_tag(tag)
return tag
def getNBTTag(self):
tag = TAG_Compound()
self.add_data_to_tag(tag)
return tag
def add_data_to_tag(self, tag):
tag.tags.append(TAG_Short(name='SpawnCount',
value=int(self.spawn_count)))
tag.tags.append(TAG_Short(name='SpawnRange',
value=int(self.spawn_range)))
tag.tags.append(TAG_Short(name='Delay',
value=int(self.delay)))
tag.tags.append(TAG_Short(name='MinSpawnDelay',
value=int(self.min_spawn_delay)))
tag.tags.append(TAG_Short(name='MaxSpawnDelay',
value=int(self.max_spawn_delay)))
if self.required_player_range is not None:
tag.tags.append(TAG_Short(name='RequiredPlayerRange',
value=int(self.required_player_range)))
if self.max_nearby_entities is not None:
tag.tags.append(TAG_Short(name='MaxNearbyEntities',
value=int(self.max_nearby_entities)))
if len(self.spawn_potentials) > 0:
potentials_subtag = TAG_List(name="SpawnPotentials",
type=TAG_Compound)
for spawn_potential in self.spawn_potentials:
pot_tag = TAG_Compound()
pot_tag.tags.append(
TAG_Int(name="Weight", value=int(spawn_potential.weight)))
mob_tag = spawn_potential.mob.getNBTTag()
mob_tag.name = 'Entity'
pot_tag.tags.append(mob_tag)
potentials_subtag.tags.append(pot_tag)
tag.tags.append(potentials_subtag)
class Enchant():
def __init__(self, e_id, level=1):
self.e_id = e_id
self.level = level
def getNBTTag(self):
tag = TAG_Compound()
tag.tags.append(TAG_Short(name="id", value=int(self.e_id)))
tag.tags.append(TAG_Short(name="lvl", value=int(self.level)))
return tag
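# Hedged usage sketch, not part of the original module: build a spawner for an
# armed mob and save it. The item/mob ids and the numeric (pre-1.13)
# enchantment id are illustrative, and write_file() is assumed to be available
# on nbt.NBTFile as in the twoolie NBT package used above.
def _example_write_spawner(output_path='zombie_spawner.nbt'):
    sword = Item('minecraft:diamond_sword', name='Example Blade',
                 enchants=[Enchant(16, level=2)])
    zombie = Mob('minecraft:zombie', custom_name='Example Zombie',
                 custom_name_visible=True,
                 hand_items={'MainHand': sword, 'OffHand': None},
                 hand_drop_chances={'MainHand': 0.1, 'OffHand': 0.0})
    spawner = Spawner([SpawnPotential(zombie, weight=1)],
                      spawn_count=2, spawn_range=4)
    spawner.getNBTFile().write_file(output_path)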
|
|
import re
import os
import glob
import shlex
import subprocess
import charmhelpers.core.decorators as decorators
import charmhelpers.core.hookenv as hookenv
def format_pci_addr(pci_addr):
"""Pad a PCI address eg 0:0:1.1 becomes 0000:00:01.1
:param pci_addr: str
:return pci_addr: str
"""
domain, bus, slot_func = pci_addr.split(':')
slot, func = slot_func.split('.')
return '{}:{}:{}.{}'.format(domain.zfill(4), bus.zfill(2), slot.zfill(2),
func)
class VPECLIException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
class PCINetDevice(object):
def __init__(self, pci_address):
"""Class representing a PCI device
:param pci_addr: str PCI address of device
"""
self.pci_address = pci_address
self.update_attributes()
def update_attributes(self):
"""Query the underlying system and update attributes of this device
"""
self.update_modalias_kmod()
self.update_interface_info()
@property
def loaded_kmod(self):
"""Return Kernel module this device is using
:returns str: Kernel module
"""
cmd = ['lspci', '-ks', self.pci_address]
lspci_output = subprocess.check_output(cmd)
kdrive = None
for line in lspci_output.split('\n'):
if 'Kernel driver' in line:
kdrive = line.split(':')[1].strip()
hookenv.log('Loaded kmod for {} is {}'.format(
self.pci_address, kdrive))
return kdrive
def update_modalias_kmod(self):
"""Set the default kernel module for this device
If a device is orphaned it has no kernel module loaded to support it
so look up the device in modules.alias and set the kernel module
it needs"""
cmd = ['lspci', '-ns', self.pci_address]
lspci_output = subprocess.check_output(cmd).split()
vendor_device = lspci_output[2]
vendor, device = vendor_device.split(':')
pci_string = 'pci:v{}d{}'.format(vendor.zfill(8), device.zfill(8))
kernel_name = self.get_kernel_name()
alias_files = '/lib/modules/{}/modules.alias'.format(kernel_name)
kmod = None
with open(alias_files, 'r') as f:
for line in f.readlines():
if pci_string in line:
kmod = line.split()[-1]
hookenv.log('module.alias kmod for {} is {}'.format(
self.pci_address, kmod))
self.modalias_kmod = kmod
def update_interface_info(self):
"""Set the interface name, mac address and state properties of this
object"""
if self.loaded_kmod:
if self.loaded_kmod == 'igb_uio':
return self.update_interface_info_vpe()
else:
return self.update_interface_info_eth()
else:
self.interface_name = None
self.mac_address = None
self.state = 'unbound'
def get_kernel_name(self):
"""Return the kernel release of the running kernel
:returns str: Kernel release
"""
return subprocess.check_output(['uname', '-r']).strip()
def pci_rescan(self):
"""Rescan of all PCI buses in the system, and
re-discover previously removed devices."""
rescan_file = '/sys/bus/pci/rescan'
with open(rescan_file, 'w') as f:
f.write('1')
def bind(self, kmod):
"""Write PCI address to the bind file to cause the driver to attempt to
bind to the device found at the PCI address. This is useful for
overriding default bindings."""
bind_file = '/sys/bus/pci/drivers/{}/bind'.format(kmod)
hookenv.log('Binding {} to {}'.format(self.pci_address, bind_file))
with open(bind_file, 'w') as f:
f.write(self.pci_address)
self.pci_rescan()
self.update_attributes()
def unbind(self):
"""Write PCI address to the unbind file to cause the driver to attempt
to unbind the device found at at the PCI address."""
if not self.loaded_kmod:
return
unbind_file = '/sys/bus/pci/drivers/{}/unbind'.format(self.loaded_kmod)
hookenv.log('Unbinding {} from {}'.format(
self.pci_address, unbind_file))
with open(unbind_file, 'w') as f:
f.write(self.pci_address)
self.pci_rescan()
self.update_attributes()
def update_interface_info_vpe(self):
"""Query VPE CLI to set the interface name, mac address and state
properties of this device"""
vpe_devices = self.get_vpe_interfaces_and_macs()
device_info = {}
for interface in vpe_devices:
if self.pci_address == interface['pci_address']:
device_info['interface'] = interface['interface']
device_info['macAddress'] = interface['macAddress']
if device_info:
self.interface_name = device_info['interface']
self.mac_address = device_info['macAddress']
self.state = 'vpebound'
else:
self.interface_name = None
self.mac_address = None
self.state = None
@decorators.retry_on_exception(5, base_delay=10,
exc_type=subprocess.CalledProcessError)
def get_vpe_cli_out(self):
"""Query VPE CLI and dump interface information
:returns str: confd_cli output"""
echo_cmd = [
'echo', '-e', 'show interfaces-state interface phys-address\nexit']
cli_cmd = ['/opt/cisco/vpe/bin/confd_cli', '-N', '-C', '-u', 'system']
echo = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
cli_output = subprocess.check_output(cli_cmd, stdin=echo.stdout)
echo.wait()
        echo.terminate()
hookenv.log('confd_cli: ' + cli_output)
return cli_output
def get_vpe_interfaces_and_macs(self):
"""Parse output from VPE CLI and retrun list of interface data dicts
:returns list: list of dicts of interface data
eg [
{
'interface': 'TenGigabitEthernet6/0/0',
'macAddress': '84:b8:02:2a:5f:c3',
'pci_address': '0000:06:00.0'
},
{
'interface': 'TenGigabitEthernet7/0/0',
'macAddress': '84:b8:02:2a:5f:c4',
'pci_address': '0000:07:00.0'
},
]
"""
cli_output = self.get_vpe_cli_out()
vpe_devs = []
if 'local0' not in cli_output:
msg = ('local0 missing from confd_cli output, assuming things '
'went wrong')
raise VPECLIException(1, msg)
for line in cli_output.split('\n'):
if re.search(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', line, re.I):
interface, mac = line.split()
pci_addr = self.extract_pci_addr_from_vpe_interface(interface)
vpe_devs.append({
'interface': interface,
'macAddress': mac,
'pci_address': pci_addr,
})
return vpe_devs
def extract_pci_addr_from_vpe_interface(self, nic):
"""Convert a str from nic postfix format to padded format
:returns list: list of dicts of interface data
eg 6/1/2 -> 0000:06:01.2"""
hookenv.log('Extracting pci address from {}'.format(nic))
        addr = re.sub(r'^.*Ethernet', '', nic, flags=re.IGNORECASE)
bus, slot, func = addr.split('/')
domain = '0000'
pci_addr = format_pci_addr(
'{}:{}:{}.{}'.format(domain, bus, slot, func))
hookenv.log('pci address for {} is {}'.format(nic, pci_addr))
return pci_addr
def update_interface_info_eth(self):
"""Set the interface name, mac address and state
properties of this device if device is in sys fs"""
net_devices = self.get_sysnet_interfaces_and_macs()
for interface in net_devices:
if self.pci_address == interface['pci_address']:
self.interface_name = interface['interface']
self.mac_address = interface['macAddress']
self.state = interface['state']
def get_sysnet_interfaces_and_macs(self):
"""Query sys fs and retrun list of interface data dicts
eg [
{
'interface': 'eth2',
'macAddress': 'a8:9d:21:cf:93:fc',
'pci_address': '0000:10:00.0',
'state': 'up'
},
{
'interface': 'eth3',
'macAddress': 'a8:9d:21:cf:93:fd',
'pci_address': '0000:10:00.1',
'state': 'down'
}
]
"""
net_devs = []
for sdir in glob.glob('/sys/class/net/*'):
sym_link = sdir + "/device"
if os.path.islink(sym_link):
fq_path = os.path.realpath(sym_link)
path = fq_path.split('/')
if 'virtio' in path[-1]:
pci_address = path[-2]
else:
pci_address = path[-1]
net_devs.append({
'interface': self.get_sysnet_interface(sdir),
'macAddress': self.get_sysnet_mac(sdir),
'pci_address': pci_address,
'state': self.get_sysnet_device_state(sdir),
})
return net_devs
def get_sysnet_mac(self, sysdir):
"""Extract MAC address from sys device file
:returns str: mac address"""
mac_addr_file = sysdir + '/address'
with open(mac_addr_file, 'r') as f:
read_data = f.read()
mac = read_data.strip()
hookenv.log('mac from {} is {}'.format(mac_addr_file, mac))
return mac
def get_sysnet_device_state(self, sysdir):
"""Extract device state from sys device file
:returns str: device state"""
state_file = sysdir + '/operstate'
with open(state_file, 'r') as f:
read_data = f.read()
state = read_data.strip()
hookenv.log('state from {} is {}'.format(state_file, state))
return state
def get_sysnet_interface(self, sysdir):
"""Extract device file from FQ path
:returns str: interface name"""
return sysdir.split('/')[-1]
class PCINetDevices(object):
"""PCINetDevices represents a collection of PCI Network devices on the
running system"""
def __init__(self):
"""Initialise a collection of PCINetDevice"""
pci_addresses = self.get_pci_ethernet_addresses()
self.pci_devices = [PCINetDevice(dev) for dev in pci_addresses]
def get_pci_ethernet_addresses(self):
"""Query lspci to retrieve a list of PCI address for devices of type
'Ethernet controller'
:returns list: List of PCI addresses of Ethernet controllers"""
cmd = ['lspci', '-m', '-D']
lspci_output = subprocess.check_output(cmd)
pci_addresses = []
for line in lspci_output.split('\n'):
columns = shlex.split(line)
if len(columns) > 1 and columns[1] == 'Ethernet controller':
pci_address = columns[0]
pci_addresses.append(format_pci_addr(pci_address))
return pci_addresses
def update_devices(self):
"""Update attributes of each device in collection"""
for pcidev in self.pci_devices:
pcidev.update_attributes()
def get_macs(self):
"""MAC addresses of all devices in collection
:returns list: List of MAC addresses"""
macs = []
for pcidev in self.pci_devices:
if pcidev.mac_address:
macs.append(pcidev.mac_address)
return macs
def get_device_from_mac(self, mac):
"""Given a MAC address return the corresponding PCINetDevice
:returns PCINetDevice"""
for pcidev in self.pci_devices:
if pcidev.mac_address == mac:
return pcidev
def get_device_from_pci_address(self, pci_addr):
"""Given a PCI address return the corresponding PCINetDevice
:returns PCINetDevice"""
for pcidev in self.pci_devices:
if pcidev.pci_address == pci_addr:
return pcidev
def rebind_orphans(self):
"""Unbind orphaned devices from the kernel module they are currently
using and then bind it with its default kernel module"""
self.unbind_orphans()
self.bind_orphans()
def unbind_orphans(self):
"""Unbind orphaned devices from the kernel module they are currently
using"""
for orphan in self.get_orphans():
orphan.unbind()
self.update_devices()
def bind_orphans(self):
"""Bind orphans with their default kernel module"""
for orphan in self.get_orphans():
orphan.bind(orphan.modalias_kmod)
self.update_devices()
def get_orphans(self):
"""An 'orphan' is a device which is not fully setup. It may not be
associated with a kernel module or may lay a name or MAC address.
:returns list: List of PCINetDevice"""
orphans = []
for pcidev in self.pci_devices:
if not pcidev.loaded_kmod or pcidev.loaded_kmod == 'igb_uio':
if not pcidev.interface_name and not pcidev.mac_address:
orphans.append(pcidev)
return orphans
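# Hedged usage sketch, not part of the original helper: enumerate the
# Ethernet-class PCI devices on this host and re-bind any orphaned ones to
# their default kernel module. Assumes root privileges, a working lspci,
# and a Juju hook environment so hookenv.log() succeeds.
def _example_rebind_orphaned_nics():
    devices = PCINetDevices()
    for dev in devices.pci_devices:
        hookenv.log('{} -> {} ({})'.format(
            dev.pci_address,
            getattr(dev, 'interface_name', None),
            getattr(dev, 'state', None)))
    devices.rebind_orphans()
    return devices.get_macs()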
class PCIInfo(object):
def __init__(self):
"""Inspect the charm config option 'mac-network-map' against the MAC
addresses on the running system.
Attributes:
user_requested_config dict Dictionary of MAC addresses and the
networks they are associated with.
local_macs list MAC addresses on local machine
pci_addresses list PCI Addresses of network devices on
local machine
            vpe_dev_string str String containing PCI addresses in
format used by vpe.conf
local_mac_nets dict Dictionary of list of dicts with
                                       interface and network information
keyed on MAC address eg
{
'mac1': [{'interface': 'eth0', 'net': 'net1'},
{'interface': 'eth0', 'net': 'net2'}],
'mac2': [{'interface': 'eth1', 'net': 'net1'}],}
"""
self.user_requested_config = self.get_user_requested_config()
net_devices = PCINetDevices()
self.local_macs = net_devices.get_macs()
self.pci_addresses = []
self.local_mac_nets = {}
for mac in self.user_requested_config.keys():
hookenv.log('Checking if {} is on this host'.format(mac))
if mac in self.local_macs:
hookenv.log('{} is on this host'.format(mac))
device = net_devices.get_device_from_mac(mac)
hookenv.log('{} is {} and is currently {}'.format(mac,
device.pci_address, device.interface_name))
if device.state == 'up':
hookenv.log('Refusing to add {} to device list as it is '
'{}'.format(device.pci_address, device.state))
else:
self.pci_addresses.append(device.pci_address)
self.local_mac_nets[mac] = []
for conf in self.user_requested_config[mac]:
self.local_mac_nets[mac].append({
'net': conf.get('net'),
'interface': device.interface_name,
})
if self.pci_addresses:
self.pci_addresses.sort()
self.vpe_dev_string = 'dev ' + ' dev '.join(self.pci_addresses)
else:
self.vpe_dev_string = 'no-pci'
hookenv.log('vpe_dev_string {}'.format(self.vpe_dev_string))
def parse_mmap_entry(self, conf):
"""Extract mac and net pairs from list in the form
['mac=mac1', 'net=net1']
:returns tuple: (mac, net)
"""
entry = {a.split('=')[0]: a.split('=')[1] for a in conf}
return entry['mac'], entry['net']
def get_user_requested_config(self):
''' Parse the user requested config str
mac=<mac>;net=<net> and return a dict keyed on mac address
:returns dict: Dictionary of MAC addresses and the networks they are
associated with. eg
mac-network-map set to 'mac=mac1;net=net1
mac=mac1;net=net2
mac=mac2;net=net1'
returns:
{
'mac1': [{'net': 'net1'}, {'net': 'net2'}],
                               'mac2': [{'net': 'net1'}]
}
'''
mac_net_config = {}
mac_map = hookenv.config('mac-network-map')
if mac_map:
for conf_group in mac_map.split():
                try:
                    mac, net = self.parse_mmap_entry(conf_group.split(';'))
                except (IndexError, KeyError):
                    # Ignore bad config entries
                    hookenv.log('Ignoring bad config entry {} in '
                                'mac-network-map'.format(conf_group))
                    continue
try:
mac_net_config[mac].append({'net': net})
except KeyError:
mac_net_config[mac] = [{'net': net}]
return mac_net_config
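# Hedged illustration, not executable code: the mapping performed by
# PCIInfo.get_user_requested_config() on the 'mac-network-map' charm option.
# With the option set to (MAC address is a placeholder):
#
#     mac=a8:9d:21:cf:93:fc;net=net1 mac=a8:9d:21:cf:93:fc;net=net2
#
# user_requested_config becomes:
#
#     {'a8:9d:21:cf:93:fc': [{'net': 'net1'}, {'net': 'net2'}]}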
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class TriggerList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the TriggerList
:param Version version: Version that contains the resource
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerList
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerList
"""
super(TriggerList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Usage/Triggers.json'.format(**self._solution)
def create(self, callback_url, trigger_value, usage_category,
callback_method=values.unset, friendly_name=values.unset,
recurring=values.unset, trigger_by=values.unset):
"""
Create the TriggerInstance
:param unicode callback_url: The URL we call when the trigger fires
:param unicode trigger_value: The usage value at which the trigger should fire
:param TriggerInstance.UsageCategory usage_category: The usage category the trigger watches
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode friendly_name: A string to describe the resource
:param TriggerInstance.Recurring recurring: The frequency of a recurring UsageTrigger
:param TriggerInstance.TriggerField trigger_by: The field in the UsageRecord resource that fires the trigger
:returns: The created TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
data = values.of({
'CallbackUrl': callback_url,
'TriggerValue': trigger_value,
'UsageCategory': usage_category,
'CallbackMethod': callback_method,
'FriendlyName': friendly_name,
'Recurring': recurring,
'TriggerBy': trigger_by,
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return TriggerInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def stream(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, limit=None, page_size=None):
"""
Streams TriggerInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
recurring=recurring,
trigger_by=trigger_by,
usage_category=usage_category,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, limit=None, page_size=None):
"""
Lists TriggerInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: A list of up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance]
"""
return list(self.stream(
recurring=recurring,
trigger_by=trigger_by,
usage_category=usage_category,
limit=limit,
page_size=page_size,
))
def page(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of TriggerInstance records from the API.
Request is executed immediately
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
"""
data = values.of({
'Recurring': recurring,
'TriggerBy': trigger_by,
'UsageCategory': usage_category,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return TriggerPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of TriggerInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return TriggerPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a TriggerContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
return TriggerContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a TriggerContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
return TriggerContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.TriggerList>'
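# Hedged usage sketch, not part of the generated bindings; assumes the standard
# twilio-python Client and placeholder credentials/SIDs:
#
#     from twilio.rest import Client
#     client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#     trigger = client.usage.triggers.create(
#         callback_url='https://example.com/usage-callback',
#         trigger_value='1000',
#         usage_category='sms',
#         recurring='monthly',
#     )
#     client.usage.triggers(trigger.sid).update(friendly_name='monthly SMS cap')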
class TriggerPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the TriggerPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
"""
super(TriggerPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of TriggerInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
return TriggerInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.TriggerPage>'
class TriggerContext(InstanceContext):
def __init__(self, version, account_sid, sid):
"""
Initialize the TriggerContext
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource to fetch
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
super(TriggerContext, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'sid': sid, }
self._uri = '/Accounts/{account_sid}/Usage/Triggers/{sid}.json'.format(**self._solution)
def fetch(self):
"""
Fetch the TriggerInstance
:returns: The fetched TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return TriggerInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def update(self, callback_method=values.unset, callback_url=values.unset,
friendly_name=values.unset):
"""
Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: The updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
data = values.of({
'CallbackMethod': callback_method,
'CallbackUrl': callback_url,
'FriendlyName': friendly_name,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return TriggerInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the TriggerInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.TriggerContext {}>'.format(context)
class TriggerInstance(InstanceResource):
class UsageCategory(object):
A2P_REGISTRATION_FEES = "a2p-registration-fees"
AGENT_CONFERENCE = "agent-conference"
ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_PAY_VERB_TRANSACTIONS = "calls-pay-verb-transactions"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CALLS_TRANSFERS = "calls-transfers"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
CPS = "cps"
FLEX_USAGE = "flex-usage"
FRAUD_LOOKUPS = "fraud-lookups"
GROUP_ROOMS = "group-rooms"
GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
IMP_V1_USAGE = "imp-v1-usage"
LOOKUPS = "lookups"
MARKETPLACE = "marketplace"
MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = "mms-inbound"
MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
MMS_MESSAGES_CARRIERFEES = "mms-messages-carrierfees"
MMS_OUTBOUND = "mms-outbound"
MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
MONITOR_READS = "monitor-reads"
MONITOR_STORAGE = "monitor-storage"
MONITOR_WRITES = "monitor-writes"
NOTIFY = "notify"
NOTIFY_ACTIONS_ATTEMPTS = "notify-actions-attempts"
NOTIFY_CHANNELS = "notify-channels"
NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
PCHAT = "pchat"
PCHAT_USERS = "pchat-users"
PEER_TO_PEER_ROOMS_PARTICIPANT_MINUTES = "peer-to-peer-rooms-participant-minutes"
PFAX = "pfax"
PFAX_MINUTES = "pfax-minutes"
PFAX_MINUTES_INBOUND = "pfax-minutes-inbound"
PFAX_MINUTES_OUTBOUND = "pfax-minutes-outbound"
PFAX_PAGES = "pfax-pages"
PHONENUMBERS = "phonenumbers"
PHONENUMBERS_CPS = "phonenumbers-cps"
PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
PHONENUMBERS_LOCAL = "phonenumbers-local"
PHONENUMBERS_MOBILE = "phonenumbers-mobile"
PHONENUMBERS_SETUPS = "phonenumbers-setups"
PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
PREMIUMSUPPORT = "premiumsupport"
PROXY = "proxy"
PROXY_ACTIVE_SESSIONS = "proxy-active-sessions"
PSTNCONNECTIVITY = "pstnconnectivity"
PV = "pv"
PV_COMPOSITION_MEDIA_DOWNLOADED = "pv-composition-media-downloaded"
PV_COMPOSITION_MEDIA_ENCRYPTED = "pv-composition-media-encrypted"
PV_COMPOSITION_MEDIA_STORED = "pv-composition-media-stored"
PV_COMPOSITION_MINUTES = "pv-composition-minutes"
PV_RECORDING_COMPOSITIONS = "pv-recording-compositions"
PV_ROOM_PARTICIPANTS = "pv-room-participants"
PV_ROOM_PARTICIPANTS_AU1 = "pv-room-participants-au1"
PV_ROOM_PARTICIPANTS_BR1 = "pv-room-participants-br1"
PV_ROOM_PARTICIPANTS_IE1 = "pv-room-participants-ie1"
PV_ROOM_PARTICIPANTS_JP1 = "pv-room-participants-jp1"
PV_ROOM_PARTICIPANTS_SG1 = "pv-room-participants-sg1"
PV_ROOM_PARTICIPANTS_US1 = "pv-room-participants-us1"
PV_ROOM_PARTICIPANTS_US2 = "pv-room-participants-us2"
PV_ROOMS = "pv-rooms"
PV_SIP_ENDPOINT_REGISTRATIONS = "pv-sip-endpoint-registrations"
RECORDINGS = "recordings"
RECORDINGSTORAGE = "recordingstorage"
ROOMS_GROUP_BANDWIDTH = "rooms-group-bandwidth"
ROOMS_GROUP_MINUTES = "rooms-group-minutes"
ROOMS_PEER_TO_PEER_MINUTES = "rooms-peer-to-peer-minutes"
SHORTCODES = "shortcodes"
SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
SHORTCODES_MPS = "shortcodes-mps"
SHORTCODES_RANDOM = "shortcodes-random"
SHORTCODES_UK = "shortcodes-uk"
SHORTCODES_VANITY = "shortcodes-vanity"
SMALL_GROUP_ROOMS = "small-group-rooms"
SMALL_GROUP_ROOMS_DATA_TRACK = "small-group-rooms-data-track"
SMALL_GROUP_ROOMS_PARTICIPANT_MINUTES = "small-group-rooms-participant-minutes"
SMS = "sms"
SMS_INBOUND = "sms-inbound"
SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
SMS_MESSAGES_CARRIERFEES = "sms-messages-carrierfees"
SMS_MESSAGES_FEATURES = "sms-messages-features"
SMS_MESSAGES_FEATURES_SENDERID = "sms-messages-features-senderid"
SMS_OUTBOUND = "sms-outbound"
SMS_OUTBOUND_CONTENT_INSPECTION = "sms-outbound-content-inspection"
SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
SPEECH_RECOGNITION = "speech-recognition"
STUDIO_ENGAGEMENTS = "studio-engagements"
SYNC = "sync"
SYNC_ACTIONS = "sync-actions"
SYNC_ENDPOINT_HOURS = "sync-endpoint-hours"
SYNC_ENDPOINT_HOURS_ABOVE_DAILY_CAP = "sync-endpoint-hours-above-daily-cap"
TASKROUTER_TASKS = "taskrouter-tasks"
TOTALPRICE = "totalprice"
TRANSCRIPTIONS = "transcriptions"
TRUNKING_CPS = "trunking-cps"
TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
TRUNKING_ORIGINATION = "trunking-origination"
TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
TRUNKING_RECORDINGS = "trunking-recordings"
TRUNKING_SECURE = "trunking-secure"
TRUNKING_TERMINATION = "trunking-termination"
TURNMEGABYTES = "turnmegabytes"
TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
TURNMEGABYTES_GERMANY = "turnmegabytes-germany"
TURNMEGABYTES_INDIA = "turnmegabytes-india"
TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
TURNMEGABYTES_USEAST = "turnmegabytes-useast"
TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
TWILIO_INTERCONNECT = "twilio-interconnect"
VERIFY_PUSH = "verify-push"
VIDEO_RECORDINGS = "video-recordings"
VOICE_INSIGHTS = "voice-insights"
VOICE_INSIGHTS_CLIENT_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-client-insights-on-demand-minute"
VOICE_INSIGHTS_PTSN_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-ptsn-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_INTERFACE_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-interface-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_TRUNKING_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-trunking-insights-on-demand-minute"
WIRELESS = "wireless"
WIRELESS_ORDERS = "wireless-orders"
WIRELESS_ORDERS_ARTWORK = "wireless-orders-artwork"
WIRELESS_ORDERS_BULK = "wireless-orders-bulk"
WIRELESS_ORDERS_ESIM = "wireless-orders-esim"
WIRELESS_ORDERS_STARTER = "wireless-orders-starter"
WIRELESS_USAGE = "wireless-usage"
WIRELESS_USAGE_COMMANDS = "wireless-usage-commands"
WIRELESS_USAGE_COMMANDS_AFRICA = "wireless-usage-commands-africa"
WIRELESS_USAGE_COMMANDS_ASIA = "wireless-usage-commands-asia"
WIRELESS_USAGE_COMMANDS_CENTRALANDSOUTHAMERICA = "wireless-usage-commands-centralandsouthamerica"
WIRELESS_USAGE_COMMANDS_EUROPE = "wireless-usage-commands-europe"
WIRELESS_USAGE_COMMANDS_HOME = "wireless-usage-commands-home"
WIRELESS_USAGE_COMMANDS_NORTHAMERICA = "wireless-usage-commands-northamerica"
WIRELESS_USAGE_COMMANDS_OCEANIA = "wireless-usage-commands-oceania"
WIRELESS_USAGE_COMMANDS_ROAMING = "wireless-usage-commands-roaming"
WIRELESS_USAGE_DATA = "wireless-usage-data"
WIRELESS_USAGE_DATA_AFRICA = "wireless-usage-data-africa"
WIRELESS_USAGE_DATA_ASIA = "wireless-usage-data-asia"
WIRELESS_USAGE_DATA_CENTRALANDSOUTHAMERICA = "wireless-usage-data-centralandsouthamerica"
WIRELESS_USAGE_DATA_CUSTOM_ADDITIONALMB = "wireless-usage-data-custom-additionalmb"
WIRELESS_USAGE_DATA_CUSTOM_FIRST5MB = "wireless-usage-data-custom-first5mb"
WIRELESS_USAGE_DATA_DOMESTIC_ROAMING = "wireless-usage-data-domestic-roaming"
WIRELESS_USAGE_DATA_EUROPE = "wireless-usage-data-europe"
WIRELESS_USAGE_DATA_INDIVIDUAL_ADDITIONALGB = "wireless-usage-data-individual-additionalgb"
WIRELESS_USAGE_DATA_INDIVIDUAL_FIRSTGB = "wireless-usage-data-individual-firstgb"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_CANADA = "wireless-usage-data-international-roaming-canada"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_INDIA = "wireless-usage-data-international-roaming-india"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_MEXICO = "wireless-usage-data-international-roaming-mexico"
WIRELESS_USAGE_DATA_NORTHAMERICA = "wireless-usage-data-northamerica"
WIRELESS_USAGE_DATA_OCEANIA = "wireless-usage-data-oceania"
WIRELESS_USAGE_DATA_POOLED = "wireless-usage-data-pooled"
WIRELESS_USAGE_DATA_POOLED_DOWNLINK = "wireless-usage-data-pooled-downlink"
WIRELESS_USAGE_DATA_POOLED_UPLINK = "wireless-usage-data-pooled-uplink"
WIRELESS_USAGE_MRC = "wireless-usage-mrc"
WIRELESS_USAGE_MRC_CUSTOM = "wireless-usage-mrc-custom"
WIRELESS_USAGE_MRC_INDIVIDUAL = "wireless-usage-mrc-individual"
WIRELESS_USAGE_MRC_POOLED = "wireless-usage-mrc-pooled"
WIRELESS_USAGE_MRC_SUSPENDED = "wireless-usage-mrc-suspended"
WIRELESS_USAGE_SMS = "wireless-usage-sms"
WIRELESS_USAGE_VOICE = "wireless-usage-voice"
class Recurring(object):
DAILY = "daily"
MONTHLY = "monthly"
YEARLY = "yearly"
ALLTIME = "alltime"
class TriggerField(object):
COUNT = "count"
USAGE = "usage"
PRICE = "price"
def __init__(self, version, payload, account_sid, sid=None):
"""
Initialize the TriggerInstance
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
super(TriggerInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'api_version': payload.get('api_version'),
'callback_method': payload.get('callback_method'),
'callback_url': payload.get('callback_url'),
'current_value': payload.get('current_value'),
'date_created': deserialize.rfc2822_datetime(payload.get('date_created')),
'date_fired': deserialize.rfc2822_datetime(payload.get('date_fired')),
'date_updated': deserialize.rfc2822_datetime(payload.get('date_updated')),
'friendly_name': payload.get('friendly_name'),
'recurring': payload.get('recurring'),
'sid': payload.get('sid'),
'trigger_by': payload.get('trigger_by'),
'trigger_value': payload.get('trigger_value'),
'uri': payload.get('uri'),
'usage_category': payload.get('usage_category'),
'usage_record_uri': payload.get('usage_record_uri'),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: TriggerContext for this TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
"""
if self._context is None:
self._context = TriggerContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that this trigger monitors
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version used to create the resource
:rtype: unicode
"""
return self._properties['api_version']
@property
def callback_method(self):
"""
:returns: The HTTP method we use to call callback_url
:rtype: unicode
"""
return self._properties['callback_method']
@property
def callback_url(self):
"""
:returns: The URL we call when the trigger fires
:rtype: unicode
"""
return self._properties['callback_url']
@property
def current_value(self):
"""
:returns: The current value of the field the trigger is watching
:rtype: unicode
"""
return self._properties['current_value']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_fired(self):
"""
:returns: The RFC 2822 date and time in GMT that the trigger was last fired
:rtype: datetime
"""
return self._properties['date_fired']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def friendly_name(self):
"""
:returns: The string that you assigned to describe the trigger
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def recurring(self):
"""
:returns: The frequency of a recurring UsageTrigger
:rtype: TriggerInstance.Recurring
"""
return self._properties['recurring']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def trigger_by(self):
"""
:returns: The field in the UsageRecord resource that fires the trigger
:rtype: TriggerInstance.TriggerField
"""
return self._properties['trigger_by']
@property
def trigger_value(self):
"""
:returns: The value at which the trigger will fire
:rtype: unicode
"""
return self._properties['trigger_value']
@property
def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri']
@property
def usage_category(self):
"""
:returns: The usage category the trigger watches
:rtype: TriggerInstance.UsageCategory
"""
return self._properties['usage_category']
@property
def usage_record_uri(self):
"""
:returns: The URI of the UsageRecord resource this trigger watches
:rtype: unicode
"""
return self._properties['usage_record_uri']
def fetch(self):
"""
Fetch the TriggerInstance
:returns: The fetched TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
return self._proxy.fetch()
def update(self, callback_method=values.unset, callback_url=values.unset,
friendly_name=values.unset):
"""
Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: The updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
return self._proxy.update(
callback_method=callback_method,
callback_url=callback_url,
friendly_name=friendly_name,
)
def delete(self):
"""
Deletes the TriggerInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.TriggerInstance {}>'.format(context)
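# A minimal usage sketch (not part of this module): it assumes a configured
# twilio.rest.Client named `client` and a hypothetical trigger SID.
#
#     trigger = client.usage.triggers('UTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').fetch()
#     trigger = trigger.update(friendly_name='High usage alert')
#     print(trigger.usage_category, trigger.current_value)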
|
|
"""Routines related to PyPI, indexes"""
from __future__ import absolute_import
import cgi
import itertools
import logging
import mimetypes
import os
import posixpath
import re
import sys
from collections import namedtuple
from pip._vendor import html5lib, requests, six
from pip._vendor.distlib.compat import unescape
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import HTTPError, RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip._internal.exceptions import (
BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.index import PyPI
from pip._internal.models.link import Link
from pip._internal.pep425tags import get_supported
from pip._internal.utils.compat import ipaddress
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, WHEEL_EXTENSION, normalize_path,
redact_password_from_url,
)
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.wheel import Wheel
if MYPY_CHECK_RUNNING:
from logging import Logger
from typing import (
Tuple, Optional, Any, List, Union, Callable, Set, Sequence,
Iterable, MutableMapping
)
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.requests import Response
from pip._internal.pep425tags import Pep425Tag
from pip._internal.req import InstallRequirement
from pip._internal.download import PipSession
SecureOrigin = Tuple[str, str, Optional[str]]
BuildTag = Tuple[Any, ...] # either empty tuple or Tuple[int, str]
CandidateSortingKey = Tuple[int, _BaseVersion, BuildTag, Optional[int]]
__all__ = ['FormatControl', 'FoundCandidates', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
] # type: List[SecureOrigin]
logger = logging.getLogger(__name__)
def _match_vcs_scheme(url):
# type: (str) -> Optional[str]
"""Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
"""
from pip._internal.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
return scheme
return None
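# Illustration (hypothetical URL): assuming 'git' is registered in
# VcsSupport.schemes, _match_vcs_scheme('git+https://example.com/repo.git')
# returns 'git', while 'https://example.com/pkg.tar.gz' returns None.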
def _is_url_like_archive(url):
# type: (str) -> bool
"""Return whether the URL looks like an archive.
"""
filename = Link(url).filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
return True
return False
class _NotHTML(Exception):
def __init__(self, content_type, request_desc):
# type: (str, str) -> None
super(_NotHTML, self).__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response):
# type: (Response) -> None
"""Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
"""
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url, session):
# type: (str, PipSession) -> None
"""Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
_ensure_html_header(resp)
def _get_html_response(url, session):
# type: (str, PipSession) -> Response
"""Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
"""
if _is_url_like_archive(url):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_password_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
# We don't want to blindly return cached data for
# /simple/, because authors generally expect that
# twine upload && pip install will function, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
"Cache-Control": "max-age=0",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp
def _handle_get_page_fail(
link, # type: Link
reason, # type: Union[str, Exception]
meth=None # type: Optional[Callable[..., None]]
):
# type: (...) -> None
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _get_html_page(link, session=None):
# type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
if session is None:
raise TypeError(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
url = link.url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.debug('Cannot look at %s URL %s', vcs_scheme, link)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.debug(
'Skipping page %s because it looks like an archive, and cannot '
'be checked by HEAD.', link,
)
except _NotHTML as exc:
logger.debug(
'Skipping page %s because the %s request got Content-Type: %s',
link, exc.request_desc, exc.content_type,
)
except HTTPError as exc:
_handle_get_page_fail(link, exc)
except RetryError as exc:
_handle_get_page_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_page_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_page_fail(link, "connection error: %s" % exc)
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
return HTMLPage(resp.content, resp.url, resp.headers)
return None
class CandidateEvaluator(object):
def __init__(
self,
valid_tags, # type: List[Pep425Tag]
prefer_binary=False # type: bool
):
# type: (...) -> None
self._prefer_binary = prefer_binary
self._valid_tags = valid_tags
def is_wheel_supported(self, wheel):
# type: (Wheel) -> bool
return wheel.supported(self._valid_tags)
def _sort_key(self, candidate):
# type: (InstallationCandidate) -> CandidateSortingKey
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self._valid_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version would have to be considered equal
"""
support_num = len(self._valid_tags)
build_tag = tuple() # type: BuildTag
binary_preference = 0
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported(self._valid_tags):
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
if self._prefer_binary:
binary_preference = 1
pri = -(wheel.support_index_min(self._valid_tags))
if wheel.build_tag is not None:
match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
pri = -(support_num)
return (binary_preference, candidate.version, build_tag, pri)
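# Illustration of the resulting keys (hypothetical values): with
# prefer_binary=True, a supported wheel of version 2.0 might produce
# (1, Version('2.0'), (), -3), while an sdist of the same version produces
# (0, Version('2.0'), (), -support_num); under max() the wheel sorts higher.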
def get_best_candidate(self, candidates):
# type: (List[InstallationCandidate]) -> InstallationCandidate
"""
Return the best candidate per the instance's sort order, or None if
no candidates are given.
"""
if not candidates:
return None
return max(candidates, key=self._sort_key)
class FoundCandidates(object):
"""A collection of candidates, returned by `PackageFinder.find_candidates`.
This class is only intended to be instantiated by PackageFinder through
the `from_specifier()` constructor.
Arguments:
* `candidates`: A sequence of all available candidates found.
* `specifier`: Specifier to filter applicable versions.
* `prereleases`: Whether prereleases should be accounted for. Pass None to
infer from the specifier.
* `evaluator`: A CandidateEvaluator object to sort applicable candidates
by order of preference.
"""
def __init__(
self,
candidates, # type: List[InstallationCandidate]
versions, # type: Set[str]
evaluator, # type: CandidateEvaluator
):
# type: (...) -> None
self._candidates = candidates
self._evaluator = evaluator
self._versions = versions
@classmethod
def from_specifier(
cls,
candidates, # type: List[InstallationCandidate]
specifier, # type: specifiers.BaseSpecifier
prereleases, # type: Optional[bool]
evaluator, # type: CandidateEvaluator
):
# type: (...) -> FoundCandidates
versions = {
str(v) for v in specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
(str(c.version) for c in candidates),
prereleases=prereleases,
)
}
return cls(candidates, versions, evaluator)
def iter_all(self):
# type: () -> Iterable[InstallationCandidate]
"""Iterate through all candidates.
"""
return iter(self._candidates)
def iter_applicable(self):
# type: () -> Iterable[InstallationCandidate]
"""Iterate through candidates matching the versions associated with
this instance.
"""
# Again, converting version to str to deal with debundling.
return (c for c in self.iter_all() if str(c.version) in self._versions)
def get_best(self):
# type: () -> Optional[InstallationCandidate]
"""Return the best candidate available, or None if no applicable
candidates are found.
"""
candidates = list(self.iter_applicable())
return self._evaluator.get_best_candidate(candidates)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(
self,
find_links, # type: List[str]
index_urls, # type: List[str]
allow_all_prereleases=False, # type: bool
trusted_hosts=None, # type: Optional[Iterable[str]]
session=None, # type: Optional[PipSession]
format_control=None, # type: Optional[FormatControl]
platform=None, # type: Optional[str]
versions=None, # type: Optional[List[str]]
abi=None, # type: Optional[str]
implementation=None, # type: Optional[str]
prefer_binary=False # type: bool
):
# type: (...) -> None
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
:param platform: A string or None. If None, searches for packages
that are supported by the current system. Otherwise, will find
packages that can be built on the platform passed in. These
packages will only be downloaded for distribution: they will
not be built locally.
:param versions: A list of strings or None. This is passed directly
to pep425tags.py in the get_supported() method.
:param abi: A string or None. This is passed directly
to pep425tags.py in the get_supported() method.
:param implementation: A string or None. This is passed directly
to pep425tags.py in the get_supported() method.
:param prefer_binary: Whether to prefer an old, but valid, binary
dist over a new source dist.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = [] # type: List[str]
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
# These are boring links that have already been logged somehow:
self.logged_links = set() # type: Set[Link]
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
] # type: List[SecureOrigin]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# The Session we'll use to make requests
self.session = session
# The valid tags to check potential found wheel candidates against
valid_tags = get_supported(
versions=versions,
platform=platform,
abi=abi,
impl=implementation,
)
self.candidate_evaluator = CandidateEvaluator(
valid_tags=valid_tags, prefer_binary=prefer_binary,
)
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def get_formatted_locations(self):
# type: () -> str
lines = []
if self.index_urls and self.index_urls != [PyPI.simple_url]:
lines.append(
"Looking in indexes: {}".format(", ".join(
redact_password_from_url(url) for url in self.index_urls))
)
if self.find_links:
lines.append(
"Looking in links: {}".format(", ".join(self.find_links))
)
return "\n".join(lines)
@staticmethod
def _sort_locations(locations, expand_dir=False):
# type: (Sequence[str], bool) -> Tuple[List[str], List[str]]
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
else:
logger.warning(
"Path '{0}' is ignored: "
"it is a directory.".format(path),
)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url,
)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url,
)
return files, urls
def _validate_secure_origin(self, logger, location):
# type: (Logger, Link) -> bool
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
# setting secure_origin[1] to proper Union[bytes, str]
# creates problems in other places
else secure_origin[1].decode("utf8") # type: ignore
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (origin[1] and
origin[1].lower() != secure_origin[1].lower() and
secure_origin[1] != "*"):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS we "
"recommend you use HTTPS instead, otherwise you may silence "
"this warning and allow it anyway with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
def _get_index_urls_locations(self, project_name):
# type: (str) -> List[str]
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
uses this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(
url,
urllib_parse.quote(canonicalize_name(project_name)))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
def find_all_candidates(self, project_name):
# type: (str) -> List[Optional[InstallationCandidate]]
"""Find all available InstallationCandidate for project_name
This checks index_urls and find_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True,
)
file_locations = (Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc,
))
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links.
# We want to filter out anything which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = self.format_control.get_allowed_formats(canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.iter_links(), search)
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return file_versions + find_links_versions + page_versions
def find_candidates(
self,
project_name, # type: str
specifier=None, # type: Optional[specifiers.BaseSpecifier]
):
"""Find matches for the given project and specifier.
If given, `specifier` should implement `filter` to allow version
filtering (e.g. ``packaging.specifiers.SpecifierSet``).
Returns a `FoundCandidates` instance.
"""
if specifier is None:
specifier = specifiers.SpecifierSet()
return FoundCandidates.from_specifier(
self.find_all_candidates(project_name),
specifier=specifier,
prereleases=(self.allow_all_prereleases or None),
evaluator=self.candidate_evaluator,
)
def find_requirement(self, req, upgrade):
# type: (InstallRequirement, bool) -> Optional[Link]
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
candidates = self.find_candidates(req.name, req.specifier)
best_candidate = candidates.get_best()
installed_version = None # type: Optional[_BaseVersion]
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
def _format_versions(cand_iter):
# This repeated parse_version and str() conversion is needed to
# handle different vendoring sources from pip and pkg_resources.
# If we stop using the pkg_resources provided specifier and start
# using our own, we can drop the cast to str().
return ", ".join(sorted(
{str(c.version) for c in cand_iter},
key=parse_version,
)) or "none"
if installed_version is None and best_candidate is None:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
_format_versions(candidates.iter_all()),
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
best_candidate.version,
)
return None
if best_installed:
# We have an existing version, and it's the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
_format_versions(candidates.iter_applicable()),
)
raise BestVersionAlreadyInstalled
logger.debug(
'Using version %s (newest of versions: %s)',
best_candidate.version,
_format_versions(candidates.iter_applicable()),
)
return best_candidate.location
def _get_pages(self, locations, project_name):
# type: (Iterable[Link], str) -> Iterable[HTMLPage]
"""
Yields HTMLPage objects from the given locations, skipping
locations that have errors.
"""
seen = set() # type: Set[Link]
for location in locations:
if location in seen:
continue
seen.add(location)
page = _get_html_page(location, session=self.session)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
# type: (Iterable[Link]) -> List[Link]
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set() # type: Set[Link]
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(
self,
links, # type: Iterable[Link]
search # type: Search
):
# type: (...) -> List[Optional[InstallationCandidate]]
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
# type: (Link, str) -> None
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
# type: (Link, Search) -> Optional[InstallationCandidate]
"""Return an InstallationCandidate or None"""
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return None
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext,
)
return None
if "binary" not in search.formats and ext == WHEEL_EXTENSION:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied,
)
return None
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return None
if ext == WHEEL_EXTENSION:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return None
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return None
if not self.candidate_evaluator.is_wheel_supported(wheel):
self._log_skipped_link(
link, 'it is not compatible with this Python')
return None
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != WHEEL_EXTENSION:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied,
)
return None
if not version:
version = _egg_info_matches(egg_info, search.canonical)
if not version:
self._log_skipped_link(
link, 'Missing project version for %s' % search.supplied)
return None
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return None
try:
support_this_python = check_requires_python(link.requires_python)
except specifiers.InvalidSpecifier:
logger.debug("Package %s has an invalid Requires-Python entry: %s",
link.filename, link.requires_python)
support_this_python = True
if not support_this_python:
logger.debug("The package %s is incompatible with the python "
"version in use. Acceptable python versions are: %s",
link, link.requires_python)
return None
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _find_name_version_sep(egg_info, canonical_name):
# type: (str, str) -> int
"""Find the separator's index based on the package's canonical name.
`egg_info` must be an egg info string for the given package, and
`canonical_name` must be the package's canonical name.
This function is needed since the canonicalized name does not necessarily
have the same length as the egg info's name part. An example::
>>> egg_info = 'foo__bar-1.0'
>>> canonical_name = 'foo-bar'
>>> _find_name_version_sep(egg_info, canonical_name)
8
"""
# Project name and version must be separated by one single dash. Find all
# occurrences of dashes; if the string in front of it matches the canonical
# name, this is the one separating the name and version parts.
for i, c in enumerate(egg_info):
if c != "-":
continue
if canonicalize_name(egg_info[:i]) == canonical_name:
return i
raise ValueError("{} does not match {}".format(egg_info, canonical_name))
def _egg_info_matches(egg_info, canonical_name):
# type: (str, str) -> Optional[str]
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param canonical_name: The canonicalized name of the package this
belongs to.
"""
try:
version_start = _find_name_version_sep(egg_info, canonical_name) + 1
except ValueError:
return None
version = egg_info[version_start:]
if not version:
return None
return version
def _determine_base_url(document, page_url):
"""Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
"""
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
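# For example (hypothetical document): if the page served at
# https://host/simple/foo/ contains <base href="https://mirror.example/pkgs/">,
# relative anchor hrefs are resolved against the mirror URL rather than the
# page's own URL.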
def _get_encoding_from_headers(headers):
"""Determine if we have any encoding information in our headers.
"""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params['charset']
return None
def _clean_link(url):
# type: (str) -> str
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
# Split the URL into parts according to the general structure
# `scheme://netloc/path;parameters?query#fragment`. Note that the
# `netloc` can be empty and the URI will then refer to a local
# filesystem path.
result = urllib_parse.urlparse(url)
# In both cases below we unquote prior to quoting to make sure
# nothing is double quoted.
if result.netloc == "":
# On Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
path = urllib_request.pathname2url(
urllib_request.url2pathname(result.path))
else:
# In addition to the `/` character we protect `@` so that
# revision strings in VCS URLs are properly parsed.
path = urllib_parse.quote(urllib_parse.unquote(result.path), safe="/@")
return urllib_parse.urlunparse(result._replace(path=path))
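# Behaviour sketch (hypothetical inputs):
#   _clean_link('https://example.com/some path/pkg-1.0.tar.gz')
#       -> 'https://example.com/some%20path/pkg-1.0.tar.gz'
#   _clean_link('https://example.com/pkg%201.0.tar.gz')
#       -> 'https://example.com/pkg%201.0.tar.gz'  (already quoted, left as-is)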
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# type: (bytes, str, MutableMapping[str, str]) -> None
self.content = content
self.url = url
self.headers = headers
def __str__(self):
return redact_password_from_url(self.url)
def iter_links(self):
# type: () -> Iterable[Link]
"""Yields all links in the page"""
document = html5lib.parse(
self.content,
transport_encoding=_get_encoding_from_headers(self.headers),
namespaceHTMLElements=False,
)
base_url = _determine_base_url(document, self.url)
for anchor in document.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = _clean_link(urllib_parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = unescape(pyrequire) if pyrequire else None
yield Link(url, self.url, requires_python=pyrequire)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
|
|
"""Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from utils import *
import sys
import math
import random
#______________________________________________________________________________
class Problem:
"""The abstract class for a formal problem. You should subclass this and
implement the method successor, and possibly __init__, goal_test, and
path_cost. Then you will create instances of your subclass and solve them
with the various search functions."""
def __init__(self, initial, goal=None):
"""The constructor specifies the initial state, and possibly a goal
state, if there is a unique goal. Your subclass's constructor can add
other arguments."""
self.initial = initial
self.goal = goal
def successor(self, state):
"""Given a state, return a sequence of (action, state) pairs reachable
from this state. If there are many successors, consider an iterator
that yields the successors one at a time, rather than building them
all at once. Iterators will work fine within the framework."""
abstract
def goal_test(self, state):
"""Return True if the state is a goal. The default method compares the
state to self.goal, as specified in the constructor. Implement this
method if checking against a single self.goal is not enough."""
return state == self.goal
def path_cost(self, c, state1, action, state2):
"""Return the cost of a solution path that arrives at state2 from
state1 via action, assuming cost c to get up to state1. If the problem
is such that the path doesn't matter, this function will only look at
state2. If the path does matter, it will consider c and maybe state1
and action. The default method costs 1 for every step in the path."""
return c + 1
def value(self, state):
"""For optimization problems, each state has a value. Hill-climbing
and related algorithms try to maximize this value."""
abstract
#______________________________________________________________________________
class Node:
"""A node in a search tree. Contains a pointer to the parent (the node
that this is a successor of) and to the actual state for this node. Note
that if a state is arrived at by two paths, then there are two nodes with
the same state. Also includes the action that got us to this state, and
the total path_cost (also known as g) to reach the node. Other functions
may add an f and h value; see best_first_graph_search and astar_search for
an explanation of how the f and h values are handled. You will not need to
subclass this class."""
def __init__(self, state, parent=None, action=None, path_cost=0):
"Create a search tree Node, derived from a parent by an action."
self.state = state
self.parent = parent
self.action = action
self.path_cost = path_cost
if parent:
self.depth = parent.depth + 1
else:
self.depth = 0
def __repr__(self):
return "<Node %s>" % (self.state,)
def path(self):
"Create a list of nodes from the root to this node."
x, result = self, [self]
while x.parent:
result.append(x.parent)
x = x.parent
return result
def expand(self, problem):
"Yield the nodes reachable from this node. [Fig. 3.8]"
for (act, next) in problem.successor(self.state):
yield Node(next, self, act,
problem.path_cost(self.path_cost, self.state, act, next))
#______________________________________________________________________________
# Uninformed Search algorithms
def tree_search(problem, fringe):
"""Search through the successors of a problem to find a goal.
The argument fringe should be an empty queue.
Don't worry about repeated paths to a state. [Fig. 3.8]"""
fringe.append(Node(problem.initial))
while fringe:
node = fringe.pop()
if problem.goal_test(node.state):
return node
fringe.extend(node.expand(problem))
return None
def breadth_first_tree_search(problem):
"Search the shallowest nodes in the search tree first. [p 74]"
return tree_search(problem, FIFOQueue())
def depth_first_tree_search(problem):
"Search the deepest nodes in the search tree first. [p 74]"
return tree_search(problem, Stack())
def graph_search(problem, fringe):
"""Search through the successors of a problem to find a goal.
The argument fringe should be an empty queue.
If two paths reach a state, only use the best one. [Fig. 3.18]"""
closed = {}
fringe.append(Node(problem.initial))
while fringe:
node = fringe.pop()
if problem.goal_test(node.state):
return node
if node.state not in closed:
closed[node.state] = True
fringe.extend(node.expand(problem))
return None
def breadth_first_graph_search(problem):
"Search the shallowest nodes in the search tree first. [p 74]"
return graph_search(problem, FIFOQueue())
def depth_first_graph_search(problem):
"Search the deepest nodes in the search tree first. [p 74]"
return graph_search(problem, Stack())
def depth_limited_search(problem, limit=50):
"[Fig. 3.12]"
def recursive_dls(node, problem, limit):
cutoff_occurred = False
if problem.goal_test(node.state):
return node
elif node.depth == limit:
return 'cutoff'
else:
for successor in node.expand(problem):
result = recursive_dls(successor, problem, limit)
if result == 'cutoff':
cutoff_occurred = True
elif result is not None:
return result
if cutoff_occurred:
return 'cutoff'
else:
return None
# Body of depth_limited_search:
return recursive_dls(Node(problem.initial), problem, limit)
def iterative_deepening_search(problem):
"[Fig. 3.13]"
for depth in range(sys.maxsize):
result = depth_limited_search(problem, depth)
if result != 'cutoff':
return result
#______________________________________________________________________________
# Informed (Heuristic) Search
def best_first_graph_search(problem, f):
"""Search the nodes with the lowest f scores first.
You specify the function f(node) that you want to minimize; for example,
if f is a heuristic estimate to the goal, then we have greedy best
first search; if f is node.depth then we have depth-first search."""
return graph_search(problem, PriorityQueue(f, min))
def astar_graph_search(problem, h):
"""A* search is best-first graph search with f(n) = g(n)+h(n).
You need to specify the h function when you call astar_search."""
def f(n):
return n.path_cost + h(n)
return best_first_graph_search(problem, f)
#____________________________________________________________________________
# Local Search Algorithms
class LSNode:
"""A node in a local search. You will not need to subclass this class
for local search."""
def __init__(self, problem, state, step):
"""Create a local search Node."""
self.problem = problem
self.state = state
self.step = step
self._value = None
def __repr__(self):
return "<Node %s>" % (self.state,)
def value(self, state=None):
"""Returns the value of the state contained in this node."""
if self._value is None:
self._value = self.problem.value(self.state)
return self._value
def expand(self):
"""Yields nodes reachable from this node. [Fig. 3.8]"""
for (act, next) in self.problem.successor(self.state):
yield LSNode(self.problem, next, self.step + 1)
def random_walk(problem, limit=100, callback=None):
"""Perform a random walk in the search space and return the best solution
found. The returned value is an LSNode.
If callback is not None, it must be a one-argument function that will be
called at each step with the current node.
"""
current = LSNode(problem, problem.initial, 0)
best = current
for step in range(limit):
if callback is not None:
callback(current)
current = random.choice(list(current.expand()))
if current.value() > best.value():
best = current
return best
def exp_schedule(k=20, lam=0.05, limit=100):
"""One possible schedule function for simulated annealing"""
return lambda t: (k * math.exp(-lam * t) if t < limit else 0)
def simulated_annealing(problem, schedule=exp_schedule(), callback=None):
"""[Fig. 4.5]
If callback is not None, it must be a one-argument function that will be
called at each step with the current node.
"""
current = LSNode(problem, problem.initial, 0)
best = current
for t in range(sys.maxsize):
if callback is not None:
callback(current)
T = schedule(t)
if T == 0:
return best
next = random.choice(list(current.expand()))
delta_e = next.value() - current.value()
if delta_e > 0 or math.exp(delta_e / T) > random.uniform(0.0, 1.0):
current = next
if current.value() > best.value():
best = current
else:
current = LSNode(problem, current.state, t + 1)
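#______________________________________________________________________________
# A minimal, self-contained sketch (not part of the original module) of the
# intended workflow: subclass Problem, then hand an instance to one of the
# search functions. The toy problem below is purely illustrative.
class CountToProblem(Problem):
    """Reach the goal integer from the initial one by adding 1 or 2."""
    def successor(self, state):
        return [('+1', state + 1), ('+2', state + 2)]
    def value(self, state):
        # Used by the local search routines: closer to the goal is better.
        return -abs(self.goal - state)

if __name__ == '__main__':
    goal_node = iterative_deepening_search(CountToProblem(0, goal=5))
    if goal_node is not None:
        actions = [n.action for n in reversed(goal_node.path()) if n.action]
        print('Reached %s via %s' % (goal_node.state, actions))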
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.dns.drivers.onapp import OnAppDNSDriver
from libcloud.dns.types import RecordType
from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_ONAPP
from libcloud.utils.py3 import httplib
from libcloud.common.exceptions import BaseHTTPError
class OnAppDNSTests(LibcloudTestCase):
def setUp(self):
OnAppDNSDriver.connectionCls.conn_class = OnAppDNSMockHttp
OnAppDNSMockHttp.type = None
self.driver = OnAppDNSDriver(*DNS_PARAMS_ONAPP)
def assertHasKeys(self, dictionary, keys):
for key in keys:
self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
(key))
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 8)
self.assertTrue(RecordType.A in record_types)
self.assertTrue(RecordType.AAAA in record_types)
self.assertTrue(RecordType.CNAME in record_types)
self.assertTrue(RecordType.MX in record_types)
self.assertTrue(RecordType.NS in record_types)
self.assertTrue(RecordType.SOA in record_types)
self.assertTrue(RecordType.SRV in record_types)
self.assertTrue(RecordType.TXT in record_types)
def test_list_zones_success(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 2)
zone1 = zones[0]
self.assertEqual(zone1.id, '1')
self.assertEqual(zone1.type, 'master')
self.assertEqual(zone1.domain, 'example.com')
self.assertEqual(zone1.ttl, 1200)
self.assertHasKeys(zone1.extra, ['user_id', 'cdn_reference',
'created_at', 'updated_at'])
zone2 = zones[1]
self.assertEqual(zone2.id, '2')
self.assertEqual(zone2.type, 'master')
self.assertEqual(zone2.domain, 'example.net')
self.assertEqual(zone2.ttl, 1200)
self.assertHasKeys(zone2.extra, ['user_id', 'cdn_reference',
'created_at', 'updated_at'])
def test_get_zone_success(self):
zone1 = self.driver.get_zone(zone_id='1')
self.assertEqual(zone1.id, '1')
self.assertEqual(zone1.type, 'master')
self.assertEqual(zone1.domain, 'example.com')
self.assertEqual(zone1.ttl, 1200)
self.assertHasKeys(zone1.extra, ['user_id', 'cdn_reference',
'created_at', 'updated_at'])
def test_get_zone_not_found(self):
OnAppDNSMockHttp.type = 'NOT_FOUND'
with self.assertRaises(BaseHTTPError):
    self.driver.get_zone(zone_id='3')
def test_create_zone_success(self):
OnAppDNSMockHttp.type = 'CREATE'
zone = self.driver.create_zone(domain='example.com')
self.assertEqual(zone.id, '1')
self.assertEqual(zone.domain, 'example.com')
self.assertEqual(zone.ttl, 1200)
self.assertEqual(zone.type, 'master')
self.assertHasKeys(zone.extra, ['user_id', 'cdn_reference',
'created_at', 'updated_at'])
def test_delete_zone(self):
zone = self.driver.get_zone(zone_id='1')
OnAppDNSMockHttp.type = 'DELETE'
self.assertTrue(self.driver.delete_zone(zone))
def test_list_records_success(self):
zone = self.driver.get_zone(zone_id='1')
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 5)
record1 = records[0]
self.assertEqual(record1.id, '111222')
self.assertEqual(record1.name, '@')
self.assertEqual(record1.type, RecordType.A)
self.assertEqual(record1.ttl, 3600)
self.assertEqual(record1.data['ip'], '123.156.189.1')
record2 = records[2]
self.assertEqual(record2.id, '111224')
self.assertEqual(record2.name, 'mail')
self.assertEqual(record2.ttl, 3600)
self.assertEqual(record2.type, RecordType.CNAME)
self.assertEqual(record2.data['hostname'], 'examplemail.com')
record3 = records[4]
self.assertEqual(record3.id, '111226')
self.assertEqual(record3.name, '@')
self.assertEqual(record3.type, RecordType.MX)
self.assertEqual(record3.data['hostname'], 'mx2.examplemail.com')
def test_get_record_success(self):
record = self.driver.get_record(zone_id='1',
record_id='123')
self.assertEqual(record.id, '123')
self.assertEqual(record.name, '@')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data['ip'], '123.156.189.1')
def test_create_record_success(self):
zone = self.driver.get_zone(zone_id='1')
OnAppDNSMockHttp.type = 'CREATE'
record = self.driver.create_record(name='blog', zone=zone,
type=RecordType.A,
data='123.156.189.2')
self.assertEqual(record.id, '111227')
self.assertEqual(record.name, 'blog')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data['ip'], '123.156.189.2')
self.assertEqual(record.data['ttl'], 3600)
def test_update_record_success(self):
record = self.driver.get_record(zone_id='1',
record_id='123')
OnAppDNSMockHttp.type = 'UPDATE'
extra = {'ttl': 4500}
record1 = self.driver.update_record(record=record, name='@',
type=record.type,
data='123.156.189.2',
extra=extra)
self.assertEqual(record.data['ip'], '123.156.189.1')
self.assertEqual(record.ttl, 3600)
self.assertEqual(record1.data['ip'], '123.156.189.2')
self.assertEqual(record1.ttl, 4500)
def test_delete_record_success(self):
record = self.driver.get_record(zone_id='1',
record_id='123')
OnAppDNSMockHttp.type = 'DELETE'
status = self.driver.delete_record(record=record)
self.assertTrue(status)
class OnAppDNSMockHttp(MockHttp):
fixtures = DNSFileFixtures('onapp')
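# Note (implementation assumption): MockHttp dispatches each request to the
# method named after the URL path, with non-alphanumeric characters replaced
# by underscores and the current `type` suffix appended; e.g. a request to
# /dns_zones/1/records/123.json while type is 'UPDATE' is handled by
# _dns_zones_1_records_123_json_UPDATE below.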
def _dns_zones_json(self, method, url, body, headers):
body = self.fixtures.load('list_zones.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _dns_zones_1_json(self, method, url, body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _dns_zones_3_json_NOT_FOUND(self, method, url, body, headers):
body = self.fixtures.load('dns_zone_not_found.json')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _dns_zones_json_CREATE(self, method, url, body, headers):
body = self.fixtures.load('create_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _dns_zones_1_json_DELETE(self, method, url, body, headers):
return (httplib.NO_CONTENT, '', {},
httplib.responses[httplib.NO_CONTENT])
def _dns_zones_1_records_json(self, method, url, body, headers):
body = self.fixtures.load('list_records.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _dns_zones_1_records_123_json(self, method, url, body, headers):
body = self.fixtures.load('get_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _dns_zones_1_records_json_CREATE(self, method, url, body, headers):
body = self.fixtures.load('create_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _dns_zones_1_records_123_json_UPDATE(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('get_record_after_update.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
else:
return (httplib.NO_CONTENT, '', {},
httplib.responses[httplib.NO_CONTENT])
def _dns_zones_1_json_UPDATE(self, method, url, body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _dns_zones_1_records_123_json_DELETE(self, method, url, body, headers):
return (httplib.NO_CONTENT, '', {},
httplib.responses[httplib.NO_CONTENT])
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from itertools import product
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_output,
)
from test_framework.wallet_util import bytes_to_wif
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
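# Per BIP 125, an input signals opt-in replace-by-fee when its nSequence is
# below 0xfffffffe, so 0xfffffffd is the highest value that still signals RBF.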
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1", "-addresstype=bech32", "-changetype=bech32"], #TODO: Remove address type restrictions once taproot has psbt extensions
["-walletrbf=0", "-changetype=legacy"],
[]
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# TODO: Re-enable this test with segwit v1
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
# Topology of test network is linear, so this one call is enough
self.disconnect_nodes(0, 1)
# Create watchonly on online_node
online_node.createwallet(wallet_name='wonline', disable_private_keys=True)
wonline = online_node.get_wallet_rpc('wonline')
w2 = online_node.get_wallet_rpc('')
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = w2.getnewaddress(address_type="p2sh-segwit")
wonline.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
self.generate(mining_node, nblocks=1)
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = wonline.listunspent(addresses=[offline_addr])
raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
self.generate(mining_node, 1)
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
wonline.unloadwallet()
# Reconnect
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
def assert_change_type(self, psbtx, expected_type):
"""Assert that the given PSBT has a change output with the given type."""
        # The decodepsbt RPC is stateless and independent of any settings, so we can always just call it on the first node
decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"])
changepos = psbtx["changepos"]
assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type)
def run_test(self):
# Create and fund a raw tx for sending 10 BTC
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# If inputs are specified, do not automatically add more:
utxo1 = self.nodes[0].listunspent()[0]
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']
assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)
# Inputs argument can be null
self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})
        # Node 1 should not be able to add anything to it, but should still return the psbt unchanged
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
        # Node 0 should not be able to sign the transaction while the wallet is locked
self.nodes[0].encryptwallet("password")
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].walletprocesspsbt, psbtx)
# Node 0 should be able to process without signing though
unsigned_tx = self.nodes[0].walletprocesspsbt(psbtx, False)
assert_equal(unsigned_tx['complete'], False)
self.nodes[0].walletpassphrase(passphrase="password", timeout=1000000)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=False)['psbt']
finalized_tx = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=True)['psbt']
assert signed_tx != finalized_tx
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Manually selected inputs can be locked:
assert_equal(len(self.nodes[0].listlockunspent()), 0)
utxo1 = self.nodes[0].listunspent()[0]
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]
assert_equal(len(self.nodes[0].listlockunspent()), 1)
# Locks are ignored for manually selected inputs
self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
# Setup watchonly wallets
self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[2].get_wallet_rpc('wmulti')
# Create all the addresses
p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
if not self.options.descriptors:
wmulti.importaddress(p2sh)
wmulti.importaddress(p2wsh)
wmulti.importaddress(p2sh_p2wsh)
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.generate(self.nodes[0], 6)
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['address'] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2pkh:
p2pkh_pos = out['n']
inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}]
outputs = [{self.nodes[1].getnewaddress(): 29.99}]
# spend single key from node 1
created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs)
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
# Make sure it has both types of UTXOs
decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
assert 'non_witness_utxo' in decoded['inputs'][0]
assert 'witness_utxo' in decoded['inputs'][0]
# Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
assert_equal(decoded['fee'], created_psbt['fee'])
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 BTC/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
assert_approx(res1["fee"], 0.055, 0.005)
res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
assert_approx(res2["fee"], 0.055, 0.005)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True})
assert_approx(res3["fee"], 0.00000381, 0.0000001)
res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
assert_approx(res4["fee"], 0.00000381, 0.0000001)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid")
for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"])
self.log.info("Test invalid fee rate settings")
for param, value in {("fee_rate", 100000), ("feeRate", 1)}:
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount out of range",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount is not a number or string",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True})
# Test fee rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True})
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (PART/kvB)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})
for param in ["feeRate", "fee_rate"]:
self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True})
self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})
self.log.info("- raises RPC error with invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
for mode in ["", "foo", Decimal("3.141592")]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})
self.log.info("- raises RPC error with invalid conf_target settings")
for mode in ["unset", "economical", "conservative"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "", "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
for n in [-1, 0, 1009]:
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})
self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error")
# previously this was silently capped at -maxtxfee
for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items():
msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add})
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add})
self.log.info("Test various PSBT operations")
# partially sign multisig things with node 1
psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, options={'changeAddress': self.nodes[1].getrawchangeaddress()})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# Unload wmulti, we don't need it anymore
wmulti.unloadwallet()
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a non-psbt with signatures cannot be converted
# Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
# We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.generate(self.nodes[0], 6)[0]
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
# Check that BIP32 path was added
assert "bip32_derivs" in psbt1_decoded['inputs'][0]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Check that BIP32 paths were not added
assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
# Sign PSBTs (workaround issue #18039)
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.generate(self.nodes[0], 6)
        # Test additional args in walletcreatefundedpsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set and RBF explicitly enabled
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True})
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
        # Make sure a change address from a wallet without P2SH inner-script access still results in success
        # when attempting BnB coin selection
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Make sure the wallet's change type is respected by default
small_output = {self.nodes[0].getnewaddress():0.1}
psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_native, "witness_v0_keyhash")
psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_legacy, "pubkeyhash")
# Make sure the change type of the wallet can also be overwritten
psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"})
self.assert_change_type(psbtx_np2wkh, "scripthash")
# Make sure the change type cannot be specified if a change address is given
invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()}
assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True})
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# Make sure unsafe inputs are included if specified
self.nodes[2].createwallet(wallet_name="unsafe")
wunsafe = self.nodes[2].get_wallet_rpc("unsafe")
self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2)
self.sync_mempools()
assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}])
wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True})
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
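        # rpc_psbt.json bundles the BIP 174 fixture sets consumed below: invalid and valid PSBT strings
        # plus creator, signer, combiner, finalizer and extractor vectors.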
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet(wallet_name="wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'], True, "ALL")['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
# TODO: Re-enable this for segwit v1
# self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
def test_psbt_input_keys(psbt_input, keys):
"""Check that the psbt input has only the expected keys."""
assert_equal(set(keys), set(psbt_input.keys()))
# Create a PSBT. None of the inputs are filled initially
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
test_psbt_input_keys(decoded['inputs'][0], [])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.generate(self.nodes[0], 6)
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
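        # Assuming joinpsbts shuffles inputs and outputs uniformly, the 4 inputs and 2 outputs allow
        # 4! * 2! = 48 orderings, so ten joins all matching the original would be a ~1-in-48^10 coincidence.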
shuffled = False
for _ in range(10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
break
assert shuffled
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.generate(self.nodes[0], 6)[0]
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check fee and size things
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
self.log.info("PSBT with invalid values should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
assert_equal(analysis['next'], 'finalizer')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
self.log.info("Test that we can fund psbts with external inputs specified")
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
self.nodes[1].createwallet("extfund")
wallet = self.nodes[1].get_wallet_rpc("extfund")
# Make a weird but signable script. sh(pkh()) descriptor accomplishes this
desc = descsum_create("sh(pkh({}))".format(privkey))
if self.options.descriptors:
res = self.nodes[0].importdescriptors([{"desc": desc, "timestamp": "now"}])
else:
res = self.nodes[0].importmulti([{"desc": desc, "timestamp": "now"}])
assert res[0]["success"]
addr = self.nodes[0].deriveaddresses(desc)[0]
addr_info = self.nodes[0].getaddressinfo(addr)
self.nodes[0].sendtoaddress(addr, 10)
self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10)
self.generate(self.nodes[0], 6)
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
assert_raises_rpc_error(-4, "Insufficient funds", wallet.walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 15})
# But funding should work when the solving data is provided
psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
signed = wallet.walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
self.nodes[0].finalizepsbt(signed['psbt'])
psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data":{"descriptors": [desc]}})
signed = wallet.walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
final = self.nodes[0].finalizepsbt(signed['psbt'], False)
dec = self.nodes[0].decodepsbt(signed["psbt"])
for i, txin in enumerate(dec["tx"]["vin"]):
if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]:
input_idx = i
break
psbt_in = dec["inputs"][input_idx]
# Calculate the input weight
# (prevout + sequence + length of scriptSig + 2 bytes buffer) * 4 + len of scriptwitness
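        # The 41 non-witness bytes are the 36-byte outpoint (32-byte txid + 4-byte vout), the 4-byte nSequence
        # and a 1-byte scriptSig length prefix; non-witness bytes weigh 4 WU each, witness bytes 1 WU each.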
len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0
len_scriptwitness = len(psbt_in["final_scriptwitness"]["hex"]) // 2 if "final_scriptwitness" in psbt_in else 0
input_weight = ((41 + len_scriptsig + 2) * 4) + len_scriptwitness
low_input_weight = input_weight // 2
high_input_weight = input_weight * 2
# Input weight error conditions
assert_raises_rpc_error(
-8,
"Input weights should be specified in inputs rather than in options.",
wallet.walletcreatefundedpsbt,
inputs=[ext_utxo],
outputs={self.nodes[0].getnewaddress(): 15},
options={"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]}
)
# Funding should also work if the input weight is provided
psbt = wallet.walletcreatefundedpsbt(
inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}],
outputs={self.nodes[0].getnewaddress(): 15},
options={"add_inputs": True}
)
signed = wallet.walletprocesspsbt(psbt["psbt"])
signed = self.nodes[0].walletprocesspsbt(signed["psbt"])
final = self.nodes[0].finalizepsbt(signed["psbt"])
assert self.nodes[0].testmempoolaccept([final["hex"]])[0]["allowed"]
# Reducing the weight should have a lower fee
psbt2 = wallet.walletcreatefundedpsbt(
inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}],
outputs={self.nodes[0].getnewaddress(): 15},
options={"add_inputs": True}
)
assert_greater_than(psbt["fee"], psbt2["fee"])
# Increasing the weight should have a higher fee
psbt2 = wallet.walletcreatefundedpsbt(
inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
outputs={self.nodes[0].getnewaddress(): 15},
options={"add_inputs": True}
)
assert_greater_than(psbt2["fee"], psbt["fee"])
# The provided weight should override the calculated weight when solving data is provided
psbt3 = wallet.walletcreatefundedpsbt(
inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
outputs={self.nodes[0].getnewaddress(): 15},
options={'add_inputs': True, "solving_data":{"descriptors": [desc]}}
)
assert_equal(psbt2["fee"], psbt3["fee"])
# Import the external utxo descriptor so that we can sign for it from the test wallet
if self.options.descriptors:
res = wallet.importdescriptors([{"desc": desc, "timestamp": "now"}])
else:
res = wallet.importmulti([{"desc": desc, "timestamp": "now"}])
assert res[0]["success"]
# The provided weight should override the calculated weight for a wallet input
psbt3 = wallet.walletcreatefundedpsbt(
inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
outputs={self.nodes[0].getnewaddress(): 15},
options={"add_inputs": True}
)
assert_equal(psbt2["fee"], psbt3["fee"])
if __name__ == '__main__':
PSBTTest().main()
|
|
from collections import deque
import os
from time import sleep
import numpy as np
import yaml
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from robovis import *
from robovis import RVArmVis
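# Multiplicative step between neighbouring ghost configurations: each ghost's parameter value
# differs from its neighbour by 8%.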
offset_increment = 1.08
start_param = 'elevator_length'
class RVWindow(QMainWindow):
def __init__(self):
        QMainWindow.__init__(self)
self._active = True
central_widget = QWidget()
self.setCentralWidget(central_widget)
layout = QHBoxLayout(central_widget)
layout.setContentsMargins(0,0,0,0)
self.initMenu()
# Core configuration for the arm (the one that gets modified directly)
self.current_config = RVConfig()
self.current_config.subscribe('changed', self.configModified)
# leftFiller = QWidget()
paramPane = RVParamPane(self, self.current_config)
# Graphics
self.scene = QGraphicsScene()
self.view = RVView(self.scene)
# Fill in scene
self.ik = RVIK(self.current_config)
self.createOutlines()
self.heatmap = RVHeatmap(self.scene, self.ik)
self.histogram = RVLoadHistogram(self.ik)
self.ghost_outlines = deque()
self.main_outline = RVOutline(
self.scene,
color=Qt.white,
thickness=4)
self.main_outline.update(self.ik)
self.show_heatmap = True
def toggleHeatmap():
self.show_heatmap = not self.show_heatmap
if self.show_heatmap:
self.heatmap.show()
else:
self.heatmap.hide()
self.show_ghosts = True
def toggleGhosts():
self.show_ghosts = not self.show_ghosts
if self.show_ghosts:
for outline in self.outlines:
outline.show()
else:
for outline in self.outlines:
outline.hide()
# Arm vis
self.arm_vis = RVArmVis(self.current_config, self.view)
self.selected_arm_vis = RVArmVis(self.current_config,
self.view,
thickness=2,
color=QColor(180, 180, 180),
show_forces=False,
show_coords=False)
# Fill in layout
self.selection_pane = RVSelectionPane(self.selected_arm_vis, self.arm_vis,
self.main_outline, self.outlines)
layout.addWidget(self.selection_pane)
layout.addWidget(self.view, 1)
splitter = QWidget()
splitter_layout = QVBoxLayout(splitter)
splitter_layout.setContentsMargins(0,0,0,0)
layout.addWidget(splitter)
heatmap_button = QPushButton('Toggle Heatmap')
heatmap_button.clicked.connect(toggleHeatmap)
ghosts_button = QPushButton('Toggle Ghost Outlines')
ghosts_button.clicked.connect(toggleGhosts)
splitter_layout.addWidget(heatmap_button)
splitter_layout.addWidget(ghosts_button)
splitter_layout.addWidget(paramPane)
splitter_layout.addWidget(self.histogram)
# Hook up the view mouse events
self.view.subscribe('mouseMove', self.arm_vis.handleMouseMove)
self.view.subscribe('mouseLeave', lambda e: self.arm_vis.clearGraphics())
self.view.subscribe('mousePress', self.viewClick)
self.current_param = start_param
self.createIKPool()
self.updateGhosts()
QTimer.singleShot(0, self.asyncPoll)
def viewClick(self, event):
if event.button() == Qt.LeftButton:
pt = self.view.mapToScene(event.pos())
self.selected_arm_vis.changeGoal([pt.x(), pt.y()])
def initMenu(self):
exitAction = QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit RoboVis')
exitAction.triggered.connect(self.close)
loadAction = QAction('&Load Config', self)
loadAction.setShortcut('Ctrl+O')
loadAction.setStatusTip('Load an existing configuration file')
loadAction.triggered.connect(self.loadConfig)
saveAction = QAction('&Save Config', self)
saveAction.setShortcut('Ctrl+S')
saveAction.setStatusTip('Save the current configuration to a file')
saveAction.triggered.connect(self.saveConfig)
self.statusBar()
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(loadAction)
fileMenu.addAction(saveAction)
fileMenu.addAction(exitAction)
self.setWindowTitle('RoboVis')
def saveConfig(self):
'''Saves the current configuration using a system file dialog'''
path = QFileDialog.getSaveFileName(self, 'Save file',
'',"YAML files (*.yml *.yaml)")[0]
if path != '':
with open(path, 'w') as file:
data = yaml.dump(
self.current_config.getRaw(),
default_flow_style=False,
explicit_start=True)
file.write(data)
print('Saved config to {0}'.format(path))
def loadConfig(self):
'''Loads a configuration using a system file dialog'''
path = QFileDialog.getOpenFileName(self, 'Open file',
'', 'YAML files (*.yml *.yaml)')[0]
if path != '':
with open(path, 'r') as file:
                raw = yaml.safe_load(file.read())
self.current_config.loadRaw(raw)
print('Loaded config from {0}'.format(path))
def updateGhosts(self):
p = self.current_param
q = self.solvers[p]
current_val = self.current_config[p].value
modified = False
# Identify out-of-range solvers
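        # A ghost counts as out of range once its value has drifted more than four increments
        # away from the currently configured value.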
cleared_low = cleared_high = 0
for solver in q:
if solver.data['val'] < current_val / offset_increment**4:
cleared_low += 1
modified = True
elif solver.data['val'] > current_val * offset_increment**4:
cleared_high += 1
modified = True
# Flip out-of-range solvers to other side
for i in range(cleared_low):
top = q[-1]
solver = q.popleft()
q.append(solver)
# Begin solving
new_config = RVConfig(self.current_config)
new_val = top.data['val'] * offset_increment
new_config[p].value = new_val
solver.data['val'] = new_val
solver.solveAsync(new_config)
for i in range(cleared_high):
bottom = q[0]
solver = q.pop()
q.appendleft(solver)
# Begin solving
new_config = RVConfig(self.current_config)
new_val = bottom.data['val'] / offset_increment
new_config[p].value = new_val
solver.data['val'] = new_val
solver.solveAsync(new_config)
# Resolve any changes
if modified:
self.latchOutlines()
# Update colors
for outline in self.outlines:
val = outline.solver.data['val']
diff = val - current_val
denom = abs(current_val*offset_increment**3 - current_val)
max_dim = 800
if denom > 0:
norm_diff = abs(diff) / denom
else:
norm_diff = 1
norm_diff = 1-norm_diff
if diff < 0:
color = QColor(50, 50, 255, norm_diff*255)
outline.setColor(color)
else:
color = QColor(230, 230, 50, norm_diff*255)
outline.setColor(color)
def setCurrentParam(self, param):
if param not in self.solvers.keys():
print('Warning: param ', param, ' not currently solved for')
else:
self.current_param = param
self.latchOutlines()
self.updateGhosts()
self.selection_pane.update()
def configModified(self):
'''Call when the configuration has been modified - regenerates the outline(s)'''
# self.selected_arm_vis.update()
self.solvers['main'][0].solveLocal(self.current_config)
self.updateGhosts()
self.solvePerpendicular()
def createOutlines(self):
self.outlines = deque()
for i in range(6):
self.outlines.append(RVOutline(self.scene,
color=Qt.white,
thickness=2.5,
style=Qt.DashLine))
def createIKPool(self):
# 'None' yields automatic sizing (enough to use all available cores)
self.ik_pool = RVWorkerPool(None)
self.solvers = {}
params = [
'min_load',
'actuator_torque',
'elevator_torque',
'rod_ratio',
'forearm_length',
'elevator_length',
]
# Create the full set of solvers across all parameters
for p in params:
q = self.solvers[p] = deque()
# 4 each of higher and lower slots
for i in range(8):
self.solvers[p].append(RVSolver(self.ik_pool))
self.latchOutlines()
self.solveParamSet(self.current_param)
self.solvePerpendicular()
# The main/central solver is in a section of its own
self.solvers['main'] = [RVSolver(self.ik_pool)]
self.solvers['main'][0].subscribe('ready', self.ikComplete)
def solvePerpendicular(self):
'''Starts pre-solving ghosts for the additional parameters'''
for p, q in self.solvers.items():
if p in ('main', self.current_param):
continue
else:
self.solveParamSet(p)
def solveParamSet(self, param):
'''Begins solving ghost outlines for the given parameter'''
p = param
q = self.solvers[p]
# Iterate through offset parameters and start solvers
upper_val = lower_val = self.current_config[p].value
for i in range(4):
config_less = RVConfig(self.current_config)
config_more = RVConfig(self.current_config)
upper_val *= offset_increment
lower_val /= offset_increment
config_less[p].value = lower_val
config_more[p].value = upper_val
lower_solver = q[3-i]
upper_solver = q[4+i]
lower_solver.solveAsync(config_less)
upper_solver.solveAsync(config_more)
lower_solver.data['val'] = lower_val
upper_solver.data['val'] = upper_val
def latchOutlines(self):
'''Latches outlines from outline pool to solvers for current param'''
for outline in self.outlines:
if outline.solver:
outline.solver.removeOutline()
p = self.current_param
q = self.solvers[p]
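        # Only the three ghosts nearest the current value on each side get a visible outline;
        # the outermost solvers keep pre-solving without one.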
for i in range(3):
outline_l = self.outlines[i]
outline_r = self.outlines[3 + i]
q[3 - i].setOutline(outline_l)
q[4 + i].setOutline(outline_r)
def asyncPoll(self):
while self._active:
sleep(0.01)
self.ik_pool.poll()
            # See if any solvers have finished; results are automatically
            # cascaded out
            for p, solver_set in self.solvers.items():
                for solver in solver_set:
solver.poll()
qApp.processEvents()
def ikComplete(self, ik):
'''Called when the main solver completes'''
self.main_outline.update(ik)
self.heatmap.update(ik)
self.histogram.update(ik)
self.selected_arm_vis.update()
def sizeHint(self):
return QSize(1280, 720)
def closeEvent(self, event):
# Smash up our async workers
self.ik_pool.terminate()
self._active = False
|
|
from os import path
from django.conf import settings
from django.shortcuts import render, redirect
from django.views.decorators.cache import never_cache
from django.utils.translation import LANGUAGE_SESSION_KEY, check_for_language, ugettext_lazy as _
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset, password_reset_confirm, logout_then_login, login as contrib_login
from logging import getLogger
from functools import partial
from gui.vm.guacamole import GuacamoleAuth
from gui.models import User
from gui.accounts.forms import LoginForm, ForgotForm, SMSSendPasswordResetForm, PasswordResetForm
from gui.accounts.utils import get_client_ip, clear_attempts_cache
from gui.decorators import logout_required
from api.decorators import setting_required
from api.email import sendmail
from api.sms.views import internal_send as send_sms
from api.sms.exceptions import SMSError
from vms.models import DefaultDc
logger = getLogger(__name__)
auth_logger = getLogger('gui.auth')
# noinspection PyShadowingBuiltins
def setlang(request):
"""
Sets a user's language preference and redirects to a given URL or, by default, back to the previous page.
"""
next = request.GET.get('next', None)
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = redirect(next)
lang_code = request.GET.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN)
return response
@logout_required
@setting_required('REGISTRATION_ENABLED')
def registration_done(request):
"""
Confirmation page after successful registration.
"""
dc_settings = request.dc.settings
dc1_settings = DefaultDc().settings
text_blocks = [
_('Thank you for registering at %s.') % dc_settings.SITE_NAME,
_('You should receive an email shortly. Please click on the link in the email to activate your account.'),
_('If you don\'t receive an email, please check your spam folder.'),
]
if dc1_settings.SMS_REGISTRATION_ENABLED:
text_blocks.append(_('Once your account is active, you will receive a text message (SMS) with your password.'))
return render(request, 'gui/note.html', {
'header': _('Registration almost complete!'),
'blocks': text_blocks,
})
def send_post_register_email(request, user):
# Send optional email after successful registration (issue #261)
template_path = path.join(settings.PROJECT_DIR, 'gui', 'templates')
subject = 'gui/accounts/post_register_subject.txt'
subject_path = path.join(template_path, subject)
body_file_prefix = 'gui/accounts/post_register_email'
body = None
if path.exists(subject_path) and path.exists(path.join(template_path, body_file_prefix + '.html')):
body = body_file_prefix + '.html'
elif path.exists(subject_path) and path.exists(path.join(template_path, body_file_prefix + '.txt')):
body = body_file_prefix + '.txt'
if body:
sendmail(user, subject, body, dc=request.dc)
else:
        logger.info('Post registration email subject template: "%s" or body template: "%s" does not exist.' %
                    (subject_path, path.join(template_path, body_file_prefix + '.[html|txt]')))
def send_registration_sms(request, profile, password):
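    """Send the freshly generated password to the user's phone; return True on success, False if the SMS gateway raises an SMSError."""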
msg = _('Welcome to %(site_name)s, your new password is: %(password)s') % {
'site_name': request.dc.settings.SITE_NAME,
'password': password
}
try:
send_sms(profile.phone, msg)
except SMSError:
return False
else:
return True
@logout_required
@setting_required('REGISTRATION_ENABLED')
@never_cache
def registration_check(request, uidb64=None, token=None):
"""
    Email verification page; generates a password and sends it to the user.
"""
assert uidb64 is not None and token is not None
success = False
token_verified = False
dc_settings = request.dc.settings
dc1_settings = DefaultDc().settings
sms_registration = dc1_settings.SMS_REGISTRATION_ENABLED
try:
user = User.objects.get(id=urlsafe_base64_decode(uidb64))
profile = user.userprofile
except (ValueError, OverflowError, User.DoesNotExist):
user = None
profile = None
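    # Only honour the activation token while the user has never logged in (last_login not later than
    # date_joined) and the token matches the stored email_token.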
if profile and user.last_login <= user.date_joined and profile.email_token == token:
token_verified = True
# Set default user type
profile.usertype = dc1_settings.PROFILE_USERTYPE_DEFAULT
# Email address is verified
profile.email_token = ''
profile.email_verified = True
# This may look strange - setting the phone_verified before the user logs in. It is not :) Actually we have
# the last_login field, which should be set to None at this point. So we know that the user never logged in and
# after the user logs in we would set phone_verified to True anyway.
if sms_registration:
profile.phone_verified = True
profile.save()
if sms_registration:
# Generate new password
password = User.objects.make_random_password(length=7)
user.set_password(password)
else:
password = None
user.is_active = True
user.save()
if password:
# Send new password to user via SMS
success = send_registration_sms(request, profile, password)
else:
success = True
try:
send_post_register_email(request, user)
except Exception as exc:
logger.exception(exc)
return render(request, 'gui/accounts/register_check.html', {
'user': user,
'profile': profile,
'sms_registration': sms_registration,
'success': success,
'token_verified': token_verified,
'site_name': dc_settings.SITE_NAME,
'support_email': dc_settings.SUPPORT_EMAIL,
})
@logout_required
@setting_required('REGISTRATION_ENABLED')
def forgot_passwd(request):
"""
User password reset page.
"""
dc_settings = request.dc.settings
return password_reset(
request,
template_name='gui/accounts/forgot.html',
email_template_name='gui/accounts/forgot_email.txt',
subject_template_name='gui/accounts/forgot_subject.txt',
password_reset_form=partial(ForgotForm, request),
post_reset_redirect=reverse('forgot_done'),
from_email=dc_settings.DEFAULT_FROM_EMAIL,
current_app='gui',
extra_context={
'e_site_name': dc_settings.SITE_NAME,
'e_site_link': dc_settings.SITE_LINK,
})
@logout_required
@setting_required('REGISTRATION_ENABLED')
def forgot_passwd_done(request):
"""
Confirmation page after successful password reset request.
"""
return render(request, 'gui/note.html', {
'header': _('Password reset instructions!'),
'blocks': (
_('We\'ve emailed you instructions for setting your password. You should be receiving them shortly.'),
_('If you don\'t receive an email, please make sure you\'ve entered the address you registered with, and '
'check your spam folder.'),
)
})
@logout_required
@setting_required('REGISTRATION_ENABLED')
@never_cache
def forgot_passwd_check(request, uidb64=None, token=None):
"""
    Page that checks the hash in a password reset link and generates a new password, which is sent via SMS to the user.
"""
assert uidb64 is not None and token is not None
dc1_settings = DefaultDc().settings
sms_registration = dc1_settings.SMS_REGISTRATION_ENABLED
if sms_registration:
set_password_form = SMSSendPasswordResetForm
else:
set_password_form = PasswordResetForm
if request.method == 'POST':
try:
user = User.objects.get(id=urlsafe_base64_decode(uidb64))
profile = user.userprofile
except (ValueError, OverflowError, User.DoesNotExist):
profile = None
if profile and profile.email_token == token:
            # Email address is verified; we can't compare to the token, as the registration token is different from the reset one.
profile.email_token = ''
profile.email_verified = True
            # This may look strange - setting phone_verified before the user logs in. It is not :) We are sending the
            # new password to the phone number in the profile; after the user logs in we would set phone_verified to True anyway.
if sms_registration:
profile.phone_verified = True
profile.save()
return password_reset_confirm(
request,
uidb64=uidb64,
token=token,
template_name='gui/accounts/forgot_check.html',
set_password_form=set_password_form,
post_reset_redirect=reverse('forgot_check_done'),
current_app='gui',
extra_context={
'sms_registration': sms_registration,
}
)
@logout_required
@setting_required('REGISTRATION_ENABLED')
def forgot_passwd_check_done(request):
"""
Confirmation page after successful password reset.
"""
dc1_settings = DefaultDc().settings
if dc1_settings.SMS_REGISTRATION_ENABLED:
        text_blocks = (_('Your password has been reset and sent to your phone number via text message (SMS).'),)
else:
text_blocks = ()
return render(request, 'gui/note.html', {
'header': _('Password reset!'),
'blocks': text_blocks,
'links': ({'label': 'You may go ahead and log in now.', 'url': reverse('login')},),
})
@logout_required
def login(request):
"""
    Log users in to the system and redirect them to the dashboard, or show a proper error message on failure.
"""
response = contrib_login(request, 'gui/accounts/login.html', authentication_form=partial(LoginForm, request))
# Setup i18n settings into session
if request.method == 'POST':
user = request.user
if user.is_authenticated():
auth_logger.info('User %s successfully logged in from %s (%s)', user, get_client_ip(request),
request.META.get('HTTP_USER_AGENT', ''))
user.userprofile.activate_locale(request)
clear_attempts_cache(request, user.username)
else:
auth_logger.warning('User %s login failed from %s (%s)', request.POST.get('username', None),
get_client_ip(request), request.META.get('HTTP_USER_AGENT', ''))
return response
@login_required
def logout(request):
"""
    Log users out (destroy all sessions) and redirect them to the main page.
"""
# Save profile and user object
user = request.user
profile = request.user.userprofile
# Create guacamole object attached to request.user.username and with current guacamole password
g = GuacamoleAuth(request)
# Do a guacamole logout
gcookie = g.logout()
# We can then remove the cached configuration
g.del_auth()
# Get the response object
response = logout_then_login(request)
# Remove the guacamole cookie from response object
response.delete_cookie(**gcookie['cookie'])
    # Set up the i18n settings of the logged-in user in the session of the now-anonymous user
profile.activate_locale(request)
# Get auth logger and log the logout :)
auth_logger.info('User %s successfully logged out from %s (%s)',
user, get_client_ip(request), request.META.get('HTTP_USER_AGENT', ''))
# Bye bye
return response
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Migration(SchemaMigration):
depends_on = (
("images", "0001_initial"),
("articles", "0001_initial"),
("containers", "0001_initial"),
)
def forwards(self, orm):
# Adding model 'Poll'
db.create_table(u'polls_poll', (
(u'container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['containers.Container'], unique=True, primary_key=True)),
('multiple_choices', self.gf('django.db.models.fields.BooleanField')(default=False)),
('max_multiple_choices', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('min_multiple_choices', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('display_choice_images', self.gf('django.db.models.fields.BooleanField')(default=False)),
('headline', self.gf('django.db.models.fields.TextField')(blank=True)),
('date_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('order', self.gf('django.db.models.fields.IntegerField')(default=0)),
('show_results', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'polls', ['Poll'])
# Adding model 'PollPost'
db.create_table(u'polls_pollpost', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('post', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='pollpost_post', null=True, on_delete=models.SET_NULL, to=orm['articles.Post'])),
('poll', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='poll', null=True, on_delete=models.SET_NULL, to=orm['polls.Poll'])),
))
db.send_create_signal(u'polls', ['PollPost'])
# Adding model 'Choice'
db.create_table(u'polls_choice', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polls.Poll'])),
('choice', self.gf('django.db.models.fields.CharField')(max_length=255)),
('votes', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
('image', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='choice_image', null=True, on_delete=models.SET_NULL, to=orm['images.Image'])),
('order', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal(u'polls', ['Choice'])
def backwards(self, orm):
# Deleting model 'Poll'
db.delete_table(u'polls_poll')
# Deleting model 'PollPost'
db.delete_table(u'polls_pollpost')
# Deleting model 'Choice'
db.delete_table(u'polls_choice')
models = {
u'%s.%s' % (User._meta.app_label, User._meta.module_name): {
'Meta': {'object_name': User.__name__},
},
u'articles.album': {
'Meta': {'object_name': 'Album'},
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'})
},
u'articles.post': {
'Meta': {'object_name': 'Post'},
'albums': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'post_albums'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['articles.Album']"}),
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'related_posts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'post_relatedposts'", 'to': u"orm['containers.Container']", 'through': u"orm['articles.PostRelated']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'})
},
u'articles.postrelated': {
'Meta': {'ordering': "('order',)", 'object_name': 'PostRelated'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'postrelated_post'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['articles.Post']"}),
'related': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'postrelated_related'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['containers.Container']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'channels.channel': {
'Meta': {'ordering': "['name', 'parent__id', 'published']", 'unique_together': "(('site', 'long_slug', 'slug', 'parent'),)", 'object_name': 'Channel'},
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_in_main_rss': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'layout': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '250', 'db_index': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'long_slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'subchannel'", 'null': 'True', 'to': u"orm['channels.Channel']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'show_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'containers.container': {
'Meta': {'ordering': "['-date_available']", 'unique_together': "(('site', 'child_class', 'channel_long_slug', 'slug'),)", 'object_name': 'Container'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']"}),
'channel_long_slug': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'channel_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '140', 'null': 'True', 'blank': 'True'}),
'child_app_label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_class': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_module': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '120', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'hat': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['images.Image']", 'null': 'True', 'through': u"orm['containers.ContainerImage']", 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'containers_container_mainimage'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
'main_image_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_containers.container_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'show_on_root_channel': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'containers.containerimage': {
'Meta': {'ordering': "('order',)", 'object_name': 'ContainerImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'images.image': {
'Meta': {'object_name': 'Image'},
'archive': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'crop_example': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'crop_x1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_x2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fit_in': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'halign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'smart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)}),
'valign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'})
},
u'polls.choice': {
'Meta': {'ordering': "['order']", 'object_name': 'Choice'},
'choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'choice_image'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polls.Poll']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'polls.poll': {
'Meta': {'ordering': "['order']", 'object_name': 'Poll', '_ormbases': [u'containers.Container']},
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['containers.Container']", 'unique': 'True', 'primary_key': 'True'}),
'date_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'display_choice_images': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'headline': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'max_multiple_choices': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'min_multiple_choices': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'multiple_choices': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'posts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'poll_post'", 'to': u"orm['articles.Post']", 'through': u"orm['polls.PollPost']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'show_results': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'polls.pollpost': {
'Meta': {'object_name': 'PollPost'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'poll'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['polls.Poll']"}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pollpost_post'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['articles.Post']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['polls']
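# A minimal usage sketch (assuming a standard Django 1.x project with 'south'
# in INSTALLED_APPS; the commands below are the stock South management
# commands, not something defined by this migration):
#
#   python manage.py migrate polls          # applies forwards()
#   python manage.py migrate polls zero     # reverts the app via backwards()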
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/signaling-protocols/segment-routing/interfaces/interface/sid-counters/sid-counter/forwarding-classes/forwarding-class/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Per-SID, per forwarding class counters for Segment Routing
with the MPLS dataplane
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__exp",
"__in_pkts",
"__in_octets",
"__out_pkts",
"__out_octets",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__exp = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="exp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__in_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"signaling-protocols",
"segment-routing",
"interfaces",
"interface",
"sid-counters",
"sid-counter",
"forwarding-classes",
"forwarding-class",
"state",
]
def _get_exp(self):
"""
Getter method for exp, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/exp (uint8)
YANG Description: The value of the MPLS EXP (experimental) or Traffic Class bits that the
SID statistics relate to. Packets received with an MPLS label value
equal to the SID's MPLS label and EXP bits equal to this value
should be counted towards the associated ingress statistics. Packets
that are forwarded to the destination MPLS label corresponding to the
SID should be counted towards this value. In the egress direction, where
forwarding follows a SID value that requires PHP at the local node,
packets should still be counted towards the egress counters.
"""
return self.__exp
def _set_exp(self, v, load=False):
"""
Setter method for exp, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/exp (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_exp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_exp() directly.
YANG Description: The value of the MPLS EXP (experimental) or Traffic Class bits that the
SID statistics relate to. Packets received with an MPLS label value
equal to the SID's MPLS label and EXP bits equal to this value
should be counted towards the associated ingress statistics. Packets
that are forwarded to the destination MPLS label corresponding to the
SID should be counted towards this value. In the egress direction, where
forwarding follows a SID value that requires PHP at the local node,
packets should still be counted towards the egress counters.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="exp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """exp must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..7']}), is_leaf=True, yang_name="exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__exp = t
if hasattr(self, "_set"):
self._set()
def _unset_exp(self):
self.__exp = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="exp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_in_pkts(self):
"""
Getter method for in_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_pkts (yang:counter64)
YANG Description: A cumulative counter of the packets received within the context
which have matched a label corresponding to an SR Segment Identifier.
"""
return self.__in_pkts
def _set_in_pkts(self, v, load=False):
"""
Setter method for in_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_pkts (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_pkts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_pkts() directly.
YANG Description: A cumulative counter of the packets received within the context
which have matched a label corresponding to an SR Segment Identifier.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_pkts must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_pkts = t
if hasattr(self, "_set"):
self._set()
def _unset_in_pkts(self):
self.__in_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_octets(self):
"""
Getter method for in_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_octets (yang:counter64)
YANG Description: The cumulative counter of the total bytes received within the context
which have matched a label corresponding to an SR Segment Identifier
"""
return self.__in_octets
def _set_in_octets(self, v, load=False):
"""
Setter method for in_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_octets (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_octets is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_octets() directly.
YANG Description: The cumulative counter of the total bytes received within the context
which have matched a label corresponding to an SR Segment Identifier
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_octets must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_octets = t
if hasattr(self, "_set"):
self._set()
def _unset_in_octets(self):
self.__in_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_out_pkts(self):
"""
Getter method for out_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_pkts (yang:counter64)
YANG Description: A cumulative counter of the total number of packets transmitted by
the local system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
return self.__out_pkts
def _set_out_pkts(self, v, load=False):
"""
Setter method for out_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_pkts (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_out_pkts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_out_pkts() directly.
YANG Description: A cumulative counter of the total number of packets transmitted by
the local system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """out_pkts must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__out_pkts = t
if hasattr(self, "_set"):
self._set()
def _unset_out_pkts(self):
self.__out_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_out_octets(self):
"""
Getter method for out_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_octets (yang:counter64)
YANG Description: A cumulative counter of the total bytes transmitted by the local
system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
return self.__out_octets
def _set_out_octets(self, v, load=False):
"""
Setter method for out_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_octets (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_out_octets is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_out_octets() directly.
YANG Description: A cumulative counter of the total bytes transmitted by the local
system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """out_octets must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__out_octets = t
if hasattr(self, "_set"):
self._set()
def _unset_out_octets(self):
self.__out_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
exp = __builtin__.property(_get_exp)
in_pkts = __builtin__.property(_get_in_pkts)
in_octets = __builtin__.property(_get_in_octets)
out_pkts = __builtin__.property(_get_out_pkts)
out_octets = __builtin__.property(_get_out_octets)
_pyangbind_elements = OrderedDict(
[
("exp", exp),
("in_pkts", in_pkts),
("in_octets", in_octets),
("out_pkts", out_pkts),
("out_octets", out_octets),
]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/signaling-protocols/segment-routing/interfaces/interface/sid-counters/sid-counter/forwarding-classes/forwarding-class/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Per-SID, per forwarding class counters for Segment Routing
with the MPLS dataplane
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__exp",
"__in_pkts",
"__in_octets",
"__out_pkts",
"__out_octets",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__exp = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="exp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__in_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__in_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
self.__out_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"signaling-protocols",
"segment-routing",
"interfaces",
"interface",
"sid-counters",
"sid-counter",
"forwarding-classes",
"forwarding-class",
"state",
]
def _get_exp(self):
"""
Getter method for exp, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/exp (uint8)
YANG Description: The value of the MPLS EXP (experimental) or Traffic Class bits that the
SID statistics relate to. Packets received with an MPLS label value
equal to the SID's MPLS label and EXP bits equal to this value
should be counted towards the associated ingress statistics. Packets
that are forwarded to the destination MPLS label corresponding to the
SID should be counted towards this value. In the egress direction, where
forwarding follows a SID value that requires PHP at the local node,
packets should still be counted towards the egress counters.
"""
return self.__exp
def _set_exp(self, v, load=False):
"""
Setter method for exp, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/exp (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_exp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_exp() directly.
YANG Description: The value of the MPLS EXP (experimental) or Traffic Class bits that the
SID statistics relate to. Packets received with an MPLS label value
equal to the SID's MPLS label and EXP bits equal to this value
should be counted towards the associated ingress statistics. Packets
that are forwarded to the destination MPLS label corresponding to the
SID should be counted towards this value. In the egress direction, where
forwarding follows a SID value that requires PHP at the local node,
packets should still be counted towards the egress counters.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="exp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """exp must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..7']}), is_leaf=True, yang_name="exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__exp = t
if hasattr(self, "_set"):
self._set()
def _unset_exp(self):
self.__exp = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="exp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_in_pkts(self):
"""
Getter method for in_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_pkts (yang:counter64)
YANG Description: A cumulative counter of the packets received within the context
which have matched a label corresponding to an SR Segment Identifier.
"""
return self.__in_pkts
def _set_in_pkts(self, v, load=False):
"""
Setter method for in_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_pkts (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_pkts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_pkts() directly.
YANG Description: A cumulative counter of the packets received within the context
which have matched a label corresponding to an SR Segment Identifier.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_pkts must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_pkts = t
if hasattr(self, "_set"):
self._set()
def _unset_in_pkts(self):
self.__in_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_in_octets(self):
"""
Getter method for in_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_octets (yang:counter64)
YANG Description: The cumulative counter of the total bytes received within the context
which have matched a label corresponding to an SR Segment Identifier
"""
return self.__in_octets
def _set_in_octets(self, v, load=False):
"""
Setter method for in_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/in_octets (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_octets is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_octets() directly.
YANG Description: The cumulative counter of the total bytes received within the context
which have matched a label corresponding to an SR Segment Identifier
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """in_octets must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__in_octets = t
if hasattr(self, "_set"):
self._set()
def _unset_in_octets(self):
self.__in_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="in-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_out_pkts(self):
"""
Getter method for out_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_pkts (yang:counter64)
YANG Description: A cumulative counter of the total number of packets transmitted by
the local system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
return self.__out_pkts
def _set_out_pkts(self, v, load=False):
"""
Setter method for out_pkts, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_pkts (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_out_pkts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_out_pkts() directly.
YANG Description: A cumulative counter of the total number of packets transmitted by
the local system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """out_pkts must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-pkts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__out_pkts = t
if hasattr(self, "_set"):
self._set()
def _unset_out_pkts(self):
self.__out_pkts = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-pkts",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
def _get_out_octets(self):
"""
Getter method for out_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_octets (yang:counter64)
YANG Description: A cumulative counter of the total bytes transmitted by the local
system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
return self.__out_octets
def _set_out_octets(self, v, load=False):
"""
Setter method for out_octets, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing/interfaces/interface/sid_counters/sid_counter/forwarding_classes/forwarding_class/state/out_octets (yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_out_octets is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_out_octets() directly.
YANG Description: A cumulative counter of the total bytes transmitted by the local
system within the context which have a label imposed that
corresponds to an SR Segment Identifier.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """out_octets must be of a type compatible with yang:counter64""",
"defined-type": "yang:counter64",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="out-octets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter64', is_config=False)""",
}
)
self.__out_octets = t
if hasattr(self, "_set"):
self._set()
def _unset_out_octets(self):
self.__out_octets = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="out-octets",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter64",
is_config=False,
)
exp = __builtin__.property(_get_exp)
in_pkts = __builtin__.property(_get_in_pkts)
in_octets = __builtin__.property(_get_in_octets)
out_pkts = __builtin__.property(_get_out_pkts)
out_octets = __builtin__.property(_get_out_octets)
_pyangbind_elements = OrderedDict(
[
("exp", exp),
("in_pkts", in_pkts),
("in_octets", in_octets),
("out_pkts", out_pkts),
("out_octets", out_octets),
]
)
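# A minimal, hedged usage sketch of the generated bindings above (not emitted
# by pyangbind itself). The leaves are config false, so a backend would
# normally populate them through the private setters shown here.
if __name__ == "__main__":
    sid_state = state()
    sid_state._set_exp(3)            # uint8 restricted to 0..7
    sid_state._set_in_pkts(1200)     # yang:counter64
    sid_state._set_in_octets(96000)
    # Read-only properties expose the current values:
    print("exp=%s in-pkts=%s in-octets=%s"
          % (sid_state.exp, sid_state.in_pkts, sid_state.in_octets))
    # Out-of-range values are rejected by the RestrictedClassType wrapper:
    try:
        sid_state._set_exp(42)
    except ValueError:
        print("exp=42 rejected (outside 0..7)")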
|
|
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average; it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
- Unlike LeNet5, the convolutions at the second layer are fully connected
(LeNet5 used a sparse connection table between feature maps)
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
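# A small shape-bookkeeping sketch (not part of the original tutorial). Given the
# image_shape, filter_shape and poolsize arguments documented above, the layer output
# follows from a "valid" convolution followed by max-pooling; the helper name below is
# illustrative only.
def _conv_pool_output_shape(image_shape, filter_shape, poolsize=(2, 2)):
    """Return (batch, n_filters, out_h, out_w) for a LeNetConvPoolLayer."""
    batch, _, height, width = image_shape
    n_filters, _, f_h, f_w = filter_shape
    # "valid" convolution shrinks each spatial dimension by (filter size - 1),
    # then max-pooling with ignore_border=True divides it by the pooling factor
    out_h = (height - f_h + 1) // poolsize[0]
    out_w = (width - f_w + 1) // poolsize[1]
    return (batch, n_filters, out_h, out_w)
# e.g. _conv_pool_output_shape((500, 1, 28, 28), (20, 1, 5, 5)) == (500, 20, 12, 12)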
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
dataset='mnist.pkl.gz',
nkerns=[20, 50], batch_size=500):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
rng = numpy.random.RandomState(23455)
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_valid_batches /= batch_size
n_test_batches /= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (28, 28) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 28, 28))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD. Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-1
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
patience = 10000 # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print 'training @ iter = ', iter
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
evaluate_lenet5()
def experiment(state, channel):
evaluate_lenet5(state.learning_rate, dataset=state.dataset)
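# Hedged usage sketch (not part of the original tutorial): evaluate_lenet5() exposes the
# hyper-parameters documented in its docstring, so a short smoke-test run could look like
# the call below; all values here are illustrative only.
def _quick_smoke_test():
    evaluate_lenet5(learning_rate=0.1, n_epochs=2, nkerns=[8, 16], batch_size=100)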
|
|
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""The ASTNGBuilder makes astng from living object and / or from compiler.ast
With python >= 2.5, the internal _ast module is used instead
The builder is not thread safe and can't be used to parse different sources
at the same time.
"""
__docformat__ = "restructuredtext en"
import sys
from os.path import splitext, basename, dirname, exists, abspath
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from logilab.common.fileutils import norm_read
from logilab.common.modutils import modpath_from_file
from logilab.astng._exceptions import ASTNGBuildingException
from logilab.astng.raw_building import *
try:
from _ast import PyCF_ONLY_AST
def parse(string):
return compile(string, "<string>", 'exec', PyCF_ONLY_AST)
from logilab.astng._nodes_ast import TreeRebuilder
except ImportError, exc:
from compiler import parse
from logilab.astng import patchcomptransformer
from logilab.astng._nodes_compiler import TreeRebuilder
# ast NG builder ##############################################################
class ASTNGBuilder:
"""provide astng building methods
"""
def __init__(self, manager=None):
if manager is None:
from logilab.astng import MANAGER as manager
self._manager = manager
self._module = None
self._file = None
self._done = None
self.rebuilder = TreeRebuilder(manager)
self._dyn_modname_map = {'gtk': 'gtk._gtk'}
def module_build(self, module, modname=None):
"""build an astng from a living module instance
"""
node = None
self._module = module
path = getattr(module, '__file__', None)
if path is not None:
path_, ext = splitext(module.__file__)
if ext in ('.py', '.pyc', '.pyo') and exists(path_ + '.py'):
node = self.file_build(path_ + '.py', modname)
if node is None:
# this is a built-in module
# get a partial representation by introspection
node = self.inspect_build(module, modname=modname, path=path)
return node
def inspect_build(self, module, modname=None, path=None):
"""build astng from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
if modname is None:
modname = module.__name__
node = build_module(modname, module.__doc__)
node.file = node.path = path and abspath(path) or path
if self._manager is not None:
self._manager._cache[modname] = node
node.package = hasattr(module, '__path__')
self._done = {}
self.object_build(node, module)
return node
def file_build(self, path, modname=None):
"""build astng from a source code file (i.e. from an ast)
path is expected to be a python source file
"""
try:
data = norm_read(path)
except IOError, ex:
msg = 'Unable to load file %r (%s)' % (path, ex)
raise ASTNGBuildingException(msg)
self._file = path
# get module name if necessary, *before modifying sys.path*
if modname is None:
try:
modname = '.'.join(modpath_from_file(path))
except ImportError:
modname = splitext(basename(path))[0]
# build astng representation
try:
sys.path.insert(0, dirname(path))
node = self.string_build(data, modname, path)
node.file = abspath(path)
finally:
self._file = None
sys.path.pop(0)
return node
def string_build(self, data, modname='', path=None):
"""build astng from a source code stream (i.e. from an ast)"""
return self.ast_build(parse(data + '\n'), modname, path)
def ast_build(self, node, modname='', path=None):
"""build the astng from AST, return the new tree"""
if path is not None:
node_file = abspath(path)
else:
node_file = '<?>'
if modname.endswith('.__init__'):
modname = modname[:-9]
package = True
else:
package = path and path.find('__init__.py') > -1 or False
newnode = self.rebuilder.build(node, modname, node_file)
newnode.package = package
return newnode
# astng from living objects ###############################################
#
# this is actually a really minimal representation, including only Module,
# Function and Class nodes and some others as guessed
def object_build(self, node, obj):
"""recursive method which create a partial ast from real objects
(only function, class, and method are handled)
"""
if self._done.has_key(obj):
return self._done[obj]
self._done[obj] = node
for name in dir(obj):
try:
member = getattr(obj, name)
except AttributeError:
# damned ExtensionClass.Base, I know you're there !
attach_dummy_node(node, name)
continue
if ismethod(member):
member = member.im_func
if isfunction(member):
# verify this is not an imported function
if member.func_code.co_filename != getattr(self._module, '__file__', None):
attach_dummy_node(node, name, member)
continue
object_build_function(node, member, name)
elif isbuiltin(member):
# verify this is not an imported member
if self._member_module(member) != self._module.__name__:
imported_member(node, member, name)
continue
object_build_methoddescriptor(node, member, name)
elif isclass(member):
# verify this is not an imported class
if self._member_module(member) != self._module.__name__:
imported_member(node, member, name)
continue
if member in self._done:
class_node = self._done[member]
if not class_node in node.locals.get(name, ()):
node.add_local_node(class_node, name)
else:
class_node = object_build_class(node, member, name)
# recursion
self.object_build(class_node, member)
elif ismethoddescriptor(member):
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif isdatadescriptor(member):
assert isinstance(member, object)
object_build_datadescriptor(node, member, name)
elif isinstance(member, (int, long, float, str, unicode)) or member is None:
attach_const_node(node, name, member)
else:
# create an empty node so that the name is actually defined
attach_dummy_node(node, name, member)
def _member_module(self, member):
modname = getattr(member, '__module__', None)
return self._dyn_modname_map.get(modname, modname)
def imported_member(node, member, name):
"""consider a class/builtin member where __module__ != current module name
check whether the member can actually be resolved in that module and then add an import node, else use a dummy node
"""
# /!\ some classes like ExtensionClass don't have a
# __module__ attribute !
member_module = getattr(member, '__module__', '__builtin__')
try:
getattr(sys.modules[member_module], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, member_module, name)
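# Hedged usage sketch (not part of the original module): the usual entry points on
# ASTNGBuilder are string_build()/file_build() for source code and module_build() for a
# living module; the snippet below is illustrative only.
def _example_build():
    builder = ASTNGBuilder()
    # build an astng tree from a source string ...
    string_node = builder.string_build("def f():\n    return 42\n", modname='example')
    # ... or introspect a module that is already imported
    import os
    living_node = builder.module_build(os, modname='os')
    return string_node, living_node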
|
|
__author__ = 'Alfredo Saglimbeni'
from datetime import datetime
import re
import uuid
from django.forms import forms, widgets
from django.forms.widgets import MultiWidget, DateTimeInput, DateInput, TimeInput
from django.utils.formats import get_format, get_language
from django.utils.safestring import mark_safe
from django.utils.six import string_types
try:
from django.forms.widgets import to_current_timezone
except ImportError:
to_current_timezone = lambda obj: obj # passthrough, no tz support
# This should be updated as more .po files are added to the datetime picker javascript code
supported_languages = set([
'ar',
'bg',
'ca', 'cs',
'da', 'de',
'ee', 'el', 'es',
'fi', 'fr',
'he', 'hr', 'hu',
'id', 'is', 'it',
'ja',
'ko', 'kr',
'lt', 'lv',
'ms',
'nb', 'nl', 'no',
'pl', 'pt-BR', 'pt',
'ro', 'rs', 'rs-latin', 'ru',
'sk', 'sl', 'sv', 'sw',
'th', 'tr',
'ua', 'uk',
'zh-CN', 'zh-TW',
])
def get_supported_language(language_country_code):
"""Helps us get from django's 'language-countryCode' to the datepicker's 'language' if we
possibly can.
If we pass the django 'language_countryCode' through untouched then it might not
match an exact language string supported by the datepicker and would default to English which
would be worse than trying to match the language part.
"""
# Catch empty strings in case one sneaks in
if not language_country_code:
return 'en'
# Check full language & country code against the supported languages as there are dual entries
# in the list eg. zh-CN (assuming that is a language country code)
if language_country_code in supported_languages:
return language_country_code
# Grab just the language part and try that
language = language_country_code.split('-')[0]
if language in supported_languages:
return language
# Otherwise return English as the default
return 'en'
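# A few illustrative cases (derived only from the matching logic above; this helper is a
# sketch and not part of the original module).
def _get_supported_language_examples():
    assert get_supported_language('pt-BR') == 'pt-BR'  # exact language-country match
    assert get_supported_language('de-AT') == 'de'     # falls back to the language part
    assert get_supported_language('xx-YY') == 'en'     # unsupported, default to English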
dateConversiontoPython = {
'P': '%p',
'ss': '%S',
'ii': '%M',
'hh': '%H',
'HH': '%I',
'dd': '%d',
'mm': '%m',
'yy': '%y',
'yyyy': '%Y',
}
toPython_re = re.compile(r'\b(' + '|'.join(dateConversiontoPython.keys()) + r')\b')
dateConversiontoJavascript = {
'%M': 'ii',
'%m': 'mm',
'%I': 'HH',
'%H': 'hh',
'%d': 'dd',
'%Y': 'yyyy',
'%y': 'yy',
'%p': 'P',
'%S': 'ss'
}
toJavascript_re = re.compile(r'(?<!\w)(' + '|'.join(dateConversiontoJavascript.keys()) + r')\b')
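# Illustrative round-trip between the two format languages (derived only from the mapping
# tables above; this helper is a sketch and not part of the original module).
def _format_conversion_examples():
    # Python strftime specifiers -> bootstrap-datetimepicker specifiers
    js_fmt = toJavascript_re.sub(lambda m: dateConversiontoJavascript[m.group()],
                                 '%d/%m/%Y %H:%M')
    assert js_fmt == 'dd/mm/yyyy hh:ii'
    # and back again: datetimepicker specifiers -> Python strftime specifiers
    py_fmt = toPython_re.sub(lambda m: dateConversiontoPython[m.group()],
                             'dd/mm/yyyy hh:ii')
    assert py_fmt == '%d/%m/%Y %H:%M'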
BOOTSTRAP_INPUT_TEMPLATE = {
2: """
<div id="%(id)s" class="controls input-append date">
%(label)s
%(rendered_widget)s
%(clear_button)s
<span class="add-on"><i class="icon-th"></i></span>
</div>
<script type="text/javascript">
$(function(){$("#%(id)s").datetimepicker({%(options)s});});
</script>
""",
3: """
<div id="%(id)s" class="input-group date">
%(label)s
%(rendered_widget)s
%(clear_button)s
<span class="input-group-addon"><span class="glyphicon %(glyphicon)s"></span></span>
</div>
<script type="text/javascript">
$(function(){$("#%(id)s").datetimepicker({%(options)s}).find('input').addClass("form-control");});
</script>
"""
}
CLEAR_BTN_TEMPLATE = {2: """<span class="add-on"><i class="icon-remove"></i></span>""",
3: """<span class="input-group-addon"><span class="glyphicon glyphicon-remove"></span></span>"""}
LABEL_TEMPLATE = {2: """<span class="add-on">%(label)s</span>""",
3: """<span class="input-group-addon">%(label)s</span>"""}
quoted_options = set([
'format',
'startDate',
'endDate',
'startView',
'minView',
'maxView',
'todayBtn',
'language',
'pickerPosition',
'viewSelect',
'initialDate',
'weekStart',
'minuteStep',
'daysOfWeekDisabled',
])
# to translate boolean objects to JavaScript
quoted_bool_options = set([
'autoclose',
'todayHighlight',
'showMeridian',
'clearBtn',
])
def quote(key, value):
"""Certain options support string values. We want clients to be able to pass Python strings in
but we need them to be quoted in the output. Unfortunately some of those options also allow
numbers so we type check the value before wrapping it in quotes.
"""
if key in quoted_options and isinstance(value, string_types):
return "'%s'" % value
if key in quoted_bool_options and isinstance(value, bool):
return {True:'true',False:'false'}[value]
return value
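# Illustrative behaviour (derived only from quote() and the two option sets above; this
# helper is a sketch and not part of the original module).
def _quote_examples():
    assert quote('format', 'dd/mm/yyyy') == "'dd/mm/yyyy'"  # string option gets quoted
    assert quote('autoclose', True) == 'true'               # boolean becomes a JS literal
    assert quote('weekStart', 1) == 1                       # numbers pass through untouched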
class Javascript(object):
"""Object to help inject Javascript code into options settings that get quoted if they
are strings. This gets around the issue by not being a string-type and injecting the code
without quotes in the __str__ method. Example:
# Sets the initial date to 7pm on the client's current date
>>> options = {
'initialDate': Javascript('''function() {
var now = new Date();
var seven = new Date(now.getFullYear(), now.getMonth(), now.getDate(), 19, 0, 0);
return seven;
}()''')
}
"""
def __init__(self, code):
self.code = code
def __str__(self):
return self.code
class PickerWidgetMixin(object):
format_name = None
glyphicon = None
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if bootstrap_version in [2,3]:
self.bootstrap_version = bootstrap_version
else:
# default to 2 to maintain support for the old implementation of django-datetime-widget
self.bootstrap_version = 2
if attrs is None:
attrs = {'readonly': ''}
self.options = options
self.is_localized = False
self.format = None
# We want to have a Javascript style date format specifier in the options dictionary and we
# want a Python style date format specifier as a member variable for parsing the date string
# from the form data
if usel10n is True:
# If we're doing localisation, get the local Python date format and convert it to
# Javascript date format for the options dictionary
self.is_localized = True
# Get format from django format system
self.format = get_format(self.format_name)[0]
# Convert Python format specifier to Javascript format specifier
self.options['format'] = toJavascript_re.sub(
lambda x: dateConversiontoJavascript[x.group()],
self.format
)
# Set the local language
self.options['language'] = get_supported_language(get_language())
else:
# If we're not doing localisation, get the Javascript date format provided by the user,
# with a default, and convert it to a Python date format for later string parsing
format = self.options['format']
self.format = toPython_re.sub(
lambda x: dateConversiontoPython[x.group()],
format
)
super(PickerWidgetMixin, self).__init__(attrs, format=self.format)
def render(self, name, value, attrs=None, renderer=None):
final_attrs = self.build_attrs(attrs)
rendered_widget = super(PickerWidgetMixin, self).render(name, value, final_attrs)
# If not set, autoclose has to be true.
self.options.setdefault('autoclose', True)
# Build javascript options out of python dictionary
options_list = []
for key, value in iter(self.options.items()):
options_list.append("%s: %s" % (key, quote(key, value)))
js_options = ",\n".join(options_list)
# Use provided id or generate hex to avoid collisions in document
id = final_attrs.get('id', uuid.uuid4().hex)
clearBtn = quote('clearBtn', self.options.get('clearBtn', 'true')) == 'true'
labelField = final_attrs.get('label', False)
return mark_safe(
BOOTSTRAP_INPUT_TEMPLATE[self.bootstrap_version]
% dict(
id=id,
rendered_widget=rendered_widget,
label=LABEL_TEMPLATE[self.bootstrap_version] % dict(label=labelField) if labelField else "",
clear_button=CLEAR_BTN_TEMPLATE[self.bootstrap_version] if clearBtn else "",
glyphicon=self.glyphicon,
options=js_options
)
)
def _media(self):
js = ["js/bootstrap-datetimepicker.js"]
language = self.options.get('language', 'en')
if language != 'en':
js.append("js/locales/bootstrap-datetimepicker.%s.js" % language)
return widgets.Media(
css={
'all': ('css/datetimepicker.css',)
},
js=js
)
media = property(_media)
class DateTimeWidget(PickerWidgetMixin, DateTimeInput):
"""
DateTimeWidget is the corresponding widget for Datetime field, it renders both the date and time
sections of the datetime picker.
"""
format_name = 'DATETIME_INPUT_FORMATS'
glyphicon = 'glyphicon-th'
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if options is None:
options = {}
# Set the default options to show only the datepicker object
options['format'] = options.get('format', 'dd/mm/yyyy hh:ii')
super(DateTimeWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
class DateWidget(PickerWidgetMixin, DateInput):
"""
DateWidget is the corresponding widget for Date field, it renders only the date section of
datetime picker.
"""
format_name = 'DATE_INPUT_FORMATS'
glyphicon = 'glyphicon-calendar'
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if options is None:
options = {}
# Set the default options to show only the datepicker object
options['startView'] = options.get('startView', 2)
options['minView'] = options.get('minView', 2)
options['format'] = options.get('format', 'dd/mm/yyyy')
super(DateWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
class TimeWidget(PickerWidgetMixin, TimeInput):
"""
TimeWidget is the corresponding widget for Time field, it renders only the time section of
datetime picker.
"""
format_name = 'TIME_INPUT_FORMATS'
glyphicon = 'glyphicon-time'
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if options is None:
options = {}
# Set the default options to show only the timepicker object
options['startView'] = options.get('startView', 1)
options['minView'] = options.get('minView', 0)
options['maxView'] = options.get('maxView', 1)
options['format'] = options.get('format', 'hh:ii')
super(TimeWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
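# Hedged usage sketch (not part of the original module): wiring the widgets above into a
# Django form. The form and field names below are illustrative only, and instantiating
# DateTimeWidget with usel10n=True assumes Django settings are configured so that
# get_format() can resolve the local date format.
from django import forms as django_forms
class _ExampleEventForm(django_forms.Form):
    starts_at = django_forms.DateTimeField(
        widget=DateTimeWidget(usel10n=True, bootstrap_version=3))
    on_date = django_forms.DateField(
        widget=DateWidget(options={'format': 'dd/mm/yyyy', 'autoclose': True},
                          bootstrap_version=3))
    at_time = django_forms.TimeField(widget=TimeWidget(bootstrap_version=3))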
|
|
#!/usr/bin/env python
import netaddr
import pprint
import re
from JumpScale import j
import VXNet.vxlan as vxlan
import VXNet.netclasses as netcl
from VXNet.utils import *
class NetConfigFactory():
def __init__(self):
self._layout=None
self.PHYSMTU = 2000 # will fit all switches
def getConfigFromSystem(self,reload=False):
"""
walk over system and get configuration, result is dict
"""
if self._layout==None or reload:
self._layout=vxlan.NetLayout()
self._layout.load()
# add_ips_to(self._layout) #@todo fix
return self._layout.nicdetail
def _exec(self,cmd,failOnError=True):
print cmd
rc,out=j.system.process.execute(cmd,dieOnNonZeroExitCode=failOnError)
return out
def removeOldConfig(self):
cmd="brctl show"
for line in self._exec(cmd).split("\n"):
if line.strip()=="" or line.find("bridge name")<>-1:
continue
name=line.split("\t")[0]
self._exec("ip link set %s down"%name)
self._exec("brctl delbr %s"%name)
for intname,data in self.getConfigFromSystem(reload=True).iteritems():
if "PHYS" in data["detail"]:
continue
if intname =="ovs-system":
continue
self._exec("ovs-vsctl del-br %s"%intname,False)
out=self._exec("virsh net-list",False)
if out.find("virsh: not found")==-1:
state="start"
for line in out.split("\n"):
if state=="found":
if line.strip()=="":
continue
line=line.replace("\t"," ")
name=line.split(" ")[0]
self._exec("virsh net-destroy %s"%name,False)
self._exec("virsh net-undefine %s"%name,False)
if line.find("----")<>-1:
state="found"
j.system.fs.writeFile(filename="/etc/default/lxc-net",contents="USE_LXC_BRIDGE=\"false\"",append=True) #@todo UGLY use editor !!!
# Not used and expensive self.getConfigFromSystem(reload=True)
j.system.fs.writeFile(filename="/etc/network/interfaces",contents="auto lo\n iface lo inet loopback\n\n")
def initNetworkInterfaces(self):
"""
Resets /etc/network/interfaces with a basic configuration
"""
j.system.fs.writeFile(filename="/etc/network/interfaces",contents="auto lo\n iface lo inet loopback\n\n")
def loadNetworkInterfaces(self):
"""
Reloads the networking configuration, which basically applies /etc/network/interfaces
"""
j.system.platform.ubuntu.restartService('networking')
def printConfigFromSystem(self):
pprint.pprint(self.getConfigFromSystem())
def newBridge(self,name,interface=None):
"""
@param interface interface where to connect this bridge to
"""
br=netcl.Bridge(name)
br.create()
if interface is not None:
br.connect(interface)
def newVlanBridge(self, name, parentbridge, vlanid, mtu=None):
addVlanPatch(parentbridge, name, vlanid, mtu=mtu)
def ensureVXNet(self, networkid, backend):
vxnet = vxlan.VXNet(networkid, backend)
vxnet.innamespace=False
vxnet.inbridge = True
vxnet.apply()
return vxnet
def createVXLanBridge(self, networkid, backend,bridgename=None):
"""
Creates a proper vxlan interface and bridge based on a backplane
"""
networkoid = netcl.NetID(networkid)
vxlan = netcl.VXlan(networkoid, backend)
vxlan.create()
vxlan.no6()
bridge = netcl.Bridge(bridgename)
bridge.create()
bridge.connect(vxlan.name)
return vxlan
def getType(self,interfaceName):
layout=self.getConfigFromSystem()
if not layout.has_key(interfaceName):
raise RuntimeError("cannot find interface %s"%interfaceName)
interf=layout[interfaceName]
if interf["params"].has_key("type"):
return interf["params"]["type"]
return None
def setBackplaneDhcp(self,interfacename="eth0",backplanename="Public"):
"""
DANGEROUS, will remove old configuration
"""
C="""
auto $BPNAME
allow-ovs $BPNAME
iface $BPNAME inet dhcp
dns-nameserver 8.8.8.8 8.8.4.4
ovs_type OVSBridge
ovs_ports $iname
allow-$BPNAME $iname
iface $iname inet manual
ovs_bridge $BPNAME
ovs_type OVSPort
up ip link set $iname mtu $MTU
"""
C=C.replace("$BPNAME", str(backplanename))
C=C.replace("$iname", interfacename)
C=C.replace("$MTU", str(self.PHYSMTU))
ed=j.codetools.getTextFileEditor("/etc/network/interfaces")
ed.setSection(backplanename,C)
def setBackplaneNoAddress(self,interfacename="eth0",backplanename=1):
"""
DANGEROUS, will remove old configuration
"""
C="""
auto $BPNAME
allow-ovs $BPNAME
iface $BPNAME inet manual
ovs_type OVSBridge
ovs_ports $iname
allow-$BPNAME $iname
iface $iname inet manual
ovs_bridge $BPNAME
ovs_type OVSPort
up ip link set $iname mtu $MTU
"""
C=C.replace("$BPNAME", str(backplanename))
C=C.replace("$iname", interfacename)
C=C.replace("$MTU", str(self.PHYSMTU)) # strings here
ed=j.codetools.getTextFileEditor("/etc/network/interfaces")
ed.setSection(backplanename,C)
def configureStaticAddress(self,interfacename="eth0",ipaddr="192.168.10.10/24",gw=None):
"""
Configure a static address
"""
C="""
auto $interface
allow-ovs $interface
iface $interface inet static
address $ipbase
netmask $mask
$gw
"""
n=netaddr.IPNetwork(ipaddr)
C=C.replace("$interface", interfacename)
C=C.replace("$ipbase", str(n.ip))
C=C.replace("$mask", str(n.netmask))
if gw:
C=C.replace("$gw", "gateway %s"%gw)
else:
C=C.replace("$gw", "")
ed=j.codetools.getTextFileEditor("/etc/network/interfaces")
ed.setSection(interfacename,C)
ed.save()
def setBackplaneNoAddressWithBond(self,bondname, bondinterfaces,backplanename='backplane'):
"""
DANGEROUS, will remove old configuration
"""
C="""
auto $BPNAME
allow-ovs $BPNAME
iface $BPNAME inet manual
ovs_type OVSBridge
ovs_ports $bondname
allow-$BPNAME $bondname
iface $bondname inet manual
ovs_bridge $BPNAME
ovs_type OVSBond
ovs_bonds $bondinterfaces
ovs_options bond_mode=balance-tcp lacp=active bond_fake_iface=false other_config:lacp-time=fast bond_updelay=2000 bond_downdelay=400
$disable_ipv6
"""
interfaces = ''
disable_ipv6 = ''
for interface in bondinterfaces:
interfaces += '%s ' % interface
disable_ipv6 += 'pre-up ip l set %s mtu 2000 \n up sysctl -w net.ipv6.conf.%s.disable_ipv6=1 \n' % (interface, interface)
C=C.replace("$BPNAME", str(backplanename))
C=C.replace("$bondname", bondname)
C=C.replace("$MTU", str(self.PHYSMTU))
C=C.replace("$bondinterfaces" , interfaces)
C=C.replace("$disable_ipv6" , disable_ipv6)
ed=j.codetools.getTextFileEditor("/etc/network/interfaces")
ed.setSection(backplanename,C)
ed.save()
def setBackplane(self,interfacename="eth0",backplanename=1,ipaddr="192.168.10.10/24",gw=""):
"""
DANGEROUS, will remove old configuration
"""
C="""
auto $BPNAME
allow-ovs $BPNAME
iface $BPNAME inet static
address $ipbase
netmask $mask
dns-nameserver 8.8.8.8 8.8.4.4
ovs_type OVSBridge
ovs_ports $iname
$gw
allow-$BPNAME $iname
iface $iname inet manual
ovs_bridge $BPNAME
ovs_type OVSPort
up ip link set $iname mtu $MTU
"""
n=netaddr.IPNetwork(ipaddr)
C=C.replace("$BPNAME", str(backplanename))
C=C.replace("$iname", interfacename)
C=C.replace("$ipbase", str(n.ip))
C=C.replace("$mask", str(n.netmask))
C=C.replace("$MTU", str(self.PHYSMTU))
if gw<>"" and gw<>None:
C=C.replace("$gw", "gateway %s"%gw)
else:
C=C.replace("$gw", "")
ed=j.codetools.getTextFileEditor("/etc/network/interfaces")
ed.setSection(backplanename,C)
ed.save()
def setBackplaneWithBond(self,bondname, bondinterfaces,backplanename='backplane',ipaddr="192.168.10.10/24",gw=""):
"""
DANGEROUS, will remove old configuration
"""
C="""
auto $BPNAME
allow-ovs $BPNAME
iface $BPNAME inet static
address $ipbase
netmask $mask
dns-nameserver 8.8.8.8 8.8.4.4
ovs_type OVSBridge
ovs_ports $bondname
$gw
allow-$BPNAME $bondname
iface $bondname inet manual
ovs_bridge $BPNAME
ovs_type OVSBond
ovs_bonds $bondinterfaces
ovs_options bond_mode=balance-tcp lacp=active bond_fake_iface=false other_config:lacp-time=fast bond_updelay=2000 bond_downdelay=400
$disable_ipv6
"""
n=netaddr.IPNetwork(ipaddr)
interfaces = ''
disable_ipv6 = ''
for interface in bondinterfaces:
interfaces += '%s ' % interface
disable_ipv6 += 'pre-up ip l set %s mtu 2000 \n up sysctl -w net.ipv6.conf.%s.disable_ipv6=1 \n' % (interface, interface)
C=C.replace("$BPNAME", str(backplanename))
C=C.replace("$bondname", bondname)
C=C.replace("$ipbase", str(n.ip))
C=C.replace("$mask", str(n.netmask))
C=C.replace("$MTU", str(self.PHYSMTU))
if gw<>"" and gw<>None:
C=C.replace("$gw", "gateway %s"%gw)
else:
C=C.replace("$gw", "")
C=C.replace("$bondinterfaces" , interfaces)
C=C.replace("$disable_ipv6" , disable_ipv6)
ed=j.codetools.getTextFileEditor("/etc/network/interfaces")
ed.setSection(backplanename,C)
ed.save()
def applyconfig(self,interfacenameToExclude=None,backplanename=None):
"""
DANGEROUS, will remove old configuration
"""
for intname,data in self.getConfigFromSystem(reload=True).iteritems():
if "PHYS" in data["detail"] and intname<>interfacenameToExclude:
self._exec("ip addr flush dev %s" % intname, False)
self._exec("ip link set %s down"%intname,False)
if backplanename<>None:
self._exec("ifdown %s"%backplanename, failOnError=False)
# self._exec("ifup %s"%backplanename, failOnError=True)
#@todo need to do more checks here that it came up and retry couple of times if it did not
#@ can do this by investigating self.getConfigFromSystem
print "restarting network, can take a while."
j.system.process.executeWithoutPipe("/etc/init.d/openvswitch-switch restart")
print self._exec("ip a", failOnError=True)
print self._exec("ovs-vsctl show", failOnError=True)
def newBondedBackplane(self, name, interfaces, trunks=None):
"""
Reasonable defaults : mode=balance-tcp, lacp=active,fast, bondname=brname-Bond, all vlans allowed
"""
br = netcl.BondBridge(name,interfaces)
br.create()
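# Hedged usage sketch (not part of the original module): a typical flow with the factory
# above. The interface names, backplane name and address below are illustrative only.
def _example_setup():
    factory = NetConfigFactory()
    factory.printConfigFromSystem()                      # inspect the current layout
    factory.setBackplaneDhcp(interfacename="eth0", backplanename="Public")
    factory.configureStaticAddress(interfacename="eth1",
                                   ipaddr="192.168.10.10/24", gw="192.168.10.1")
    factory.applyconfig(backplanename="Public")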
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import django.contrib.auth.forms
from django import forms
from django.contrib.auth.models import User, Group
from django.forms import ValidationError
from django.forms.util import ErrorList
from django.utils.translation import get_language, ugettext as _, ugettext_lazy as _t
from desktop import conf as desktop_conf
from desktop.lib.django_util import get_username_re_rule, get_groupname_re_rule
from desktop.settings import LANGUAGES
from useradmin.models import GroupPermission, HuePermission
from useradmin.models import get_default_user_group
from useradmin.password_policy import get_password_validators
LOG = logging.getLogger(__name__)
def get_server_choices():
if desktop_conf.LDAP.LDAP_SERVERS.get():
return [(ldap_server_record_key, ldap_server_record_key) for ldap_server_record_key in desktop_conf.LDAP.LDAP_SERVERS.get()]
else:
return []
def validate_dn(dn):
if not dn:
raise ValidationError(_('Full Distinguished Name required.'))
def validate_username(username_pattern):
validator = re.compile(r"^%s$" % get_username_re_rule())
if not username_pattern:
raise ValidationError(_('Username is required.'))
if len(username_pattern) > 30:
raise ValidationError(_('Username must be 30 characters or fewer.'))
if not validator.match(username_pattern):
raise ValidationError(_("Username must not contain whitespaces and ':'"))
def validate_groupname(groupname_pattern):
validator = re.compile(r"^%s$" % get_groupname_re_rule())
if not groupname_pattern:
raise ValidationError(_('Group name required.'))
if len(groupname_pattern) > 80:
raise ValidationError(_('Group name must be 80 characters or fewer.'))
if not validator.match(groupname_pattern):
raise ValidationError(_("Group name can be any character as long as it's 80 characters or fewer."))
def validate_first_name(first_name):
if first_name and len(first_name) > 30:
raise ValidationError(_('first_name must be 30 characters or fewer.'))
def validate_last_name(last_name):
if last_name and len(last_name) > 30:
raise ValidationError(_('last_name must be 30 characters or fewer.'))
class UserChangeForm(django.contrib.auth.forms.UserChangeForm):
"""
This is similar, but not quite the same as django.contrib.auth.forms.UserChangeForm
and UserCreationForm.
"""
GENERIC_VALIDATION_ERROR = _("Username or password is invalid.")
username = forms.RegexField(
label=_t("Username"),
max_length=30,
regex='^%s$' % (get_username_re_rule(),),
help_text = _t("Required. 30 characters or fewer. No whitespaces or colons."),
error_messages = {'invalid': _t("Whitespaces and ':' not allowed") })
password1 = forms.CharField(label=_t("New Password"),
widget=forms.
PasswordInput,
required=False,
validators=get_password_validators())
password2 = forms.CharField(label=_t("Password confirmation"),
widget=forms.PasswordInput,
required=False,
validators=get_password_validators())
password_old = forms.CharField(label=_t("Current password"), widget=forms.PasswordInput, required=False)
ensure_home_directory = forms.BooleanField(label=_t("Create home directory"),
help_text=_t("Create home directory if one doesn't already exist."),
initial=True,
required=False)
language = forms.ChoiceField(label=_t("Language Preference"),
choices=LANGUAGES,
required=False)
class Meta(django.contrib.auth.forms.UserChangeForm.Meta):
fields = ["username", "first_name", "last_name", "email", "ensure_home_directory"]
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
if self.instance.id:
self.fields['username'].widget.attrs['readonly'] = True
if 'desktop.auth.backend.LdapBackend' in desktop_conf.AUTH.BACKEND.get():
self.fields['password1'].widget.attrs['readonly'] = True
self.fields['password2'].widget.attrs['readonly'] = True
self.fields['password_old'].widget.attrs['readonly'] = True
self.fields['first_name'].widget.attrs['readonly'] = True
self.fields['last_name'].widget.attrs['readonly'] = True
self.fields['email'].widget.attrs['readonly'] = True
if 'is_active' in self.fields:
self.fields['is_active'].widget.attrs['readonly'] = True
if 'is_superuser' in self.fields:
self.fields['is_superuser'].widget.attrs['readonly'] = True
if 'groups' in self.fields:
self.fields['groups'].widget.attrs['readonly'] = True
def clean_username(self):
username = self.cleaned_data["username"]
if self.instance.username == username:
return username
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.GENERIC_VALIDATION_ERROR, code='duplicate_username')
def clean_password(self):
return self.cleaned_data["password"]
def clean_password2(self):
password1 = self.cleaned_data.get("password1", "")
password2 = self.cleaned_data["password2"]
if password1 != password2:
raise forms.ValidationError(_t("Passwords do not match."))
return password2
def clean_password1(self):
password = self.cleaned_data.get("password1", "")
if self.instance.id is None and password == "":
raise forms.ValidationError(_("You must specify a password when creating a new user."))
return self.cleaned_data.get("password1", "")
def clean_password_old(self):
if self.instance.id is not None:
password1 = self.cleaned_data.get("password1", "")
password_old = self.cleaned_data.get("password_old", "")
if password1 != '' and not self.instance.check_password(password_old):
raise forms.ValidationError(self.GENERIC_VALIDATION_ERROR)
return self.cleaned_data.get("password_old", "")
def save(self, commit=True):
"""
Update password if it's set.
"""
user = super(UserChangeForm, self).save(commit=False)
if self.cleaned_data["password1"]:
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
# groups must be saved after the user
self.save_m2m()
return user
class PasswordChangeForm(UserChangeForm):
"""
This inherits from UserChangeForm to allow for forced password change on first login
"""
class Meta(UserChangeForm.Meta):
exclude = ('first_name', 'last_name', 'email')
def __init__(self, *args, **kwargs):
super(PasswordChangeForm, self).__init__(*args, **kwargs)
self.fields.pop('ensure_home_directory')
class SuperUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
fields = ["username", "is_active"] + UserChangeForm.Meta.fields + ["is_superuser", "groups"]
def __init__(self, *args, **kwargs):
super(SuperUserChangeForm, self).__init__(*args, **kwargs)
if self.instance.id:
# If the user exists already, we'll use its current group memberships
self.initial['groups'] = set(self.instance.groups.all())
else:
# If this is a new user, suggest the default group
default_group = get_default_user_group()
if default_group is not None:
self.initial['groups'] = set([default_group])
else:
self.initial['groups'] = []
class AddLdapUsersForm(forms.Form):
username_pattern = forms.CharField(
label=_t("Username"),
help_text=_t("Required. 30 characters or fewer with username. 64 characters or fewer with DN. No whitespaces or colons."),
error_messages={'invalid': _t("Whitespaces and ':' not allowed")})
dn = forms.BooleanField(label=_t("Distinguished name"),
help_text=_t("Whether or not the user should be imported by "
"distinguished name."),
initial=False,
required=False)
ensure_home_directory = forms.BooleanField(label=_t("Create home directory"),
help_text=_t("Create home directory for user if one doesn't already exist."),
initial=True,
required=False)
def __init__(self, *args, **kwargs):
super(AddLdapUsersForm, self).__init__(*args, **kwargs)
if get_server_choices():
self.fields['server'] = forms.ChoiceField(choices=get_server_choices(), required=True)
def clean(self):
cleaned_data = super(AddLdapUsersForm, self).clean()
username_pattern = cleaned_data.get("username_pattern")
dn = cleaned_data.get("dn")
try:
if dn:
validate_dn(username_pattern)
else:
validate_username(username_pattern)
except ValidationError, e:
errors = self._errors.setdefault('username_pattern', ErrorList())
errors.append(e.message)
raise forms.ValidationError(e.message)
return cleaned_data
class AddLdapGroupsForm(forms.Form):
groupname_pattern = forms.CharField(
label=_t("Name"),
max_length=256,
help_text=_t("Required. 256 characters or fewer."),
error_messages={'invalid': _t("256 characters or fewer.") })
dn = forms.BooleanField(label=_t("Distinguished name"),
help_text=_t("Whether or not the group should be imported by "
"distinguished name."),
initial=False,
required=False)
import_members = forms.BooleanField(label=_t('Import new members'),
help_text=_t('Import unimported or new users from the group.'),
initial=False,
required=False)
ensure_home_directories = forms.BooleanField(label=_t('Create home directories'),
help_text=_t('Create home directories for every member imported, if members are being imported.'),
initial=True,
required=False)
import_members_recursive = forms.BooleanField(label=_t('Import new members from all subgroups'),
help_text=_t('Import unimported or new users from all subgroups.'),
initial=False,
required=False)
def __init__(self, *args, **kwargs):
super(AddLdapGroupsForm, self).__init__(*args, **kwargs)
if get_server_choices():
self.fields['server'] = forms.ChoiceField(choices=get_server_choices(), required=True)
def clean(self):
cleaned_data = super(AddLdapGroupsForm, self).clean()
groupname_pattern = cleaned_data.get("groupname_pattern")
dn = cleaned_data.get("dn")
try:
if dn:
validate_dn(groupname_pattern)
else:
validate_groupname(groupname_pattern)
except ValidationError, e:
errors = self._errors.setdefault('groupname_pattern', ErrorList())
errors.append(e.message)
raise forms.ValidationError(e.message)
return cleaned_data
class GroupEditForm(forms.ModelForm):
"""
Form to manipulate a group. This manages the group name and its membership.
"""
GROUPNAME = re.compile('^%s$' % get_groupname_re_rule())
class Meta:
model = Group
fields = ("name",)
def clean_name(self):
# Note that the superclass doesn't have a clean_name method.
data = self.cleaned_data["name"]
if not self.GROUPNAME.match(data):
raise forms.ValidationError(_("Group name may only contain letters, " +
"numbers, hyphens or underscores."))
return data
def __init__(self, *args, **kwargs):
super(GroupEditForm, self).__init__(*args, **kwargs)
if self.instance.id:
self.fields['name'].widget.attrs['readonly'] = True
initial_members = User.objects.filter(groups=self.instance).order_by('username')
initial_perms = HuePermission.objects.filter(grouppermission__group=self.instance).order_by('app','description')
else:
initial_members = []
initial_perms = []
self.fields["members"] = _make_model_field(_("members"), initial_members, User.objects.order_by('username'))
self.fields["permissions"] = _make_model_field(_("permissions"), initial_perms, HuePermission.objects.order_by('app','description'))
def _compute_diff(self, field_name):
current = set(self.fields[field_name].initial_objs)
updated = set(self.cleaned_data[field_name])
delete = current.difference(updated)
add = updated.difference(current)
return delete, add
def save(self):
super(GroupEditForm, self).save()
self._save_members()
self._save_permissions()
def _save_members(self):
delete_membership, add_membership = self._compute_diff("members")
for user in delete_membership:
user.groups.remove(self.instance)
user.save()
for user in add_membership:
user.groups.add(self.instance)
user.save()
def _save_permissions(self):
delete_permission, add_permission = self._compute_diff("permissions")
for perm in delete_permission:
GroupPermission.objects.get(group=self.instance, hue_permission=perm).delete()
for perm in add_permission:
GroupPermission.objects.create(group=self.instance, hue_permission=perm)
class PermissionsEditForm(forms.ModelForm):
"""
Form to manage the set of groups that have a particular permission.
"""
class Meta:
model = Group
fields = ()
def __init__(self, *args, **kwargs):
super(PermissionsEditForm, self).__init__(*args, **kwargs)
if self.instance.id:
initial_groups = Group.objects.filter(grouppermission__hue_permission=self.instance).order_by('name')
else:
initial_groups = []
self.fields["groups"] = _make_model_field(_("groups"), initial_groups, Group.objects.order_by('name'))
def _compute_diff(self, field_name):
current = set(self.fields[field_name].initial_objs)
updated = set(self.cleaned_data[field_name])
delete = current.difference(updated)
add = updated.difference(current)
return delete, add
def save(self):
self._save_permissions()
def _save_permissions(self):
delete_group, add_group = self._compute_diff("groups")
for group in delete_group:
GroupPermission.objects.get(group=group, hue_permission=self.instance).delete()
for group in add_group:
GroupPermission.objects.create(group=group, hue_permission=self.instance)
def _make_model_field(label, initial, choices, multi=True):
""" Creates multiple choice field with given query object as choices. """
if multi:
field = forms.models.ModelMultipleChoiceField(choices, required=False)
field.initial_objs = initial
field.initial = [ obj.pk for obj in initial ]
field.label = label
else:
field = forms.models.ModelChoiceField(choices, required=False)
field.initial_obj = initial
if initial:
field.initial = initial.pk
return field
class SyncLdapUsersGroupsForm(forms.Form):
ensure_home_directory = forms.BooleanField(label=_t("Create Home Directories"),
help_text=_t("Create home directory for every user, if one doesn't already exist."),
initial=True,
required=False)
def __init__(self, *args, **kwargs):
super(SyncLdapUsersGroupsForm, self).__init__(*args, **kwargs)
if get_server_choices():
self.fields['server'] = forms.ChoiceField(choices=get_server_choices(), required=True)
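# Hedged usage sketch (not part of the original module): how a view might drive
# GroupEditForm. The function name and flow below are illustrative only.
def _example_edit_group(request, group):
    if request.method == 'POST':
        form = GroupEditForm(request.POST, instance=group)
        if form.is_valid():
            form.save()   # persists the name, the membership and the permissions
    else:
        form = GroupEditForm(instance=group)
    return form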
|
|
import calendar
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from django.conf import settings
from django.http import HttpRequest
from django.utils import translation
from two_factor.utils import default_device
from zerver.context_processors import get_apps_page_url
from zerver.lib.events import do_events_register
from zerver.lib.i18n import (
get_and_set_request_language,
get_language_list,
get_language_translation_data,
)
from zerver.lib.request import RequestNotes
from zerver.models import Message, Realm, Stream, UserProfile
from zerver.views.message_flags import get_latest_update_message_flag_activity
@dataclass
class BillingInfo:
show_billing: bool
show_plans: bool
@dataclass
class UserPermissionInfo:
color_scheme: int
is_guest: bool
is_realm_admin: bool
is_realm_owner: bool
show_webathena: bool
def get_furthest_read_time(user_profile: Optional[UserProfile]) -> Optional[float]:
if user_profile is None:
return time.time()
user_activity = get_latest_update_message_flag_activity(user_profile)
if user_activity is None:
return None
return calendar.timegm(user_activity.last_visit.utctimetuple())
def get_bot_types(user_profile: Optional[UserProfile]) -> List[Dict[str, object]]:
bot_types: List[Dict[str, object]] = []
if user_profile is None:
return bot_types
for type_id, name in UserProfile.BOT_TYPES.items():
bot_types.append(
dict(
type_id=type_id,
name=name,
allowed=type_id in user_profile.allowed_bot_types,
)
)
return bot_types
def promote_sponsoring_zulip_in_realm(realm: Realm) -> bool:
if not settings.PROMOTE_SPONSORING_ZULIP:
return False
# If PROMOTE_SPONSORING_ZULIP is enabled, advertise sponsoring
# Zulip in the gear menu of non-paying organizations.
return realm.plan_type in [Realm.STANDARD_FREE, Realm.SELF_HOSTED]
def get_billing_info(user_profile: Optional[UserProfile]) -> BillingInfo:
show_billing = False
show_plans = False
if settings.CORPORATE_ENABLED and user_profile is not None:
if user_profile.has_billing_access:
from corporate.models import CustomerPlan, get_customer_by_realm
customer = get_customer_by_realm(user_profile.realm)
if customer is not None:
if customer.sponsorship_pending:
show_billing = True
elif CustomerPlan.objects.filter(customer=customer).exists():
show_billing = True
if not user_profile.is_guest and user_profile.realm.plan_type == Realm.LIMITED:
show_plans = True
return BillingInfo(
show_billing=show_billing,
show_plans=show_plans,
)
def get_user_permission_info(user_profile: Optional[UserProfile]) -> UserPermissionInfo:
if user_profile is not None:
return UserPermissionInfo(
color_scheme=user_profile.color_scheme,
is_guest=user_profile.is_guest,
is_realm_owner=user_profile.is_realm_owner,
is_realm_admin=user_profile.is_realm_admin,
show_webathena=user_profile.realm.webathena_enabled,
)
else:
return UserPermissionInfo(
color_scheme=UserProfile.COLOR_SCHEME_AUTOMATIC,
is_guest=False,
is_realm_admin=False,
is_realm_owner=False,
show_webathena=False,
)
def build_page_params_for_home_page_load(
request: HttpRequest,
user_profile: Optional[UserProfile],
realm: Realm,
insecure_desktop_app: bool,
narrow: List[List[str]],
narrow_stream: Optional[Stream],
narrow_topic: Optional[str],
first_in_realm: bool,
prompt_for_invites: bool,
needs_tutorial: bool,
) -> Tuple[int, Dict[str, Any]]:
"""
This function computes page_params for when we load the home page.
The page_params data structure gets sent to the client.
"""
client_capabilities = {
"notification_settings_null": True,
"bulk_message_deletion": True,
"user_avatar_url_field_optional": True,
"stream_typing_notifications": False, # Set this to True when frontend support is implemented.
"user_settings_object": True,
}
if user_profile is not None:
client = RequestNotes.get_notes(request).client
assert client is not None
register_ret = do_events_register(
user_profile,
client,
apply_markdown=True,
client_gravatar=True,
slim_presence=True,
client_capabilities=client_capabilities,
narrow=narrow,
include_streams=False,
)
else:
# Since events for spectators are not implemented, we only fetch the data
# at the time of the request and don't register for any events.
# TODO: Implement events for spectator.
from zerver.lib.events import fetch_initial_state_data, post_process_state
register_ret = fetch_initial_state_data(
user_profile,
realm=realm,
event_types=None,
queue_id=None,
client_gravatar=False,
user_avatar_url_field_optional=client_capabilities["user_avatar_url_field_optional"],
user_settings_object=client_capabilities["user_settings_object"],
slim_presence=False,
include_subscribers=False,
include_streams=False,
)
post_process_state(user_profile, register_ret, False)
furthest_read_time = get_furthest_read_time(user_profile)
request_language = get_and_set_request_language(
request,
register_ret["user_settings"]["default_language"],
translation.get_language_from_path(request.path_info),
)
two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None
billing_info = get_billing_info(user_profile)
user_permission_info = get_user_permission_info(user_profile)
# Pass parameters to the client-side JavaScript code.
# These end up in a JavaScript Object named 'page_params'.
page_params = dict(
## Server settings.
test_suite=settings.TEST_SUITE,
insecure_desktop_app=insecure_desktop_app,
login_page=settings.HOME_NOT_LOGGED_IN,
warn_no_email=settings.WARN_NO_EMAIL,
search_pills_enabled=settings.SEARCH_PILLS_ENABLED,
# Only show marketing email settings if on Zulip Cloud
corporate_enabled=settings.CORPORATE_ENABLED,
## Misc. extra data.
language_list=get_language_list(),
needs_tutorial=needs_tutorial,
first_in_realm=first_in_realm,
prompt_for_invites=prompt_for_invites,
furthest_read_time=furthest_read_time,
bot_types=get_bot_types(user_profile),
two_fa_enabled=two_fa_enabled,
apps_page_url=get_apps_page_url(),
show_billing=billing_info.show_billing,
promote_sponsoring_zulip=promote_sponsoring_zulip_in_realm(realm),
show_plans=billing_info.show_plans,
show_webathena=user_permission_info.show_webathena,
# Adding two_fa_enabled as condition saves us 3 queries when
# 2FA is not enabled.
two_fa_enabled_user=two_fa_enabled and bool(default_device(user_profile)),
is_spectator=user_profile is None,
# There is no event queue for spectators since
# event support for spectators is not implemented yet.
no_event_queue=user_profile is None,
)
for field_name in register_ret.keys():
page_params[field_name] = register_ret[field_name]
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = narrow_stream.recipient
try:
max_message_id = (
Message.objects.filter(recipient=recipient).order_by("id").reverse()[0].id
)
except IndexError:
max_message_id = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = max_message_id
assert isinstance(page_params["user_settings"], dict)
page_params["user_settings"]["enable_desktop_notifications"] = False
page_params["translation_data"] = get_language_translation_data(request_language)
return register_ret["queue_id"], page_params
|
|
#!/usr/bin/env python
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <[email protected]>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if hasattr(self.stack[0], 'format'):  # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
    def do_draft(self):
        """usage: draft <string:mode> <int:xsize> <int:ysize> <image:pic1>
        Configure the loader of the top image for a given mode and size.
        """
        mode = self.do_pop()
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))
        self.push(image)
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
        # Image.offset() has long been deprecated/removed; ImageChops.offset()
        # is the equivalent operation.
        from PIL import ImageChops
        self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
        # transpose() expects one of the Image transpose constants
        # (e.g. Image.ROTATE_90), so resolve the named operator first.
        self.push(image.transpose(getattr(Image, transpose)))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
    def do_color(self):
        """usage: color <float:factor> <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
    def do_contrast(self):
        """usage: contrast <float:factor> <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
    def do_brightness(self):
        """usage: brightness <float:factor> <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
    def do_sharpness(self):
        """usage: sharpness <float:factor> <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
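# Editor's sketch (not part of the original script): the same reverse-Polish
# pipeline shown in the module docstring, driven from Python instead of the
# command line.  Assumes Pillow is installed and "test.png" exists in the
# working directory.
def _example_pipeline():
    driver = PILDriver()
    # Tokens are evaluated stack-machine style, so this loads test.png,
    # crops its upper-left corner and displays the cropped portion.
    driver.execute("show crop 0 0 200 300 open test.png".split())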
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
|
from HappyTools.util.fitting import gauss_function
from bisect import bisect_left, bisect_right, bisect
from math import sqrt, log
from numpy import amax, argmax, array, exp, greater, less, linspace, mean, std
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import curve_fit
from scipy.signal import argrelextrema
from sys import maxsize
class Peak(object):
def __init__(self, master):
self.master = master
self.settings = master.settings
self.logger = master.logger
self.peak_name = master.peak_name
self.peak_time = master.peak_time
self.peak_window = master.peak_window
self.peak_area = 0.
self.gaussian_area = 0.
self.signal_noise = 0.
self.background_area = 0.
self.peak_noise = 0.
self.residual = 0.
self.background = 0.
self.noise = 0.
self.fwhm = 0.
self.width = 0.
self.height = 0.
self.center = 0.
self.actual_time = 0.
self.total_area = 0.
self.coeff = array([])
self.peak_data = None
self.first_derivative_data = None
self.univariate_spline_data = None
self.breaks = []
self.peak_maximum_data = None
# Inherit full data
time, _ = zip(*master.chrom_data)
low_background = bisect_left(time, max(
self.peak_time-max(self.settings.background_window,
self.peak_window), self.settings.start))
high_background = bisect_right(time, min(
self.peak_time+max(self.settings.background_window,
self.peak_window), self.settings.end))
# Inherit only the required data (based on background window)
time, intensity = zip(*master.chrom_data[low_background:high_background])
self.low = bisect_left(time, self.peak_time-self.peak_window)
self.high = bisect_right(time, self.peak_time+self.peak_window)
self.peak_data = list(zip(time, intensity))
def background_correct(self):
time, intensity = zip(*self.peak_data)
intensity = [x-self.background for x in intensity]
self.peak_data = list(zip(time, intensity))
def determine_actual_time(self):
time, intensity = zip(*self.peak_data)
if self.coeff.any():
intensity = gauss_function(time, *self.coeff)
intensity = intensity.tolist()
max_intensity_index = intensity.index(max(intensity))
self.actual_time = time[max_intensity_index]
def determine_background_and_noise(self):
_, intensity = zip(*self.peak_data)
if self.settings.background_noise_method == 'NOBAN':
raise NotImplementedError('This feature is not implemented in '+
'the refactor yet.')
elif self.settings.background_noise_method == 'MT':
background = maxsize
noise = 0
for index, _ in enumerate(intensity[:-self.settings.slicepoints]):
buffer = intensity[index:index+self.settings.slicepoints]
if mean(buffer) < background:
background = mean(buffer)
if self.settings.noise == 'MM':
noise = max(buffer)-min(buffer)
elif self.settings.noise == 'RMS':
noise = std(buffer)
if noise == 0:
noise = 1
self.background = background
self.noise = noise
def determine_breakpoints(self):
time, intensity = zip(*self.first_derivative_data)
maxm = argrelextrema(array(intensity), greater)
minm = argrelextrema(array(intensity), less)
breaks = maxm[0].tolist() + minm[0].tolist()
breaks = sorted(breaks)
self.breaks = breaks
def determine_background_area(self):
background_area = 0
time, intensity = zip(*self.peak_data)
for index, _ in enumerate(intensity[self.low:self.high]):
try:
background_area += max(self.background, 0) * (
time[self.low+index]-time[self.low+index-1])
except IndexError:
continue
self.background_area = background_area
def determine_gaussian_area(self):
time, intensity = zip(*self.peak_data)
gaussian_area = 0.
for index, _ in enumerate(intensity[self.low:self.high]):
gaussian_area += max(gauss_function(time[self.low+index],
*self.coeff), 0) * (time[self.low+index]-
time[self.low+index-1])
self.gaussian_area = gaussian_area
def determine_gaussian_coefficients(self):
        x_data, y_data = zip(*self.peak_maximum_data)
        x_data, y_data = array(x_data), array(y_data)
        # Boolean masking needs ndarrays; plain tuples would not broadcast here.
        peak = x_data[y_data > exp(-0.5)*max(y_data)]
guess_sigma = 0.5*(max(peak) - min(peak))
p0 = [amax(y_data), x_data[argmax(y_data)], guess_sigma]
try:
coeff, _ = curve_fit(gauss_function, x_data, y_data, p0)
self.coeff = coeff
        except TypeError:
            self.logger.warning('Not enough data points to fit a Gaussian '+
                                'to peak: '+str(self.peak_name))
        except RuntimeError:
            self.logger.error('Unable to determine residuals for peak: '+
                              str(self.peak_name))
def determine_gaussian_parameters(self):
        '''Calculate the Gaussian peak parameters.
        The FWHM is calculated as FWHM = 2*sigma*sqrt(2*ln(2)), using the
        sigma from the fitted coefficients in self.coeff. The results are
        stored on the instance as the FWHM (self.fwhm), the Gaussian peak
        center (self.center) and the +/- width from the peak center
        (self.width).
        '''
fwhm = abs(2*self.coeff[2]*sqrt(2*log(2)))
width = 0.5*fwhm
center = self.coeff[1]
self.fwhm = fwhm
self.width = width
self.center = center
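    # Worked illustration (editor's note): for a fitted sigma of 0.1 min the
    # formula above gives FWHM = 2*sqrt(2*ln 2)*0.1 ~= 0.2355 min, so the
    # fitted peak extends roughly +/- 0.118 min around self.center.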
def determine_height(self):
edge = self.center+self.width
height = gauss_function(edge, *self.coeff)
self.height = height
def determine_peak_area(self):
peak_area = 0.
time, intensity = zip(*self.peak_data)
for index, j in enumerate(intensity[self.low:self.high]):
try:
peak_area += max(j, 0) * (time[self.low+index]-
time[self.low+index-1])
except IndexError:
continue
self.peak_area = peak_area
def determine_peak_noise(self):
_, intensity = zip(*self.peak_data)
peak_noise = std(intensity[self.low:self.high])
self.peak_noise = peak_noise
def determine_residual(self):
residual = 0.
try:
if self.gaussian_area != 0:
residual = min(self.gaussian_area / self.total_area, 1.0)
except ZeroDivisionError:
pass
self.residual = residual
def determine_signal_noise(self):
_, intensity = zip(*self.peak_data)
maximum_point = max(intensity[self.low:self.high])
signal_noise = (maximum_point - self.background) / self.noise
self.signal_noise = signal_noise
def determine_spline_and_derivative(self):
time, intensity = zip(*self.peak_data)
low = bisect_left(time, self.peak_time-self.peak_window)
high = bisect_right(time, self.peak_time+self.peak_window)
# Failsafe
if high == len(time):
            high -= 1
new_x = linspace(time[low], time[high],
int(2500*(time[high]-time[low])))
f = InterpolatedUnivariateSpline(time[low:high],
intensity[low:high])
f_prime = f.derivative()
        self.univariate_spline_data = list(zip(new_x, f(new_x)))
self.first_derivative_data = list(zip(new_x, f_prime(new_x)))
def determine_total_area(self):
total_area = 0.
time, intensity = zip(*self.peak_data)
for index, j in enumerate(intensity[self.low:self.high]):
total_area += max(j-self.background, 0) * (time[self.low+index]-
time[self.low+index-1])
self.total_area = total_area
def subset_data(self):
# Todo, use bisect_left and bisect_right here for consistency
time, intensity = zip(*self.univariate_spline_data)
max_intensity = max(intensity)
max_value = intensity.index(max_intensity)
insert = bisect(self.breaks, max_value)
if insert == 0:
start = 0
else:
start = self.breaks[insert-1]
try:
end = self.breaks[insert]
except IndexError:
end = None
time = time[start:end]
intensity = intensity[start:end]
self.peak_maximum_data = list(zip(time, intensity))
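# Editor's sketch of a plausible call order, inferred purely from the data
# dependencies visible above (the real driver is the HappyTools application
# object passed in as `master`):
#
#   peak = Peak(master)
#   peak.determine_background_and_noise()
#   peak.background_correct()
#   peak.determine_spline_and_derivative()
#   peak.determine_breakpoints()
#   peak.subset_data()
#   peak.determine_gaussian_coefficients()
#   peak.determine_gaussian_parameters()
#   peak.determine_gaussian_area()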
|
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: route53_zone
short_description: add or delete Route53 zones
description:
- Creates and deletes Route53 private and public zones
version_added: "2.0"
requirements: [ boto3 ]
options:
zone:
description:
- "The DNS zone record (eg: foo.com.)"
required: true
state:
description:
            - Whether or not the zone should exist.
default: present
choices: [ "present", "absent" ]
vpc_id:
description:
- The VPC ID the zone should be a part of (if this is going to be a private zone)
vpc_region:
description:
- The VPC Region the zone should be a part of (if this is going to be a private zone)
comment:
description:
- Comment associated with the zone
default: ''
hosted_zone_id:
description:
            - The unique zone identifier you want to delete, or "all" if there are many zones with the same domain name.
              Required if there are multiple zones identified with the above options.
version_added: 2.4
delegation_set_id:
description:
- The reusable delegation set ID to be associated with the zone.
Note that you can't associate a reusable delegation set with a private hosted zone.
version_added: 2.6
extends_documentation_fragment:
- aws
- ec2
author: "Christopher Troup (@minichate)"
'''
EXAMPLES = '''
- name: create a public zone
route53_zone:
zone: example.com
comment: this is an example
- name: delete a public zone
route53_zone:
zone: example.com
state: absent
- name: create a private zone
route53_zone:
zone: devel.example.com
vpc_id: '{{ myvpc_id }}'
vpc_region: us-west-2
comment: developer domain
- name: create a public zone associated with a specific reusable delegation set
route53_zone:
zone: example.com
comment: reusable delegation set example
delegation_set_id: A1BCDEF2GHIJKL
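# Editor's addition: the hosted_zone_id option documented above disambiguates
# between zones that share the same name (the ID value here is illustrative)
- name: delete a specific zone by its hosted zone ID
  route53_zone:
    zone: example.com
    hosted_zone_id: Z6JQG9820BEFMW
    state: absent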
'''
RETURN = '''
comment:
description: optional hosted zone comment
returned: when hosted zone exists
type: str
sample: "Private zone"
name:
description: hosted zone name
returned: when hosted zone exists
type: str
sample: "private.local."
private_zone:
description: whether hosted zone is private or public
returned: when hosted zone exists
type: bool
sample: true
vpc_id:
description: id of vpc attached to private hosted zone
returned: for private hosted zone
type: str
sample: "vpc-1d36c84f"
vpc_region:
description: region of vpc attached to private hosted zone
returned: for private hosted zone
type: str
sample: "eu-west-1"
zone_id:
description: hosted zone id
returned: when hosted zone exists
type: str
sample: "Z6JQG9820BEFMW"
delegation_set_id:
description: id of the associated reusable delegation set
returned: for public hosted zones, if they have been associated with a reusable delegation set
type: str
sample: "A1BCDEF2GHIJKL"
'''
import time
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
def find_zones(module, client, zone_in, private_zone):
try:
paginator = client.get_paginator('list_hosted_zones')
results = paginator.paginate().build_full_result()
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not list current hosted zones")
zones = []
for r53zone in results['HostedZones']:
if r53zone['Name'] != zone_in:
continue
# only save zone names that match the public/private setting
if (r53zone['Config']['PrivateZone'] and private_zone) or \
(not r53zone['Config']['PrivateZone'] and not private_zone):
zones.append(r53zone)
return zones
def create(module, client, matching_zones):
zone_in = module.params.get('zone').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
comment = module.params.get('comment')
delegation_set_id = module.params.get('delegation_set_id')
if not zone_in.endswith('.'):
zone_in += "."
private_zone = bool(vpc_id and vpc_region)
record = {
'private_zone': private_zone,
'vpc_id': vpc_id,
'vpc_region': vpc_region,
'comment': comment,
'name': zone_in,
'delegation_set_id': delegation_set_id,
'zone_id': None,
}
if private_zone:
changed, result = create_or_update_private(module, client, matching_zones, record)
else:
changed, result = create_or_update_public(module, client, matching_zones, record)
return changed, result
def create_or_update_private(module, client, matching_zones, record):
for z in matching_zones:
try:
result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
zone_details = result['HostedZone']
vpc_details = result['VPCs']
current_vpc_id = None
current_vpc_region = None
if isinstance(vpc_details, dict):
if vpc_details['VPC']['VPCId'] == record['vpc_id']:
current_vpc_id = vpc_details['VPC']['VPCId']
current_vpc_region = vpc_details['VPC']['VPCRegion']
else:
if record['vpc_id'] in [v['VPCId'] for v in vpc_details]:
current_vpc_id = record['vpc_id']
if record['vpc_region'] in [v['VPCRegion'] for v in vpc_details]:
current_vpc_region = record['vpc_region']
if record['vpc_id'] == current_vpc_id and record['vpc_region'] == current_vpc_region:
record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
if not module.check_mode:
try:
client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
return True, record
else:
record['msg'] = "There is already a private hosted zone in the same region with the same VPC \
you chose. Unable to create a new private hosted zone in the same name space."
return False, record
if not module.check_mode:
try:
result = client.create_hosted_zone(
Name=record['name'],
HostedZoneConfig={
'Comment': record['comment'] if record['comment'] is not None else "",
'PrivateZone': True,
},
VPC={
'VPCRegion': record['vpc_region'],
'VPCId': record['vpc_id'],
},
CallerReference="%s-%s" % (record['name'], time.time()),
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not create hosted zone")
hosted_zone = result['HostedZone']
zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
record['zone_id'] = zone_id
changed = True
return changed, record
def create_or_update_public(module, client, matching_zones, record):
zone_details, zone_delegation_set_details = None, {}
for matching_zone in matching_zones:
try:
zone = client.get_hosted_zone(Id=matching_zone['Id'])
zone_details = zone['HostedZone']
zone_delegation_set_details = zone.get('DelegationSet', {})
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id'])
if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
if not module.check_mode:
try:
client.update_hosted_zone_comment(
Id=zone_details['Id'],
Comment=record['comment']
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
changed = True
else:
changed = False
break
if zone_details is None:
if not module.check_mode:
try:
params = dict(
Name=record['name'],
HostedZoneConfig={
'Comment': record['comment'] if record['comment'] is not None else "",
'PrivateZone': False,
},
CallerReference="%s-%s" % (record['name'], time.time()),
)
if record.get('delegation_set_id') is not None:
params['DelegationSetId'] = record['delegation_set_id']
result = client.create_hosted_zone(**params)
zone_details = result['HostedZone']
zone_delegation_set_details = result.get('DelegationSet', {})
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not create hosted zone")
changed = True
if module.check_mode:
if zone_details:
record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
else:
record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
record['name'] = zone_details['Name']
record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '')
return changed, record
def delete_private(module, client, matching_zones, vpc_id, vpc_region):
for z in matching_zones:
try:
result = client.get_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
zone_details = result['HostedZone']
vpc_details = result['VPCs']
if isinstance(vpc_details, dict):
if vpc_details['VPC']['VPCId'] == vpc_id and vpc_region == vpc_details['VPC']['VPCRegion']:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
return True, "Successfully deleted %s" % zone_details['Name']
else:
if vpc_id in [v['VPCId'] for v in vpc_details] and vpc_region in [v['VPCRegion'] for v in vpc_details]:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
return True, "Successfully deleted %s" % zone_details['Name']
return False, "The vpc_id and the vpc_region do not match a private hosted zone."
def delete_public(module, client, matching_zones):
if len(matching_zones) > 1:
changed = False
msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone."
else:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=matching_zones[0]['Id'])
except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Could not delete hosted zone %s" % matching_zones[0]['Id'])
changed = True
msg = "Successfully deleted %s" % matching_zones[0]['Id']
return changed, msg
def delete_hosted_id(module, client, hosted_zone_id, matching_zones):
if hosted_zone_id == "all":
deleted = []
for z in matching_zones:
deleted.append(z['Id'])
if not module.check_mode:
try:
client.delete_hosted_zone(Id=z['Id'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
changed = True
msg = "Successfully deleted zones: %s" % deleted
elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]:
if not module.check_mode:
try:
client.delete_hosted_zone(Id=hosted_zone_id)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id)
changed = True
msg = "Successfully deleted zone: %s" % hosted_zone_id
else:
changed = False
msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id
return changed, msg
def delete(module, client, matching_zones):
zone_in = module.params.get('zone').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
hosted_zone_id = module.params.get('hosted_zone_id')
if not zone_in.endswith('.'):
zone_in += "."
private_zone = bool(vpc_id and vpc_region)
if zone_in in [z['Name'] for z in matching_zones]:
if hosted_zone_id:
changed, result = delete_hosted_id(module, client, hosted_zone_id, matching_zones)
else:
if private_zone:
changed, result = delete_private(module, client, matching_zones, vpc_id, vpc_region)
else:
changed, result = delete_public(module, client, matching_zones)
else:
changed = False
result = "No zone to delete."
return changed, result
def main():
argument_spec = dict(
zone=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
vpc_id=dict(default=None),
vpc_region=dict(default=None),
comment=dict(default=''),
hosted_zone_id=dict(),
delegation_set_id=dict(),
)
mutually_exclusive = [
['delegation_set_id', 'vpc_id'],
['delegation_set_id', 'vpc_region'],
]
module = AnsibleAWSModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
)
zone_in = module.params.get('zone').lower()
state = module.params.get('state').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
if not zone_in.endswith('.'):
zone_in += "."
private_zone = bool(vpc_id and vpc_region)
client = module.client('route53')
zones = find_zones(module, client, zone_in, private_zone)
if state == 'present':
changed, result = create(module, client, matching_zones=zones)
elif state == 'absent':
changed, result = delete(module, client, matching_zones=zones)
if isinstance(result, dict):
module.exit_json(changed=changed, result=result, **result)
else:
module.exit_json(changed=changed, result=result)
if __name__ == '__main__':
main()
|
|
"""Module implementing the Pool for :mod:`requests_toolbelt.threaded`."""
import multiprocessing
import requests
from . import thread
from .._compat import queue
class Pool(object):
"""Pool that manages the threads containing sessions.
    :param job_queue:
        The queue to which you should add the items (jobs) to be processed.
    :type job_queue: queue.Queue
    :param initializer:
        Function used to initialize an instance of ``session``.
    :type initializer: collections.Callable
    :param auth_generator:
        Function used to generate new auth credentials for the session.
    :type auth_generator: collections.Callable
    :param int num_processes:
        Number of threads to create.
    :param session:
        Callable that produces the session object used by each thread.
    :type session: requests.Session
"""
def __init__(self, job_queue, initializer=None, auth_generator=None,
num_processes=None, session=requests.Session):
if num_processes is None:
num_processes = multiprocessing.cpu_count() or 1
if num_processes < 1:
            raise ValueError("Number of processes should be at least 1.")
self._job_queue = job_queue
self._response_queue = queue.Queue()
self._exc_queue = queue.Queue()
self._processes = num_processes
self._initializer = initializer or _identity
self._auth = auth_generator or _identity
self._session = session
self._pool = [
thread.SessionThread(self._new_session(), self._job_queue,
self._response_queue, self._exc_queue)
for _ in range(self._processes)
]
def _new_session(self):
return self._auth(self._initializer(self._session()))
@classmethod
    def from_exceptions(cls, exceptions, **kwargs):
        r"""Create a :class:`~Pool` from an iterable of :class:`~ThreadException`\ s.
        Given an iterable that yields :class:`~ThreadException` objects,
        this classmethod will generate a new pool to retry the requests that
        caused the exceptions.
:param exceptions:
Iterable that returns :class:`~ThreadException`
:type exceptions: iterable
:param kwargs:
Keyword arguments passed to the :class:`~Pool` initializer.
:returns: An initialized :class:`~Pool` object.
:rtype: :class:`~Pool`
"""
job_queue = queue.Queue()
for exc in exceptions:
job_queue.put(exc.request_kwargs)
return cls(job_queue=job_queue, **kwargs)
@classmethod
def from_urls(cls, urls, request_kwargs=None, **kwargs):
"""Create a :class:`~Pool` from an iterable of URLs.
:param urls:
Iterable that returns URLs with which we create a pool.
:type urls: iterable
:param dict request_kwargs:
Dictionary of other keyword arguments to provide to the request
method.
:param kwargs:
Keyword arguments passed to the :class:`~Pool` initializer.
:returns: An initialized :class:`~Pool` object.
:rtype: :class:`~Pool`
"""
request_dict = {'method': 'GET'}
request_dict.update(request_kwargs or {})
job_queue = queue.Queue()
for url in urls:
job = request_dict.copy()
job.update({'url': url})
job_queue.put(job)
return cls(job_queue=job_queue, **kwargs)
def exceptions(self):
"""Iterate over all the exceptions in the pool.
:returns: Generator of :class:`~ThreadException`
"""
while True:
exc = self.get_exception()
if exc is None:
break
yield exc
def get_exception(self):
"""Get an exception from the pool.
:rtype: :class:`~ThreadException`
"""
try:
(request, exc) = self._exc_queue.get_nowait()
except queue.Empty:
return None
else:
return ThreadException(request, exc)
def get_response(self):
"""Get a response from the pool.
:rtype: :class:`~ThreadResponse`
"""
try:
(request, response) = self._response_queue.get_nowait()
except queue.Empty:
return None
else:
return ThreadResponse(request, response)
def responses(self):
"""Iterate over all the responses in the pool.
:returns: Generator of :class:`~ThreadResponse`
"""
while True:
resp = self.get_response()
if resp is None:
break
yield resp
def join_all(self):
"""Join all the threads to the master thread."""
for session_thread in self._pool:
session_thread.join()
class ThreadProxy(object):
proxied_attr = None
def __getattr__(self, attr):
"""Proxy attribute accesses to the proxied object."""
get = object.__getattribute__
if attr not in self.attrs:
response = get(self, self.proxied_attr)
return getattr(response, attr)
else:
return get(self, attr)
class ThreadResponse(ThreadProxy):
"""A wrapper around a requests Response object.
This will proxy most attribute access actions to the Response object. For
example, if you wanted the parsed JSON from the response, you might do:
.. code-block:: python
thread_response = pool.get_response()
json = thread_response.json()
"""
proxied_attr = 'response'
attrs = frozenset(['request_kwargs', 'response'])
def __init__(self, request_kwargs, response):
#: The original keyword arguments provided to the queue
self.request_kwargs = request_kwargs
#: The wrapped response
self.response = response
class ThreadException(ThreadProxy):
"""A wrapper around an exception raised during a request.
This will proxy most attribute access actions to the exception object. For
example, if you wanted the message from the exception, you might do:
.. code-block:: python
thread_exc = pool.get_exception()
msg = thread_exc.message
"""
proxied_attr = 'exception'
attrs = frozenset(['request_kwargs', 'exception'])
def __init__(self, request_kwargs, exception):
#: The original keyword arguments provided to the queue
self.request_kwargs = request_kwargs
#: The captured and wrapped exception
self.exception = exception
def _identity(session_obj):
return session_obj
__all__ = ['ThreadException', 'ThreadResponse', 'Pool']
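# Editor's usage sketch (illustrative only; the URLs are placeholders).  It
# exercises the lifecycle documented above: build a Pool from URLs, wait for
# the worker threads to drain the job queue, then read the response and
# exception queues.  Failed requests could then be retried via
# ``Pool.from_exceptions``.
if __name__ == '__main__':  # pragma: no cover
    example_pool = Pool.from_urls(['https://example.com/a',
                                   'https://example.com/b'])
    example_pool.join_all()
    for thread_response in example_pool.responses():
        print(thread_response.status_code, thread_response.request_kwargs['url'])
    for thread_exception in example_pool.exceptions():
        print('failed:', thread_exception.request_kwargs['url'])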
|
|
#!/usr/bin/env python2.4
import itertools
import time
import operator
def izip_longest(*args, **kwds):
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = itertools.repeat(fillvalue)
iters = [itertools.chain(it, sentinel(), fillers) for it in args]
try:
for tup in itertools.izip(*iters):
yield tup
except IndexError:
pass
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
"""Groups iterable into n sized yields, filling gaps with fillvalue.
Copied from http://docs.python.org/library/itertools.html#recipes
Returns:
A generator of the groups.
"""
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
class BrokenPathException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AIBot(object):
def __init__(self, game, delay=1):
self.game = game
self.facing = "N"
self.y = 2
self.x = 2
self.pos = (self.y, self.x)
self.distance = 2 + game.lantern
self.span = (self.distance * 2) + 1
try:
self.main_loop(delay)
except (KeyboardInterrupt, EOFError):
pass
def look(self):
g = self.game
l = self.game.lodmap
self.fov = l.parse_map(g.cli_look())
def pickup(self):
self.game.cli_pickup()
self.distance = 2 + self.game.lantern
self.span = (self.distance * 2) + 1
def move(self, direction):
self.facing = direction
self.walk()
def walk(self, do_look=True):
facing = self.facing
if facing == "N":
self.y -= 1
elif facing == "S":
self.y += 1
elif facing == "E":
self.x += 1
elif facing == "W":
self.x -= 1
self.game.cli_move(facing)
if do_look:
self.look()
def turn_left(self):
facing = self.facing
if facing == "N":
self.facing = "W"
elif facing == "S":
self.facing = "E"
elif facing == "E":
self.facing = "N"
elif facing == "W":
self.facing = "S"
def turn_right(self):
facing = self.facing
if facing == "N":
self.facing = "E"
elif facing == "S":
self.facing = "W"
elif facing == "E":
self.facing = "S"
elif facing == "W":
self.facing = "N"
def next_pos(self):
facing = self.facing
y = x = self.distance
if facing == "N":
y -= 1
elif facing == "S":
y += 1
elif facing == "E":
x += 1
elif facing == "W":
x -= 1
return (y,x)
def tile_in_fov(self, pos):
return self.fov[pos[0]][pos[1]]
def is_tile_in_fov(self, tile):
for row in self.fov:
for col in row:
if col == tile:
return True
return False
def tile_positions_in_fov(self, tile):
tiles = []
j = 0
i = 0
for row in self.fov:
for col in row:
if col == tile:
tiles.append((j, i))
i += 1
i = 0
j += 1
return tiles
def nearest_tiles(self, tile):
"nearest_tiles(TREASURE) --> [(2, 0), (0, 2), (0, 3)]"
"""Finds the positions of the nearest tiles in the bot's field of vision.
Args:
tile: The type of tile you want to find the coordinates of e.g.
self.game.lodmap.TREASURE.
Returns:
A list of the tiles positions sorted in ascending
order by manhattan distance from the bot e.g. [(2, 0), (0, 2), (0, 3)].
"""
def sort_tiles_by_dist(tiles_dists):
"sort_tiles_by_dist({(0, 2):2, (2, 0):1, (0, 3):3}) --> [(2, 0), (0, 2), (0, 3)]"
return [k for k,v in sorted(tiles_dists.iteritems(), key=operator.itemgetter(1))]
tiles_pos = self.tile_positions_in_fov(tile)
manhattan_dists = [(abs(self.distance - pos[0]) +
abs(self.distance - pos[1])) for pos in tiles_pos]
return sort_tiles_by_dist(dict(zip(tiles_pos, manhattan_dists)))
def a_star(self, start, goal):
"a_star((2, 2), (4, 1)) --> [(3, 2), (3, 1), (4, 1)]"
"""Finds shortest path between start and goal.
Adapted from http://en.wikipedia.org/wiki/A*_search_algorithm
Args:
start: (y, x) coordinate of the start of the path you want to find.
Usually the bot's current position, self.pos.
goal: (y, x) coordinate of where you want to get to.
Returns:
Shortest path between start and goal, not including the start, including
the goal.
Raises:
BrokenPathException(f_score): If the path can not be found.
"""
        def heuristic_estimate_of_distance(start, goal):
            # Manhattan distance on the grid; for adjacent nodes this is 1,
            # so the same function doubles as the edge cost used for g_score.
            return abs(start[0] - goal[0]) + abs(start[1] - goal[1])
        def node_with_lowest_f_score(f_score, openset):
            lowest_scoring_node = openset[0]
            lowest_index = 0
            for i, node in enumerate(openset):
                if f_score[node] < f_score[lowest_scoring_node]:
                    lowest_scoring_node = node
                    lowest_index = i
            return (lowest_scoring_node, lowest_index)
def neighbour_nodes(node):
"neighbour_nodes((2, 2)) --> [(1, 2), (2, 3), (3, 2), (2, 1)]"
"""Finds the valid neighbouring nodes to node.
Args:
node: The (y, x) of the node you want to find the neighbouring nodes
of.
Returns:
A list of the valid (inside the field of vision, and passable) nodes
that are neighbours of node.
"""
            nodes = [(node[0] + 1, node[1]), # S (y grows southward; see walk())
                     (node[0], node[1] + 1), # E
                     (node[0] - 1, node[1]), # N
                     (node[0], node[1] - 1)] # W
i = 0
# Remove invalid nodes:
while i < len(nodes):
curr = nodes[i]
y, x = curr[0], curr[1]
# 1. Outside FOV
if not (y in range(self.span) and x in range(self.span)):
del nodes[i]
continue
# 2. Impassable
elif self.tile_in_fov(curr) in (l.WALL, l.UNKNOWN, l.OUTSIDE):
del nodes[i]
continue
i += 1
return nodes
def reconstruct_path(came_from, current_node):
if current_node in came_from:
p = reconstruct_path(came_from, came_from[current_node])
return (p + current_node)
else:
return current_node
l = self.game.lodmap
closedset = [] # The set of nodes already evaluated.
openset = [start] # The set of tentative nodes to be evaluated.
came_from = {} # The map of navigated nodes.
g_score = {start: 0} # Distance from start along optimal path.
h_score = {start: heuristic_estimate_of_distance(start, goal)}
f_score = {start: h_score[start]} # Estimated total distance from start to goal through y.
while len(openset) != 0:
x, x_index = node_with_lowest_f_score(f_score, openset)
if x == goal:
return [pair for pair in grouper(2, reconstruct_path(came_from,
came_from[goal]))][1:] + [goal]
del openset[x_index]
closedset.append(x)
for y in neighbour_nodes(x):
if y in closedset:
continue
tentative_g_score = g_score[x] + heuristic_estimate_of_distance(x, y)
if not y in openset:
openset.append(y)
tentative_is_better = True
elif tentative_g_score < g_score[y]:
tentative_is_better = True
else:
tentative_is_better = False
if tentative_is_better:
came_from[y] = x
g_score[y] = tentative_g_score
h_score[y] = heuristic_estimate_of_distance(y, goal)
f_score[y] = g_score[y] + h_score[y]
raise BrokenPathException(f_score)
def walk_path(self, pos, path):
def direction_of_adjacent_node(node, adj_node):
if adj_node[0] == node[0]:
if adj_node[1] < node[1]:
return "W"
else:
return "E"
else:
if adj_node[0] < node[0]:
return "N"
else:
return "S"
directions = []
for node in path:
directions.append(direction_of_adjacent_node(pos, node))
pos = node
for direction in directions:
self.move(direction)
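    # Worked example (editor's note): starting from pos (2, 2) with the
    # a_star result [(3, 2), (3, 1), (4, 1)] shown in its docstring, this
    # helper produces the directions ["S", "W", "S"] and feeds them to move().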
def main_loop(self, delay):
l = self.game.lodmap
self.look() # initialise the field of vision self.fov
while True:
time.sleep(delay)
# pickup gold we spawned on top of
if self.tile_in_fov(self.pos) == l.TREASURE:
self.pickup()
# pathfind to gold
elif self.is_tile_in_fov(l.TREASURE):
nearest_tiles = self.nearest_tiles(l.TREASURE)
for target in nearest_tiles:
# try to get to the nearest target
try:
path = self.a_star((2, 2), target)
# if we can pathfind to it, pick it up
self.walk_path((2, 2), path)
self.pickup()
break
except BrokenPathException:
# otherwise, try the next nearest target
continue
# turn away from walls
while self.tile_in_fov(self.next_pos()) == l.WALL:
self.turn_right()
self.walk()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoleManagementPolicyAssignmentsOperations(object):
"""RoleManagementPolicyAssignmentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.authorization.v2020_10_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
scope, # type: str
role_management_policy_assignment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RoleManagementPolicyAssignment"
"""Get the specified role management policy assignment for a resource scope.
:param scope: The scope of the role management policy.
:type scope: str
        :param role_management_policy_assignment_name: The name (in the format {guid_guid}) of the
         role management policy assignment to get.
:type role_management_policy_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleManagementPolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleManagementPolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleManagementPolicyAssignmentName': self._serialize.url("role_management_policy_assignment_name", role_management_policy_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleManagementPolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleManagementPolicyAssignments/{roleManagementPolicyAssignmentName}'} # type: ignore
def create(
self,
scope, # type: str
role_management_policy_assignment_name, # type: str
parameters, # type: "_models.RoleManagementPolicyAssignment"
**kwargs # type: Any
):
# type: (...) -> "_models.RoleManagementPolicyAssignment"
"""Create a role management policy assignment.
:param scope: The scope of the role management policy assignment to upsert.
:type scope: str
        :param role_management_policy_assignment_name: The name (in the format {guid_guid}) of the
         role management policy assignment to upsert.
:type role_management_policy_assignment_name: str
:param parameters: Parameters for the role management policy assignment.
:type parameters: ~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyAssignment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleManagementPolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleManagementPolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleManagementPolicyAssignmentName': self._serialize.url("role_management_policy_assignment_name", role_management_policy_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RoleManagementPolicyAssignment')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleManagementPolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleManagementPolicyAssignments/{roleManagementPolicyAssignmentName}'} # type: ignore
def delete(
self,
scope, # type: str
role_management_policy_assignment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete a role management policy assignment.
:param scope: The scope of the role management policy assignment to delete.
:type scope: str
        :param role_management_policy_assignment_name: The name (in the format {guid_guid}) of the
         role management policy assignment to delete.
:type role_management_policy_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleManagementPolicyAssignmentName': self._serialize.url("role_management_policy_assignment_name", role_management_policy_assignment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleManagementPolicyAssignments/{roleManagementPolicyAssignmentName}'} # type: ignore
def list_for_scope(
self,
scope, # type: str
**kwargs # type: Any
):
        # type: (...) -> Iterable["_models.RoleManagementPolicyAssignmentListResult"]
        """Gets role management policy assignments for a resource scope.
:param scope: The scope of the role management policy.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RoleManagementPolicyAssignmentListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleManagementPolicyAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_scope.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RoleManagementPolicyAssignmentListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleManagementPolicyAssignments'} # type: ignore
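# Illustrative usage sketch -- not part of the generated client. It assumes
# azure-identity is installed and that the installed AuthorizationManagementClient
# profile exposes the role_management_policy_assignments operation group; the
# subscription id, scope and assignment name below are hypothetical placeholders.
def _role_management_policy_assignments_example():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.authorization import AuthorizationManagementClient

    client = AuthorizationManagementClient(DefaultAzureCredential(), "<subscription-id>")
    scope = "subscriptions/<subscription-id>"

    # Enumerate assignments at the scope; the pager follows nextLink transparently.
    for assignment in client.role_management_policy_assignments.list_for_scope(scope):
        print(assignment.name)

    # delete() takes the assignment name in the {guid_guid} format, e.g.:
    # client.role_management_policy_assignments.delete(scope, "<policyGuid_assignmentGuid>")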
|
|
from __future__ import unicode_literals
import logging
import sys
from io import BytesIO
from threading import Lock
from django import http
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_str, force_text, iri_to_uri
logger = logging.getLogger('django.request')
# See http://www.iana.org/assignments/http-status-codes
STATUS_CODE_TEXT = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
102: 'PROCESSING',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
207: 'MULTI-STATUS',
208: 'ALREADY REPORTED',
226: 'IM USED',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
422: 'UNPROCESSABLE ENTITY',
423: 'LOCKED',
424: 'FAILED DEPENDENCY',
426: 'UPGRADE REQUIRED',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
506: 'VARIANT ALSO NEGOTIATES',
507: 'INSUFFICIENT STORAGE',
508: 'LOOP DETECTED',
510: 'NOT EXTENDED',
}
class LimitedStream(object):
'''
LimitedStream wraps another stream so that it cannot be read past a
specified number of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
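# Illustrative sketch -- not part of Django. Using only the class defined above,
# it shows that reads stop at the declared limit even when the underlying stream
# holds more data. Defined as an unused helper so importing this module stays
# side-effect free.
def _limited_stream_example():
    body = LimitedStream(BytesIO(b'hello world'), 5)
    assert body.read() == b'hello'  # only the first 5 bytes are exposed
    assert body.read() == b''       # the limit is exhausted; nothing more is read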
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = base.get_script_name(environ)
path_info = force_text(environ.get('PATH_INFO', '/'))
if not path_info or path_info == script_name:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
#
# (The comparison of path_info to script_name is to work around an
# apparent bug in flup 1.0.1. See Django ticket #8490).
path_info = '/'
self.environ = environ
self.path_info = path_info
self.path = '%s%s' % (script_name, path_info)
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
self._post_parse_error = False
try:
content_length = int(self.environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.environ.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.environ.get('QUERY_STRING', ''))) or '')
def _is_secure(self):
return 'wsgi.url_scheme' in self.environ and self.environ['wsgi.url_scheme'] == 'https'
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
# The WSGI spec says 'QUERY_STRING' may be absent.
self._get = http.QueryDict(self.environ.get('QUERY_STRING', ''), encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self.environ.get('HTTP_COOKIE', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(base.get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
finally:
signals.request_finished.send(sender=self.__class__)
try:
status_text = STATUS_CODE_TEXT[response.status_code]
except KeyError:
status_text = 'UNKNOWN STATUS CODE'
status = '%s %s' % (response.status_code, status_text)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
return response
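# Illustrative sketch -- not part of Django. A minimal way to serve WSGIHandler
# with the standard library's wsgiref server; 'myproject.settings' is a
# hypothetical placeholder, and real deployments normally obtain the handler
# via django.core.wsgi.get_wsgi_application().
def _serve_with_wsgiref_example():
    import os
    from wsgiref.simple_server import make_server

    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
    make_server('127.0.0.1', 8000, WSGIHandler()).serve_forever()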
|