#!/usr/bin/env python3
import struct
import json
import socket
import re
from enum import Enum
from Xlib import display
class MessageType(Enum):
COMMAND = 0
GET_WORKSPACES = 1
SUBSCRIBE = 2
GET_OUTPUTS = 3
GET_TREE = 4
GET_MARKS = 5
GET_BAR_CONFIG = 6
GET_VERSION = 7
class Event(object):
WORKSPACE = (1 << 0)
OUTPUT = (1 << 1)
MODE = (1 << 2)
WINDOW = (1 << 3)
BARCONFIG_UPDATE = (1 << 4)
BINDING = (1 << 5)
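    # These values are bit flags, so several events can be combined with a
    # bitwise OR, e.g. ``Event.WINDOW | Event.WORKSPACE`` when calling
    # Connection.subscribe().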
class _ReplyType(dict):
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
class CommandReply(_ReplyType):
pass
class VersionReply(_ReplyType):
pass
class BarConfigReply(_ReplyType):
pass
class OutputReply(_ReplyType):
pass
class WorkspaceReply(_ReplyType):
pass
class WorkspaceEvent(object):
def __init__(self, data, conn):
self.change = data['change']
self.current = None
self.old = None
if 'current' in data and data['current']:
self.current = Con(data['current'], None, conn)
if 'old' in data and data['old']:
self.old = Con(data['old'], None, conn)
class GenericEvent(object):
def __init__(self, data):
self.change = data['change']
class WindowEvent(object):
def __init__(self, data, conn):
self.change = data['change']
self.container = Con(data['container'], None, conn)
class BarconfigUpdateEvent(object):
def __init__(self, data):
self.id = data['id']
self.hidden_state = data['hidden_state']
self.mode = data['mode']
class BindingInfo(object):
def __init__(self, data):
self.command = data['command']
self.mods = data['mods']
self.input_code = data['input_code']
self.symbol = data['symbol']
self.input_type = data['input_type']
class BindingEvent(object):
def __init__(self, data):
self.change = data['change']
self.binding = BindingInfo(data['binding'])
class _PubSub(object):
def __init__(self, conn):
self.conn = conn
self._subscriptions = []
def subscribe(self, detailed_event, handler):
event = detailed_event.replace('_', '-')
detail = ''
if detailed_event.count('::') > 0:
[event, detail] = detailed_event.split('::')
self._subscriptions.append({'event': event, 'detail': detail,
'handler': handler})
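    # A "detailed" event such as 'workspace::focus' only fires its handler
    # when the emitted event's ``change`` field matches the detail part
    # ('focus' in this example); an empty detail matches every change.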
def emit(self, event, data):
detail = ''
if data:
detail = data.change
for s in self._subscriptions:
if s['event'] == event:
if not s['detail'] or s['detail'] == detail:
if data:
s['handler'](self.conn, data)
else:
s['handler'](self.conn)
# this is for compatibility with i3ipc-glib
class _PropsObject(object):
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
def __getattribute__(self, name):
return getattr(object.__getattribute__(self, "_obj"), name)
def __delattr__(self, name):
delattr(object.__getattribute__(self, "_obj"), name)
def __setattr__(self, name, value):
setattr(object.__getattribute__(self, "_obj"), name, value)
class Connection(object):
MAGIC = 'i3-ipc' # safety string for i3-ipc
_chunk_size = 1024 # in bytes
_timeout = 0.5 # in seconds
_struct_header = '<%dsII' % len(MAGIC.encode('utf-8'))
_struct_header_size = struct.calcsize(_struct_header)
def __init__(self):
d = display.Display()
r = d.screen().root
data = r.get_property(d.get_atom('I3_SOCKET_PATH'),
d.get_atom('UTF8_STRING'), 0, 9999)
if not data.value:
raise Exception('could not get i3 socket path')
self._pubsub = _PubSub(self)
self.props = _PropsObject(self)
self.subscriptions = 0
self.socket_path = data.value
self.cmd_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.cmd_socket.connect(self.socket_path)
def _pack(self, msg_type, payload):
"""
Packs the given message type and payload. Turns the resulting
message into a byte string.
"""
pb = payload.encode()
s = struct.pack('=II', len(pb), msg_type.value)
return self.MAGIC.encode() + s + pb
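    # For example, _pack(MessageType.COMMAND, 'exit') produces the 6-byte
    # magic b'i3-ipc', a 4-byte payload length (4), a 4-byte message type (0)
    # and finally the payload bytes b'exit'.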
def _unpack(self, data):
"""
Unpacks the given byte string and parses the result from JSON.
Returns None on failure and saves data into "self.buffer".
"""
msg_magic, msg_length, msg_type = self._unpack_header(data)
msg_size = self._struct_header_size + msg_length
# XXX: Message shouldn't be any longer than the data
return data[self._struct_header_size:msg_size].decode('utf-8')
def _unpack_header(self, data):
"""
Unpacks the header of given byte string.
"""
return struct.unpack(self._struct_header,
data[:self._struct_header_size])
def _ipc_recv(self, sock):
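        # the fixed-size header is 14 bytes: the 6-byte magic string plus two
        # 32-bit integers (payload length and message type)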
data = sock.recv(14)
if len(data) == 0:
# EOF
return '', 0
msg_magic, msg_length, msg_type = self._unpack_header(data)
msg_size = self._struct_header_size + msg_length
while len(data) < msg_size:
data += sock.recv(msg_length)
return self._unpack(data), msg_type
def _ipc_send(self, sock, message_type, payload):
sock.sendall(self._pack(message_type, payload))
data, msg_type = self._ipc_recv(sock)
return data
def message(self, message_type, payload):
return self._ipc_send(self.cmd_socket, message_type, payload)
def command(self, payload):
data = self.message(MessageType.COMMAND, payload)
return json.loads(data, object_hook=CommandReply)
def get_version(self):
data = self.message(MessageType.GET_VERSION, '')
return json.loads(data, object_hook=VersionReply)
def get_bar_config(self, bar_id=None):
# default to the first bar id
if not bar_id:
bar_config_list = self.get_bar_config_list()
if not bar_config_list:
return None
bar_id = bar_config_list[0]
data = self.message(MessageType.GET_BAR_CONFIG, bar_id)
return json.loads(data, object_hook=BarConfigReply)
def get_bar_config_list(self):
data = self.message(MessageType.GET_BAR_CONFIG, '')
return json.loads(data)
def get_outputs(self):
data = self.message(MessageType.GET_OUTPUTS, '')
return json.loads(data, object_hook=OutputReply)
def get_workspaces(self):
data = self.message(MessageType.GET_WORKSPACES, '')
return json.loads(data, object_hook=WorkspaceReply)
def get_tree(self):
data = self.message(MessageType.GET_TREE, '')
return Con(json.loads(data), None, self)
def subscribe(self, events):
events_obj = []
if events & Event.WORKSPACE:
events_obj.append("workspace")
if events & Event.OUTPUT:
events_obj.append("output")
if events & Event.MODE:
events_obj.append("mode")
if events & Event.WINDOW:
events_obj.append("window")
if events & Event.BARCONFIG_UPDATE:
events_obj.append("barconfig_update")
if events & Event.BINDING:
events_obj.append("binding")
data = self._ipc_send(
self.sub_socket, MessageType.SUBSCRIBE, json.dumps(events_obj))
result = json.loads(data, object_hook=CommandReply)
self.subscriptions |= events
return result
def on(self, detailed_event, handler):
event = detailed_event.replace('_', '-')
detail = ''
if detailed_event.count('::') > 0:
[event, detail] = detailed_event.split('::')
# special case: ipc-shutdown is not in the protocol
if event == 'ipc-shutdown':
self._pubsub.subscribe(event, handler)
return
event_type = 0
if event == "workspace":
event_type = Event.WORKSPACE
elif event == "output":
event_type = Event.OUTPUT
elif event == "mode":
event_type = Event.MODE
elif event == "window":
event_type = Event.WINDOW
elif event == "barconfig_update":
event_type = Event.BARCONFIG_UPDATE
elif event == "binding":
event_type = Event.BINDING
if not event_type:
raise Exception('event not implemented')
self.subscriptions |= event_type
self._pubsub.subscribe(detailed_event, handler)
def main(self):
self.sub_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sub_socket.connect(self.socket_path)
self.subscribe(self.subscriptions)
while True:
if self.sub_socket is None:
break
data, msg_type = self._ipc_recv(self.sub_socket)
if len(data) == 0:
# EOF
self._pubsub.emit('ipc-shutdown', None)
break
data = json.loads(data)
msg_type = 1 << (msg_type & 0x7f)
event_name = ''
event = None
if msg_type == Event.WORKSPACE:
event_name = 'workspace'
event = WorkspaceEvent(data, self)
elif msg_type == Event.OUTPUT:
event_name = 'output'
event = GenericEvent(data)
elif msg_type == Event.MODE:
event_name = 'mode'
event = GenericEvent(data)
elif msg_type == Event.WINDOW:
event_name = 'window'
event = WindowEvent(data, self)
elif msg_type == Event.BARCONFIG_UPDATE:
event_name = 'barconfig_update'
event = BarconfigUpdateEvent(data)
elif msg_type == Event.BINDING:
event_name = 'binding'
event = BindingEvent(data)
else:
# we have not implemented this event
continue
self._pubsub.emit(event_name, event)
def main_quit(self):
self.sub_socket.close()
self.sub_socket = None
class Rect(object):
def __init__(self, data):
self.x = data['x']
self.y = data['y']
self.height = data['height']
self.width = data['width']
class Con(object):
def __init__(self, data, parent, conn):
self.props = _PropsObject(self)
self._conn = conn
self.parent = parent
# set simple properties
ipc_properties = ['border', 'current_border_width', 'focused',
'fullscreen_mode', 'id', 'layout', 'mark', 'name',
'orientation', 'percent', 'type', 'urgent', 'window']
for attr in ipc_properties:
if attr in data:
setattr(self, attr, data[attr])
else:
setattr(self, attr, None)
        # XXX this is for compatibility with i3 4.8
if isinstance(self.type, int):
if self.type == 0:
self.type = "root"
elif self.type == 1:
self.type = "output"
elif self.type == 2 or self.type == 3:
self.type = "con"
elif self.type == 4:
self.type = "workspace"
elif self.type == 5:
self.type = "dockarea"
# set complex properties
self.nodes = []
for n in data['nodes']:
self.nodes.append(Con(n, self, conn))
self.floating_nodes = []
for n in data['floating_nodes']:
            self.floating_nodes.append(Con(n, self, conn))
self.window_class = None
self.window_instance = None
if 'window_properties' in data:
if 'class' in data['window_properties']:
self.window_class = data['window_properties']['class']
if 'instance' in data['window_properties']:
self.window_instance = data['window_properties']['instance']
self.rect = Rect(data['rect'])
def root(self):
if not self.parent:
return self
con = self.parent
while con.parent:
con = con.parent
return con
def descendents(self):
descendents = []
def collect_descendents(con):
for c in con.nodes:
descendents.append(c)
collect_descendents(c)
for c in con.floating_nodes:
descendents.append(c)
collect_descendents(c)
collect_descendents(self)
return descendents
def leaves(self):
leaves = []
for c in self.descendents():
if not c.nodes and c.type == "con" and c.parent.type != "dockarea":
leaves.append(c)
return leaves
def command(self, command):
self._conn.command('[con_id="{}"] {}'.format(self.id, command))
def command_children(self, command):
if not len(self.nodes):
return
commands = []
for c in self.nodes:
            commands.append('[con_id="{}"] {};'.format(c.id, command))
self._conn.command(' '.join(commands))
def workspaces(self):
workspaces = []
def collect_workspaces(con):
if con.type == "workspace" and not con.name.startswith('__'):
workspaces.append(con)
return
for c in con.nodes:
collect_workspaces(c)
collect_workspaces(self.root())
return workspaces
def find_focused(self):
try:
return next(c for c in self.descendents() if c.focused)
except StopIteration:
return None
def find_by_id(self, id):
try:
return next(c for c in self.descendents() if c.id == id)
except StopIteration:
return None
def find_by_window(self, window):
try:
return next(c for c in self.descendents() if c.window == window)
except StopIteration:
return None
def find_named(self, pattern):
return [c for c in self.descendents()
if c.name and re.search(pattern, c.name)]
def find_classed(self, pattern):
return [c for c in self.descendents()
if c.window_class and re.search(pattern, c.window_class)]
def find_marked(self, pattern):
return [c for c in self.descendents()
if c.mark and re.search(pattern, c.mark)]
def workspace(self):
ret = self.parent
while ret:
if ret.type == 'workspace':
break
ret = ret.parent
return ret
def scratchpad(self):
root = self.root()
i3con = None
for c in root.nodes:
if c.name == "__i3":
i3con = c
break
if not i3con:
return None
i3con_content = None
for c in i3con.nodes:
if c.name == "content":
i3con_content = c
break
if not i3con_content:
return None
scratch = None
for c in i3con_content.nodes:
if c.name == "__i3_scratch":
scratch = c
break
return scratch
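# A minimal usage sketch (an illustration, not part of the library; assumes a
# running i3 session): query the focused container and listen for window events.
if __name__ == '__main__':
    connection = Connection()
    focused = connection.get_tree().find_focused()
    if focused:
        print('focused container: %s' % focused.name)

    def _on_window(conn, event):
        # ``event`` is a WindowEvent; its container is a Con instance
        print('window %s: %s' % (event.change, event.container.name))

    connection.on('window', _on_window)
    connection.main()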
# Copyright 2015, eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import nova
from neutron_lbaas_dashboard import api
LOG = logging.getLogger(__name__)
__create_new__ = "Create New"
class SetLBDetailsAction(workflows.Action):
address = forms.ChoiceField(label=_("IP"),
help_text=_("Select from existing VIP IPs"))
name = forms.CharField(max_length=80, label=_("Name"),
required=True)
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 2}),
label=_(
"Load Balancer Description"),
required=False,
help_text=_("Provide Load Balancer "
"Description."))
all_vips = None
is_update = False
LOAD_BALANCING_CHOICES = (
("RoundRobin", _("Round Robin")),
("LeastConnection", _("Least Connection")),
("LeastSessions", _("Least Sessions"))
)
lb_method = forms.ChoiceField(label=_("Load Balancing Method"),
choices=LOAD_BALANCING_CHOICES)
PROTOCOL_CHOICES = (
("HTTP", _("HTTP")),
("HTTPS", _("HTTPS")),
("TCP", _("TCP")),
("SSL", _("SSL")),
)
protocol_type = forms.ChoiceField(
label=_("LB Protocol"), choices=PROTOCOL_CHOICES)
port = forms.IntegerField(label=_("LB Port"),
required=False,
min_value=1,
max_value=65535,
help_text=_("LB Port on which "
"LB is listening."))
instance_port = forms.IntegerField(label=_("Instance Port"),
required=False,
min_value=1,
max_value=65535,
help_text=_("Instance Port on which "
"service is running."))
def __init__(self, request, *args, **kwargs):
super(SetLBDetailsAction, self).__init__(request, *args, **kwargs)
self.all_vips = []
try:
# todo - this should be obtained in view via an initial method
self.all_vips = api.lbaasv2.list_loadbalancers(request)
except Exception:
pass
if len(self.fields['address'].choices) == 0:
del self.fields['address']
class Meta(object):
name = _("LB Details")
help_text_template = ("project/loadbalancersv2/_launch_lb_help.html")
def clean(self):
cleaned_data = super(SetLBDetailsAction, self).clean()
lb_method = cleaned_data['lb_method']
if not (lb_method == 'RoundRobin'
or lb_method == 'LeastConnection'
or lb_method == 'LeastSessions'):
raise forms.ValidationError(_("Please select an option for "
"the load balancing method."))
if not self.is_update:
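            # collect the address:port pairs already in use so a duplicate
            # VIP/port selection can be rejected below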
all_vips = self.all_vips
ipPortCombo = []
for vip in all_vips:
vip = vip.readable()
ipPortCombo.append('%s:%s' % (vip.address, vip.port))
data = self.data
if 'address' in data \
and data['address'] != 'new' \
and data['address'] != '':
address = data['address']
selected_lb_port = data['port']
selected_ip_port_combo = '%s:%s' % (address.split(':')[0],
selected_lb_port)
if selected_ip_port_combo in ipPortCombo:
raise forms.ValidationError(_('Requested IP and port '
'combination already '
'exists %s ') %
selected_ip_port_combo)
instance_port = cleaned_data.get('instance_port', None)
if not instance_port:
raise forms.ValidationError(
_('Please provide instance port'))
return cleaned_data
def populate_address_choices(self, request, context):
if self.is_update:
return []
try:
vips = api.lbaasv2.list_loadbalancers(request)
if len(vips) == 0:
return []
distict_ips = set()
for vip in vips:
vip = vip.readable()
distict_ips.add(vip.address)
existing = []
for vip in vips:
vip = vip.readable()
if vip.address in distict_ips:
item = ("%s:%s:%s" %
(vip.address, vip.name, 443),
"%s" % vip.address)
existing.append(item)
distict_ips.remove(vip.address)
vip_list = []
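            # Django choice format: a plain ('new', 'Create New') option plus
            # an option group labelled 'Select Existing' containing the
            # existing VIP addresses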
if len(existing) > 0:
vip_list.append(('new', __create_new__))
vip_list.append(('Select Existing', existing))
return vip_list
except Exception:
exceptions.handle(request,
_('Unable to retrieve vips.'))
return []
def get_help_text(self):
extra = {}
return super(SetLBDetailsAction, self).get_help_text(extra)
class SetLBDetails(workflows.Step):
action_class = SetLBDetailsAction
contributes = ("name", "description", "lb_method", "protocol_type", "port",
"source_id", "instance_port", "address", "monitor")
def contribute(self, data, context):
context = super(SetLBDetails, self).contribute(data, context)
return context
template_name = "project/loadbalancersv2/launch_lb.html"
class UploadSSLAction(workflows.Action):
update_cert = forms.BooleanField(label='Update SSL Certificate',
required=False,
widget=forms.HiddenInput())
cert_name = forms.CharField(max_length=80,
label=_("Certificate Name"),
required=False)
cert = forms.CharField(widget=forms.Textarea(attrs={'rows': 3}),
label=_("Certificate"),
required=False,
help_text=_("Certificate"))
private_key = forms.CharField(widget=forms.Textarea(attrs={'rows': 3}),
label=_("Private Key"),
required=False,
help_text=_("Private Key"))
chain_cert = forms.CharField(widget=forms.Textarea(attrs={'rows': 3}),
label=_("Certificate Chain (Optional)"),
required=False,
help_text=_("Intermediate Chain"
" Certificates"))
def clean(self):
cleaned_data = super(UploadSSLAction, self).clean()
data = self.data
protocol = data.get('source_type')
if protocol == 'HTTPS':
use_common_cert = data.get('use_common_cert')
if not use_common_cert:
# check to see if ssl cert is provided
cert_name = data.get('cert_name')
cert = data.get('cert')
private_key = data.get('private_key')
if (not cert_name) \
or (not cert) \
or (not private_key):
raise forms.ValidationError(
_('Please provide all certificate parameters.'))
return cleaned_data
class Meta(object):
name = _("SSL Certificate")
help_text_template = ("project/loadbalancersv2/_ssl_cert_help.html")
class UploadSSLStep(workflows.Step):
action_class = UploadSSLAction
contributes = ("cert_name", "cert",
"private_key", "chain_cert", 'use_common_cert')
template_name = "project/loadbalancersv2/ssl_cert.html"
def contribute(self, data, context):
post = self.workflow.request.POST
context['cert_name'] = post['cert_name'] if 'cert_name' in post else ''
context['cert'] = post['cert'] if 'cert' in post else ''
context['private_key'] = post[
'private_key'] if 'private_key' in post else ''
context['chain_cert'] = post[
'chain_cert'] if 'chain_cert' in post else ''
context['use_common_cert'] = post[
'use_common_cert'] if 'use_common_cert' in post else ''
return context
class SelectInstancesAction(workflows.MembershipAction):
instance_details = {}
def __init__(self, request, *args, **kwargs):
super(SelectInstancesAction, self).__init__(request, *args, **kwargs)
err_msg = _('Unable to retrieve members list. '
'Please try again later.')
default_role_field_name = self.get_default_role_field_name()
self.fields[default_role_field_name] = forms.CharField(required=False,
label='')
self.fields[default_role_field_name].initial = 'member'
role_member_field_name = self.get_member_field_name('member')
self.fields[role_member_field_name] = forms.MultipleChoiceField(
required=False, label='')
# Get list of available instances
all_instances = []
try:
all_instances, has_more_data = nova.server_list(request)
except Exception:
exceptions.handle(request, err_msg)
available_instances = []
for instance in all_instances:
# skip shutoff instances
# if instance.status == 'SHUTOFF':
# continue
instance_ip = self.get_ip(instance)
# skip instances which has no network
if not instance_ip:
continue
key = instance_ip
value = instance.name + ' (' + self.get_ip(instance) + ')'
available_instances.append((key, value))
self.instance_details[instance_ip] = (instance.name, instance.id)
self.fields[self.get_member_field_name('member')].\
choices = available_instances
def get_ip(self, instance):
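        # ``instance.addresses`` (a Nova convention) is expected to look
        # roughly like {'net-name': [{'addr': '10.0.0.3', ...}, ...]}; only
        # the first address found is returned.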
ipaddress = None
for networks in instance.addresses.itervalues():
for ip in networks:
# only one IP present
ipaddress = ip
break
if ipaddress is not None:
addr = ipaddress["addr"]
else:
addr = None # '10.10.10.10'
return addr
def clean(self):
cleaned_data = super(SelectInstancesAction, self).clean()
members = cleaned_data.get(self.get_member_field_name('member'), None)
if not members:
raise forms.ValidationError(
_('Please select at least one member'))
return cleaned_data
class Meta(object):
name = _("Instances")
slug = "select_instances"
class SelectInstancesStep(workflows.UpdateMembersStep):
action_class = SelectInstancesAction
help_text = _("Please select a list of instances that should handle"
" traffic for this target load balancer. All instances "
"must reside in the same Project as the target load "
"balancer.")
available_list_title = _("All Instances")
members_list_title = _("Selected Instances")
no_available_text = _("No instances found.")
no_members_text = _("No members enabled.")
show_roles = False
contributes = (
"wanted_members", "instances_details", "monitor", "instance_port")
template_name = "horizon/common/_workflow_step_update_members.html"
def contribute(self, data, context):
request = self.workflow.request
if data:
context["wanted_members"] = request.POST.getlist(
self.get_member_field_name('member'))
context["instances_details"] = self.action.instance_details
context["monitor"] = request.POST.get("monitor")
context["instance_port"] = request.POST.get("instance_port")
return context
class SelectMonitorAction(workflows.Action):
MONITOR_CHOICES = (
("tcp", _("TCP")),
("ping", _("PING")),
("http", _("HTTP")),
)
monitor = forms.ChoiceField(label=_("Monitor"),
choices=MONITOR_CHOICES)
interval = forms.IntegerField(label=_("Health Check Interval"
" (in seconds)"),
required=False,
min_value=1,
max_value=600,
help_text=_("Health Check Interval"
" (in seconds)"))
timeout = forms.IntegerField(label=_("Retry count before markdown"),
required=False,
min_value=1,
max_value=100,
help_text=_("Number of times health check "
"should be attempted before "
"marking down a member"))
send = forms.CharField(widget=forms.Textarea(attrs={'rows': 1}),
label=_("Send String"),
required=False,
help_text=_("Send String"))
receive = forms.CharField(widget=forms.Textarea(attrs={'rows': 1}),
label=_("Receive String"),
required=False,
help_text=_("Receive String"))
class Meta(object):
name = _("Monitor")
help_text_template = ("project/loadbalancersv2/_monitor_help.html")
class SelectMonitorStep(workflows.Step):
action_class = SelectMonitorAction
contributes = ("monitor", "interval", "timeout", "send", "receive")
template_name = "project/loadbalancersv2/_monitor_create.html"
def contribute(self, data, context):
post = self.workflow.request.POST
context['interval'] = post['interval'] if 'interval' in post else ''
context['timeout'] = post['timeout'] if 'timeout' in post else ''
context['send'] = post['send'] if 'send' in post else ''
context['receive'] = post['receive'] if 'receive' in post else ''
return context
class LaunchLoadBalancer(workflows.Workflow):
slug = "launch_loadbalancer"
name = _("Launch Load Balancer")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:loadbalancersv2:index"
default_steps = (SetLBDetails,
UploadSSLStep,
SelectMonitorStep,
SelectInstancesStep,
)
attrs = {'data-help-text': 'LB creation may take a few minutes'}
def format_status_message(self, message):
name = self.context.get('name', 'unknown loadbalancer')
count = self.context.get('count', 1)
if int(count) > 1:
return message % {"count": _("%s loadbalancers") % count,
"name": name}
else:
return message % {"count": _("loadbalancer"), "name": name}
def handle(self, request, context):
try:
protocol = context['source_type']
address = context['address']
if not address\
or address == "new":
address = ''
else:
tokens = address.split(':')
address = tokens[0]
api.lbaasv2.\
create_loadbalancer_full(request,
address=address,
name=context['name'],
description=context['description'],
lb_method=context['lb_method'],
monitor=context['monitor'],
protocol=protocol,
                                         port=context['port'],
instance_port=context['instance_port'], # noqa
wanted_members=context['wanted_members'], # noqa
instances_details=context['instances_details'], # noqa
cert_name=context['cert_name'],
cert=context['cert'],
private_key=context['private_key'],
chain_cert=context['chain_cert'],
use_common_cert=True if
context['use_common_cert'] == 'on'
else False,
interval=context['interval'],
timeout=context['timeout'],
send=context['send'],
receive=context['receive'],
)
return True
except Exception as e:
exceptions.handle(request, e.message, ignore=False)
return False
"""
Test the ColumnTransformer.
"""
import numpy as np
from scipy import sparse
import pytest
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.base import BaseEstimator
from sklearn.externals import six
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import StandardScaler, Normalizer, OneHotEncoder
from sklearn.feature_extraction import DictVectorizer
class Trans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# 1D Series -> 2D DataFrame
if hasattr(X, 'to_frame'):
return X.to_frame()
# 1D array -> 2D array
if X.ndim == 1:
return np.atleast_2d(X).T
return X
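# ``Trans`` is effectively an identity transformer that only reshapes 1D input
# to 2D; the tests below use it as a minimal stand-in estimator.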
class DoubleTrans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X):
return 2*X
class SparseMatrixTrans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
n_samples = len(X)
return sparse.eye(n_samples, n_samples).tocsr()
class TransNo2D(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X
class TransRaise(BaseEstimator):
def fit(self, X, y=None):
raise ValueError("specific message")
def transform(self, X, y=None):
raise ValueError("specific message")
def test_column_transformer():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first1D = np.array([0, 1, 2])
X_res_second1D = np.array([2, 4, 6])
X_res_first = X_res_first1D.reshape(-1, 1)
X_res_both = X_array
cases = [
# single column 1D / 2D
(0, X_res_first),
([0], X_res_first),
# list-like
([0, 1], X_res_both),
(np.array([0, 1]), X_res_both),
# slice
(slice(0, 1), X_res_first),
(slice(0, 2), X_res_both),
# boolean mask
(np.array([True, False]), X_res_first),
]
for selection, res in cases:
ct = ColumnTransformer([('trans', Trans(), selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_array), res)
assert_array_equal(ct.fit(X_array).transform(X_array), res)
# callable that returns any of the allowed specifiers
ct = ColumnTransformer([('trans', Trans(), lambda x: selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_array), res)
assert_array_equal(ct.fit(X_array).transform(X_array), res)
ct = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])])
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
# test with transformer_weights
transformer_weights = {'trans1': .1, 'trans2': 10}
both = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])],
transformer_weights=transformer_weights)
res = np.vstack([transformer_weights['trans1'] * X_res_first1D,
transformer_weights['trans2'] * X_res_second1D]).T
assert_array_equal(both.fit_transform(X_array), res)
assert_array_equal(both.fit(X_array).transform(X_array), res)
assert len(both.transformers_) == 2
both = ColumnTransformer([('trans', Trans(), [0, 1])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
assert len(both.transformers_) == 1
def test_column_transformer_dataframe():
pd = pytest.importorskip('pandas')
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
X_res_both = X_array
cases = [
# String keys: label based
# scalar
('first', X_res_first),
# list
(['first'], X_res_first),
(['first', 'second'], X_res_both),
# slice
(slice('first', 'second'), X_res_both),
# int keys: positional
# scalar
(0, X_res_first),
# list
([0], X_res_first),
([0, 1], X_res_both),
(np.array([0, 1]), X_res_both),
# slice
(slice(0, 1), X_res_first),
(slice(0, 2), X_res_both),
# boolean mask
(np.array([True, False]), X_res_first),
(pd.Series([True, False], index=['first', 'second']), X_res_first),
]
for selection, res in cases:
ct = ColumnTransformer([('trans', Trans(), selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_df), res)
assert_array_equal(ct.fit(X_df).transform(X_df), res)
# callable that returns any of the allowed specifiers
ct = ColumnTransformer([('trans', Trans(), lambda X: selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_df), res)
assert_array_equal(ct.fit(X_df).transform(X_df), res)
ct = ColumnTransformer([('trans1', Trans(), ['first']),
('trans2', Trans(), ['second'])])
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
ct = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])])
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# test with transformer_weights
transformer_weights = {'trans1': .1, 'trans2': 10}
both = ColumnTransformer([('trans1', Trans(), ['first']),
('trans2', Trans(), ['second'])],
transformer_weights=transformer_weights)
res = np.vstack([transformer_weights['trans1'] * X_df['first'],
transformer_weights['trans2'] * X_df['second']]).T
assert_array_equal(both.fit_transform(X_df), res)
assert_array_equal(both.fit(X_df).transform(X_df), res)
assert len(both.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# test multiple columns
both = ColumnTransformer([('trans', Trans(), ['first', 'second'])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
assert len(both.transformers_) == 1
assert ct.transformers_[-1][0] != 'remainder'
both = ColumnTransformer([('trans', Trans(), [0, 1])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
assert len(both.transformers_) == 1
assert ct.transformers_[-1][0] != 'remainder'
    # ensure pandas object is passed through
class TransAssert(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
assert_true(isinstance(X, (pd.DataFrame, pd.Series)))
if isinstance(X, pd.Series):
X = X.to_frame()
return X
ct = ColumnTransformer([('trans', TransAssert(), 'first')],
remainder='drop')
ct.fit_transform(X_df)
ct = ColumnTransformer([('trans', TransAssert(), ['first', 'second'])])
ct.fit_transform(X_df)
# integer column spec + integer column names -> still use positional
X_df2 = X_df.copy()
X_df2.columns = [1, 0]
ct = ColumnTransformer([('trans', Trans(), 0)], remainder='drop')
    assert_array_equal(ct.fit_transform(X_df2), X_res_first)
    assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'drop'
assert_array_equal(ct.transformers_[-1][2], [1])
def test_column_transformer_sparse_array():
X_sparse = sparse.eye(3, 2).tocsr()
# no distinction between 1D and 2D
X_res_first = X_sparse[:, 0]
X_res_both = X_sparse
for col in [0, [0], slice(0, 1)]:
for remainder, res in [('drop', X_res_first),
('passthrough', X_res_both)]:
ct = ColumnTransformer([('trans', Trans(), col)],
remainder=remainder,
sparse_threshold=0.8)
assert_true(sparse.issparse(ct.fit_transform(X_sparse)))
assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
res)
for col in [[0, 1], slice(0, 2)]:
ct = ColumnTransformer([('trans', Trans(), col)],
sparse_threshold=0.8)
assert_true(sparse.issparse(ct.fit_transform(X_sparse)))
assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
X_res_both)
def test_column_transformer_sparse_stacking():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
col_trans = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', SparseMatrixTrans(), 1)],
sparse_threshold=0.8)
col_trans.fit(X_array)
X_trans = col_trans.transform(X_array)
assert_true(sparse.issparse(X_trans))
assert_equal(X_trans.shape, (X_trans.shape[0], X_trans.shape[0] + 1))
assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
assert len(col_trans.transformers_) == 2
assert col_trans.transformers_[-1][0] != 'remainder'
col_trans = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', SparseMatrixTrans(), 1)],
sparse_threshold=0.1)
col_trans.fit(X_array)
X_trans = col_trans.transform(X_array)
assert not sparse.issparse(X_trans)
assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
def test_column_transformer_sparse_threshold():
X_array = np.array([['a', 'b'], ['A', 'B']], dtype=object).T
# above data has sparsity of 4 / 8 = 0.5
# if all sparse, keep sparse (even if above threshold)
col_trans = ColumnTransformer([('trans1', OneHotEncoder(), [0]),
('trans2', OneHotEncoder(), [1])],
sparse_threshold=0.2)
res = col_trans.fit_transform(X_array)
assert sparse.issparse(res)
assert col_trans.sparse_output_
# mixed -> sparsity of (4 + 2) / 8 = 0.75
for thres in [0.75001, 1]:
col_trans = ColumnTransformer(
[('trans1', OneHotEncoder(sparse=True), [0]),
('trans2', OneHotEncoder(sparse=False), [1])],
sparse_threshold=thres)
res = col_trans.fit_transform(X_array)
assert sparse.issparse(res)
assert col_trans.sparse_output_
for thres in [0.75, 0]:
col_trans = ColumnTransformer(
[('trans1', OneHotEncoder(sparse=True), [0]),
('trans2', OneHotEncoder(sparse=False), [1])],
sparse_threshold=thres)
res = col_trans.fit_transform(X_array)
assert not sparse.issparse(res)
assert not col_trans.sparse_output_
# if nothing is sparse -> no sparse
for thres in [0.33, 0, 1]:
col_trans = ColumnTransformer(
[('trans1', OneHotEncoder(sparse=False), [0]),
('trans2', OneHotEncoder(sparse=False), [1])],
sparse_threshold=thres)
res = col_trans.fit_transform(X_array)
assert not sparse.issparse(res)
assert not col_trans.sparse_output_
def test_column_transformer_error_msg_1D():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
col_trans = ColumnTransformer([('trans', StandardScaler(), 0)])
assert_raise_message(ValueError, "1D data passed to a transformer",
col_trans.fit, X_array)
assert_raise_message(ValueError, "1D data passed to a transformer",
col_trans.fit_transform, X_array)
col_trans = ColumnTransformer([('trans', TransRaise(), 0)])
for func in [col_trans.fit, col_trans.fit_transform]:
assert_raise_message(ValueError, "specific message", func, X_array)
def test_2D_transformer_output():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
# if one transformer is dropped, test that name is still correct
ct = ColumnTransformer([('trans1', 'drop', 0),
('trans2', TransNo2D(), 1)])
assert_raise_message(ValueError, "the 'trans2' transformer should be 2D",
ct.fit_transform, X_array)
# because fit is also doing transform, this raises already on fit
assert_raise_message(ValueError, "the 'trans2' transformer should be 2D",
ct.fit, X_array)
def test_2D_transformer_output_pandas():
pd = pytest.importorskip('pandas')
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['col1', 'col2'])
# if one transformer is dropped, test that name is still correct
ct = ColumnTransformer([('trans1', TransNo2D(), 'col1')])
assert_raise_message(ValueError, "the 'trans1' transformer should be 2D",
ct.fit_transform, X_df)
# because fit is also doing transform, this raises already on fit
assert_raise_message(ValueError, "the 'trans1' transformer should be 2D",
ct.fit, X_df)
@pytest.mark.parametrize("remainder", ['drop', 'passthrough'])
def test_column_transformer_invalid_columns(remainder):
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
# general invalid
for col in [1.5, ['string', 1], slice(1, 's'), np.array([1.])]:
ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)
assert_raise_message(ValueError, "No valid specification",
ct.fit, X_array)
# invalid for arrays
for col in ['string', ['string', 'other'], slice('a', 'b')]:
ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)
assert_raise_message(ValueError, "Specifying the columns",
ct.fit, X_array)
def test_column_transformer_invalid_transformer():
class NoTrans(BaseEstimator):
def fit(self, X, y=None):
return self
def predict(self, X):
return X
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
ct = ColumnTransformer([('trans', NoTrans(), [0])])
assert_raise_message(TypeError, "All estimators should implement fit",
ct.fit, X_array)
def test_make_column_transformer():
scaler = StandardScaler()
norm = Normalizer()
ct = make_column_transformer(('first', scaler), (['second'], norm))
names, transformers, columns = zip(*ct.transformers)
assert_equal(names, ("standardscaler", "normalizer"))
assert_equal(transformers, (scaler, norm))
assert_equal(columns, ('first', ['second']))
def test_make_column_transformer_kwargs():
scaler = StandardScaler()
norm = Normalizer()
ct = make_column_transformer(('first', scaler), (['second'], norm),
n_jobs=3, remainder='drop')
assert_equal(ct.transformers, make_column_transformer(
('first', scaler), (['second'], norm)).transformers)
assert_equal(ct.n_jobs, 3)
assert_equal(ct.remainder, 'drop')
# invalid keyword parameters should raise an error message
assert_raise_message(
TypeError,
'Unknown keyword arguments: "transformer_weights"',
make_column_transformer, ('first', scaler), (['second'], norm),
transformer_weights={'pca': 10, 'Transf': 1}
)
def test_make_column_transformer_remainder_transformer():
scaler = StandardScaler()
norm = Normalizer()
remainder = StandardScaler()
ct = make_column_transformer(('first', scaler), (['second'], norm),
remainder=remainder)
assert ct.remainder == remainder
def test_column_transformer_get_set_params():
ct = ColumnTransformer([('trans1', StandardScaler(), [0]),
('trans2', StandardScaler(), [1])])
exp = {'n_jobs': None,
'remainder': 'drop',
'sparse_threshold': 0.3,
'trans1': ct.transformers[0][1],
'trans1__copy': True,
'trans1__with_mean': True,
'trans1__with_std': True,
'trans2': ct.transformers[1][1],
'trans2__copy': True,
'trans2__with_mean': True,
'trans2__with_std': True,
'transformers': ct.transformers,
'transformer_weights': None}
assert_dict_equal(ct.get_params(), exp)
ct.set_params(trans1__with_mean=False)
assert_false(ct.get_params()['trans1__with_mean'])
ct.set_params(trans1='passthrough')
exp = {'n_jobs': None,
'remainder': 'drop',
'sparse_threshold': 0.3,
'trans1': 'passthrough',
'trans2': ct.transformers[1][1],
'trans2__copy': True,
'trans2__with_mean': True,
'trans2__with_std': True,
'transformers': ct.transformers,
'transformer_weights': None}
assert_dict_equal(ct.get_params(), exp)
def test_column_transformer_named_estimators():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer([('trans1', StandardScaler(), [0]),
('trans2', StandardScaler(with_std=False), [1])])
assert_false(hasattr(ct, 'transformers_'))
ct.fit(X_array)
assert_true(hasattr(ct, 'transformers_'))
assert_true(isinstance(ct.named_transformers_['trans1'], StandardScaler))
assert_true(isinstance(ct.named_transformers_.trans1, StandardScaler))
assert_true(isinstance(ct.named_transformers_['trans2'], StandardScaler))
assert_true(isinstance(ct.named_transformers_.trans2, StandardScaler))
assert_false(ct.named_transformers_.trans2.with_std)
    # check that the transformers are fitted
assert_equal(ct.named_transformers_.trans1.mean_, 1.)
def test_column_transformer_cloning():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer([('trans', StandardScaler(), [0])])
ct.fit(X_array)
assert_false(hasattr(ct.transformers[0][1], 'mean_'))
assert_true(hasattr(ct.transformers_[0][1], 'mean_'))
ct = ColumnTransformer([('trans', StandardScaler(), [0])])
ct.fit_transform(X_array)
assert_false(hasattr(ct.transformers[0][1], 'mean_'))
assert_true(hasattr(ct.transformers_[0][1], 'mean_'))
def test_column_transformer_get_feature_names():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer([('trans', Trans(), [0, 1])])
# raise correct error when not fitted
assert_raises(NotFittedError, ct.get_feature_names)
# raise correct error when no feature names are available
ct.fit(X_array)
assert_raise_message(AttributeError,
"Transformer trans (type Trans) does not provide "
"get_feature_names", ct.get_feature_names)
# working example
X = np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}],
[{'c': 5}, {'c': 6}]], dtype=object).T
ct = ColumnTransformer(
[('col' + str(i), DictVectorizer(), i) for i in range(2)])
ct.fit(X)
assert_equal(ct.get_feature_names(), ['col0__a', 'col0__b', 'col1__c'])
# passthrough transformers not supported
ct = ColumnTransformer([('trans', 'passthrough', [0, 1])])
ct.fit(X)
assert_raise_message(
NotImplementedError, 'get_feature_names is not yet supported',
ct.get_feature_names)
ct = ColumnTransformer([('trans', DictVectorizer(), 0)],
remainder='passthrough')
ct.fit(X)
assert_raise_message(
NotImplementedError, 'get_feature_names is not yet supported',
ct.get_feature_names)
# drop transformer
ct = ColumnTransformer(
[('col0', DictVectorizer(), 0), ('col1', 'drop', 1)])
ct.fit(X)
assert_equal(ct.get_feature_names(), ['col0__a', 'col0__b'])
def test_column_transformer_special_strings():
# one 'drop' -> ignore
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer(
[('trans1', Trans(), [0]), ('trans2', 'drop', [1])])
exp = np.array([[0.], [1.], [2.]])
assert_array_equal(ct.fit_transform(X_array), exp)
assert_array_equal(ct.fit(X_array).transform(X_array), exp)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# all 'drop' -> return shape 0 array
ct = ColumnTransformer(
[('trans1', 'drop', [0]), ('trans2', 'drop', [1])])
assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))
assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# 'passthrough'
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer(
[('trans1', Trans(), [0]), ('trans2', 'passthrough', [1])])
exp = X_array
assert_array_equal(ct.fit_transform(X_array), exp)
assert_array_equal(ct.fit(X_array).transform(X_array), exp)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# None itself / other string is not valid
for val in [None, 'other']:
ct = ColumnTransformer(
[('trans1', Trans(), [0]), ('trans2', None, [1])])
assert_raise_message(TypeError, "All estimators should implement",
ct.fit_transform, X_array)
assert_raise_message(TypeError, "All estimators should implement",
ct.fit, X_array)
def test_column_transformer_remainder():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
X_res_second = np.array([2, 4, 6]).reshape(-1, 1)
X_res_both = X_array
# default drop
ct = ColumnTransformer([('trans1', Trans(), [0])])
assert_array_equal(ct.fit_transform(X_array), X_res_first)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'drop'
assert_array_equal(ct.transformers_[-1][2], [1])
# specify passthrough
ct = ColumnTransformer([('trans', Trans(), [0])], remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
# column order is not preserved (passed through added to end)
ct = ColumnTransformer([('trans1', Trans(), [1])],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1])
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1])
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [0])
# passthrough when all actual transformers are skipped
ct = ColumnTransformer([('trans1', 'drop', [0])],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_second)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
# error on invalid arg
ct = ColumnTransformer([('trans1', Trans(), [0])], remainder=1)
assert_raise_message(
ValueError,
"remainder keyword needs to be one of \'drop\', \'passthrough\', "
"or estimator.", ct.fit, X_array)
assert_raise_message(
ValueError,
"remainder keyword needs to be one of \'drop\', \'passthrough\', "
"or estimator.", ct.fit_transform, X_array)
# check default for make_column_transformer
ct = make_column_transformer(([0], Trans()))
assert ct.remainder == 'drop'
@pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1),
np.array([True, False])])
def test_column_transformer_remainder_numpy(key):
# test different ways that columns are specified with passthrough
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_both = X_array
ct = ColumnTransformer([('trans1', Trans(), key)],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize(
"key", [[0], slice(0, 1), np.array([True, False]), ['first'], 'pd-index',
np.array(['first']), np.array(['first'], dtype=object),
slice(None, 'first'), slice('first', 'first')])
def test_column_transformer_remainder_pandas(key):
# test different ways that columns are specified with passthrough
pd = pytest.importorskip('pandas')
if isinstance(key, six.string_types) and key == 'pd-index':
key = pd.Index(['first'])
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
X_res_both = X_array
ct = ColumnTransformer([('trans1', Trans(), key)],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1),
np.array([True, False, False])])
def test_column_transformer_remainder_transformer(key):
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
X_res_both = X_array.copy()
# second and third columns are doubled when remainder = DoubleTrans
X_res_both[:, 1:3] *= 2
ct = ColumnTransformer([('trans1', Trans(), key)],
remainder=DoubleTrans())
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], DoubleTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_no_remaining_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
ct = ColumnTransformer([('trans1', Trans(), [0, 1, 2])],
remainder=DoubleTrans())
assert_array_equal(ct.fit_transform(X_array), X_array)
assert_array_equal(ct.fit(X_array).transform(X_array), X_array)
assert len(ct.transformers_) == 1
assert ct.transformers_[-1][0] != 'remainder'
def test_column_transformer_drops_all_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
# columns are doubled when remainder = DoubleTrans
X_res_both = 2 * X_array.copy()[:, 1:3]
ct = ColumnTransformer([('trans1', 'drop', [0])],
remainder=DoubleTrans())
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], DoubleTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_sparse_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
ct = ColumnTransformer([('trans1', Trans(), [0])],
remainder=SparseMatrixTrans(),
sparse_threshold=0.8)
X_trans = ct.fit_transform(X_array)
assert sparse.issparse(X_trans)
# SparseMatrixTrans creates 3 features for each column. There is
# one column in ``transformers``, thus:
assert X_trans.shape == (3, 3 + 1)
exp_array = np.hstack(
(X_array[:, 0].reshape(-1, 1), np.eye(3)))
assert_array_equal(X_trans.toarray(), exp_array)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_drop_all_sparse_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
ct = ColumnTransformer([('trans1', 'drop', [0])],
remainder=SparseMatrixTrans(),
sparse_threshold=0.8)
X_trans = ct.fit_transform(X_array)
assert sparse.issparse(X_trans)
# SparseMatrixTrans creates 3 features for each column, thus:
assert X_trans.shape == (3, 3)
assert_array_equal(X_trans.toarray(), np.eye(3))
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_get_set_params_with_remainder():
ct = ColumnTransformer([('trans1', StandardScaler(), [0])],
remainder=StandardScaler())
exp = {'n_jobs': None,
'remainder': ct.remainder,
'remainder__copy': True,
'remainder__with_mean': True,
'remainder__with_std': True,
'sparse_threshold': 0.3,
'trans1': ct.transformers[0][1],
'trans1__copy': True,
'trans1__with_mean': True,
'trans1__with_std': True,
'transformers': ct.transformers,
'transformer_weights': None}
assert ct.get_params() == exp
ct.set_params(remainder__with_std=False)
assert not ct.get_params()['remainder__with_std']
ct.set_params(trans1='passthrough')
exp = {'n_jobs': None,
'remainder': ct.remainder,
'remainder__copy': True,
'remainder__with_mean': True,
'remainder__with_std': False,
'sparse_threshold': 0.3,
'trans1': 'passthrough',
'transformers': ct.transformers,
'transformer_weights': None}
assert ct.get_params() == exp
def test_column_transformer_no_estimators():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).astype('float').T
ct = ColumnTransformer([], remainder=StandardScaler())
params = ct.get_params()
assert params['remainder__with_mean']
X_trans = ct.fit_transform(X_array)
assert X_trans.shape == X_array.shape
assert len(ct.transformers_) == 1
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][2] == [0, 1, 2]
def test_column_transformer_no_estimators_set_params():
ct = ColumnTransformer([]).set_params(n_jobs=2)
assert ct.n_jobs == 2
def test_column_transformer_callable_specifier():
# assert that function gets the full array / dataframe
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first = np.array([[0, 1, 2]]).T
def func(X):
assert_array_equal(X, X_array)
return [0]
ct = ColumnTransformer([('trans', Trans(), func)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_array), X_res_first)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
pd = pytest.importorskip('pandas')
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
def func(X):
assert_array_equal(X.columns, X_df.columns)
assert_array_equal(X.values, X_df.values)
return ['first']
ct = ColumnTransformer([('trans', Trans(), func)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_df), X_res_first)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first)
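# A minimal usage sketch (illustrative only; not part of the original suite):
# scale one column with StandardScaler and pass the remainder through.
def test_column_transformer_usage_sketch():
    X = np.array([[0., 10.], [1., 20.], [2., 30.]])
    ct = ColumnTransformer([('scale', StandardScaler(), [0])],
                           remainder='passthrough')
    Xt = ct.fit_transform(X)
    assert Xt.shape == (3, 2)
    # the passed-through column keeps its original values
    assert_array_equal(Xt[:, 1], X[:, 1])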
from collections.abc import Sequence
import numpy as np
import pytest
try:
import rowan
skip_rowan = False
except ImportError:
skip_rowan = True
import hoomd
import hoomd.md as md
skip_rowan = pytest.mark.skipif(skip_rowan, reason="rowan cannot be imported.")
@pytest.fixture
def valid_body_definition():
return {
"constituent_types": ["B", "B", "B", "B"],
"positions": [
[1, 0, -1 / (2**(1. / 2.))],
[-1, 0, -1 / (2**(1. / 2.))],
[0, -1, 1 / (2**(1. / 2.))],
[0, 1, 1 / (2**(1. / 2.))],
],
"orientations": [(1.0, 0.0, 0.0, 0.0)] * 4,
"charges": [0.0, 1.0, 2.0, 3.5],
"diameters": [1.0, 1.5, 0.5, 1.0]
}
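# The four constituent positions in ``valid_body_definition`` are the vertices
# of a regular tetrahedron with edge length 2, centred on the central particle.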
def test_body_setting(valid_body_definition):
invalid_body_definitions = {
"constituent_types": [[4], "hello", ("A", 4)],
"positions": [[(1, 2)], [(1.0, 4.0, "foo")], 1.0, "hello"],
"orientations": [[(1, 2, 3)], [(1.0, 4.0, 5.0, "foo")], [1.0], 1.0,
"foo"],
"charges": [0.0, ["foo"]],
"diameters": [1.0, "foo", ["foo"]]
}
rigid = md.constrain.Rigid()
current_body_definition = {**valid_body_definition}
rigid.body["A"] = current_body_definition
for key, value in rigid.body["A"].items():
if (isinstance(value, Sequence) and len(value) > 0
and not isinstance(value[0], str)):
assert np.allclose(value, current_body_definition[key])
else:
assert value == current_body_definition[key]
# Test dictionaries with a single invalid input
for key, values in invalid_body_definitions.items():
for invalid_value in values:
current_body_definition[key] = invalid_value
with pytest.raises(hoomd.data.typeconverter.TypeConversionError):
rigid.body["A"] = current_body_definition
# Reset the body definition to a valid state to ensure only one key is
# ever invalid.
current_body_definition[key] = valid_body_definition[key]
def check_bodies(snapshot, definition):
"""Non-general assumes a snapshot from two_particle_snapshot_factory.
This is just to prevent duplication of code from test_create_bodies and
test_running_simulation.
"""
assert snapshot.particles.N == 10
assert all(snapshot.particles.typeid[3:] == 1)
assert snapshot.particles.body[0] == 0
assert all(snapshot.particles.body[2:6] == 0)
assert snapshot.particles.body[1] == 1
assert all(snapshot.particles.body[6:] == 1)
# check charges
for i in range(4):
assert snapshot.particles.charge[i + 2] == definition["charges"][i]
assert snapshot.particles.charge[i + 6] == definition["charges"][i]
# check diameters
for i in range(4):
assert snapshot.particles.diameter[i + 2] == definition["diameters"][i]
assert snapshot.particles.diameter[i + 6] == definition["diameters"][i]
particle_one = (snapshot.particles.position[0],
snapshot.particles.orientation[0])
particle_two = (snapshot.particles.position[1],
snapshot.particles.orientation[1])
# Check positions
def check_position(central_position, central_orientation,
constituent_position, local_position):
d_pos = rowan.rotate(central_orientation, local_position)
assert np.allclose(central_position + d_pos, constituent_position)
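    # A constituent's world-space position is the central particle's position
    # plus its body-frame (local) position rotated by the central particle's
    # orientation quaternion, which is what check_position verifies above.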
for i in range(4):
check_position(*particle_one, snapshot.particles.position[i + 2],
definition["positions"][i])
check_position(*particle_two, snapshot.particles.position[i + 6],
definition["positions"][i])
# check orientation
def check_orientation(central_orientation, constituent_orientation,
local_orientation):
expected_orientation = rowan.normalize(
rowan.multiply(central_orientation, local_orientation))
        assert np.allclose(expected_orientation, constituent_orientation)
for i in range(4):
check_orientation(particle_one[1],
snapshot.particles.orientation[i + 2],
definition["orientations"][i])
check_orientation(particle_two[1],
snapshot.particles.orientation[i + 6],
definition["orientations"][i])
@skip_rowan
def test_create_bodies(simulation_factory, two_particle_snapshot_factory,
lattice_snapshot_factory, valid_body_definition):
rigid = md.constrain.Rigid()
rigid.body["A"] = valid_body_definition
initial_snapshot = two_particle_snapshot_factory()
if initial_snapshot.communicator.rank == 0:
initial_snapshot.particles.types = ["A", "B"]
sim = simulation_factory(initial_snapshot)
rigid.create_bodies(sim.state)
snapshot = sim.state.get_snapshot()
if snapshot.communicator.rank == 0:
check_bodies(snapshot, valid_body_definition)
sim.operations.integrator = hoomd.md.Integrator(dt=0.005, rigid=rigid)
    # Ensure that body validation passes
sim.run(0)
# Second test with more general testing
# detach rigid
sim.operations.integrator.rigid = None
initial_snapshot = lattice_snapshot_factory(n=10)
if initial_snapshot.communicator.rank == 0:
initial_snapshot.particles.types = ["C", "A", "B"]
# Grab the middle particles and a random one to ensure that particle
# type ordering with respect to particle tag does not matter for
# create_bodies.
initial_snapshot.particles.typeid[100:800] = 1
initial_snapshot.particles.typeid[55] = 1
sim = simulation_factory(initial_snapshot)
rigid.create_bodies(sim.state)
snapshot = sim.state.get_snapshot()
if snapshot.communicator.rank == 0:
# Check central particles
central_tags = np.empty(701, dtype=int)
central_tags[0] = 55
central_tags[1:] = np.arange(100, 800)
assert np.all(snapshot.particles.body[central_tags] == central_tags)
# Check free bodies
assert np.all(snapshot.particles.body[:55] == -1)
assert np.all(snapshot.particles.body[56:100] == -1)
assert np.all(snapshot.particles.body[800:1000] == -1)
# Check constituent_particles
assert np.all(
snapshot.particles.body[1000:] == np.repeat(central_tags, 4))
sim.operations.integrator = hoomd.md.Integrator(dt=0.005, rigid=rigid)
    # Ensure that body validation passes
sim.run(0)
def test_attaching(simulation_factory, two_particle_snapshot_factory,
valid_body_definition):
rigid = md.constrain.Rigid()
rigid.body["A"] = valid_body_definition
langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
integrator = md.Integrator(dt=0.005, methods=[langevin])
integrator.rigid = rigid
initial_snapshot = two_particle_snapshot_factory()
if initial_snapshot.communicator.rank == 0:
initial_snapshot.particles.types = ["A", "B"]
sim = simulation_factory(initial_snapshot)
rigid.create_bodies(sim.state)
sim.operations += integrator
sim.run(0)
for key, value in rigid.body["A"].items():
if (isinstance(value, Sequence) and len(value) > 0
and not isinstance(value[0], str)):
assert np.allclose(value, valid_body_definition[key])
else:
assert value == valid_body_definition[key]
@pytest.mark.serial
def test_error_on_invalid_body(simulation_factory,
two_particle_snapshot_factory,
valid_body_definition):
rigid = md.constrain.Rigid()
rigid.body["A"] = valid_body_definition
langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
integrator = md.Integrator(dt=0.005, methods=[langevin])
integrator.rigid = rigid
initial_snapshot = two_particle_snapshot_factory()
if initial_snapshot.communicator.rank == 0:
initial_snapshot.particles.types = ["A", "B"]
sim = simulation_factory(initial_snapshot)
sim.operations += integrator
with pytest.raises(RuntimeError):
sim.run(0)
@skip_rowan
def test_running_simulation(simulation_factory, two_particle_snapshot_factory,
valid_body_definition):
rigid = md.constrain.Rigid()
rigid.body["A"] = valid_body_definition
langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
lj = hoomd.md.pair.LJ(nlist=md.nlist.Cell(), mode="shift")
lj.params.default = {"epsilon": 0.0, "sigma": 1}
lj.params[("A", "A")] = {"epsilon": 1.0}
lj.params[("B", "B")] = {"epsilon": 1.0}
lj.r_cut.default = 2**(1.0 / 6.0)
integrator = md.Integrator(dt=0.005, methods=[langevin], forces=[lj])
integrator.rigid = rigid
initial_snapshot = two_particle_snapshot_factory()
if initial_snapshot.communicator.rank == 0:
initial_snapshot.particles.types = ["A", "B"]
sim = simulation_factory(initial_snapshot)
sim.seed = 5
rigid.create_bodies(sim.state)
sim.operations += integrator
sim.run(5)
snapshot = sim.state.get_snapshot()
if sim.device.communicator.rank == 0:
check_bodies(snapshot, valid_body_definition)
def test_running_without_body_definition(simulation_factory,
two_particle_snapshot_factory):
rigid = md.constrain.Rigid()
langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
lj = hoomd.md.pair.LJ(nlist=md.nlist.Cell(), mode="shift")
lj.params.default = {"epsilon": 0.0, "sigma": 1}
lj.params[("A", "A")] = {"epsilon": 1.0}
lj.params[("B", "B")] = {"epsilon": 1.0}
lj.r_cut.default = 2**(1.0 / 6.0)
integrator = md.Integrator(dt=0.005, methods=[langevin], forces=[lj])
integrator.rigid = rigid
initial_snapshot = two_particle_snapshot_factory()
if initial_snapshot.communicator.rank == 0:
initial_snapshot.particles.types = ["A", "B"]
sim = simulation_factory(initial_snapshot)
sim.seed = 5
sim.operations += integrator
sim.run(1)
@pytest.mark.serial
def test_setting_body_after_attaching(simulation_factory,
two_particle_snapshot_factory,
valid_body_definition):
rigid = md.constrain.Rigid()
langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
lj = hoomd.md.pair.LJ(nlist=md.nlist.Cell(), mode="shift")
lj.params.default = {"epsilon": 0.0, "sigma": 1}
lj.params[("A", "A")] = {"epsilon": 1.0}
lj.params[("B", "B")] = {"epsilon": 1.0}
lj.r_cut.default = 2**(1.0 / 6.0)
integrator = md.Integrator(dt=0.005, methods=[langevin], forces=[lj])
integrator.rigid = rigid
initial_snapshot = two_particle_snapshot_factory()
if initial_snapshot.communicator.rank == 0:
initial_snapshot.particles.types = ["A", "B"]
sim = simulation_factory(initial_snapshot)
sim.seed = 5
sim.operations += integrator
sim.run(1)
rigid.body["A"] = valid_body_definition
# This should error because the bodies have not been updated, but the
# setting should be fine.
with pytest.raises(RuntimeError):
sim.run(1)
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 22:34:46 2016
"""
#!/usr/bin/env python
import json
import socket
import smbus
import os
import hashlib
import httplib
import time
import threading
from M2Crypto import m2
from M2Crypto import RSA
class AVRChip(object):
"""
Class for communicating with the AVR on the CryptoCape.
"""
AVR_ADDR = 0x42
def __init__(self):
self.bus = smbus.SMBus(1) # /dev/i2c-1
def led_on(self):
self.bus.write_byte(AVRChip.AVR_ADDR, 0x02)
def led_off(self):
self.bus.write_byte(AVRChip.AVR_ADDR, 0x03)
def get_status(self):
#self.bus.write_byte(self.AVR_ADDR, 0x01)
status_list = self.getByte()
return status_list
def send_data(self, data):
return self.bus.write_i2c_block_data(self.AVR_ADDR, data[0], data[1:])
def resetStatus(self):
return self.bus.write_i2c_block_data(self.AVR_ADDR, 12, [1,1,1])
def getByte(self):
bite = self.bus.read_byte(self.AVR_ADDR)
return bite
def sendSecret(self,secret):
print 'sending secret'
pack1 = [0x02, 0x00, 0x00] + secret[:16]
pack2 = [0x02, 0x01, 0x10] + secret[16:]
print(pack1)
print(pack2)
self.send_data(pack1)
self.send_data(pack2)
return True
def sendUnlockChal(self, challenge):
print 'sending unlock challenge'
pack1 = [0x03, 0x00, 0x00] + challenge[:16]
pack2 = [0x03, 0x01, 0x10] + challenge[16:]
self.send_data(pack1)
self.send_data(pack2)
return True
def sendPinChal(self, challenge):
print 'sending pin challenge'
pack1 = [0x04, 0x00, 0x00] + challenge[:16]
pack2 = [0x04, 0x01, 0x10] + challenge[16:]
self.send_data(pack1)
self.send_data(pack2)
return True
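# Rough usage sketch of the class above (requires the CryptoCape hardware on
# /dev/i2c-1; kept as a comment so importing this module stays side-effect free):
#
#     avr = AVRChip()
#     avr.led_on()               # drive the status LED
#     status = avr.get_status()  # poll the one-byte status register
#     avr.resetStatus()          # clear the status once it has been handled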
class ARM(object):
def __init__(self, avr):
self.avr = avr
try:
f = open('fingerprint')
self.fpr = f.read()
f.close()
        except IOError:
print("device is not registered")
def registration(self):
print 'registration reached'
self.fpr = os.urandom(32).encode('hex')
payload = json.dumps({'fingerprint': self.fpr})
headers = {"Content-type": "application/json"}
c = httplib.HTTPConnection('192.168.7.1:5000')
c.request("POST", "/registration_1", payload, headers) #ARM sends public key to server
r = c.getresponse() # Server receives public key
if r.status == 200: #Verification from Server
f = open('fingerprint', 'w')
f.write(self.fpr)
f.close()
d = r.read()
data = json.loads(d) #ARM receives the public key
sPubkey = data['DHPub']
prime = data['DHPrime']
sPubkey = int(sPubkey, 16)
prime = int(prime, 16)
pk = self.genPrivateKey(256) #32 byte private
generator = 2
dhpub = pow(generator, pk, prime)
dhpubh = hex(dhpub)[2:-1]
ss = pow(sPubkey, pk, prime)
ss = [ord(x) for x in list(hashlib.sha256(hex(ss)[2:-1]).digest())]
self.avr.sendSecret(ss)
else:
return False
payload = json.dumps({'fingerprint': self.fpr, 'DHPub': dhpubh})
c = httplib.HTTPConnection('192.168.7.1:5000')
c.request("POST", "/registration_2", payload, headers) #ARM sends server DHPub Value
r = c.getresponse()
if r.status == 200:
print('registration complete')
return True #Registration Complete
else:
print('registration failed')
return False
def unlock(self):
fpr = self.fpr
challenge = json.dumps({'fingerprint': fpr})
headers = {"Content-type": "application/json"}
c = httplib.HTTPConnection('192.168.7.1:5000')
c.request("POST", "/start_unlock", challenge, headers)
r = c.getresponse()
if r.status == 200: # Making sure communication with the server is performed correctly
d = r.read()
data = json.loads(d)
challenge = data['challenge']
from binascii import unhexlify as unhex
challenge = [ord(x) for x in list(unhex(challenge))]
self.avr.sendUnlockChal(challenge)
# AVR creates HMAC(key, challenge + pin) and sends it to ARM
hmac = []
while self.avr.get_status() != 0xA4:
pass
for i in range(32):
hmac.append(self.avr.getByte())
hmac = "".join([ hex(x)[2:] if len(hex(x)) == 4 else "0"+hex(x)[2] for x in hmac])
# POST /unlock
unlocking = json.dumps({'hash': hmac, 'fingerprint': fpr})
c = httplib.HTTPConnection('192.168.7.1:5000')
c.request("POST", "/unlock", unlocking, headers)
r = c.getresponse()
if r.status == 200: #Server validates the request
d = r.read()
data = json.loads(d)
flag = data['flag']
return flag #Unlock Complete
else:
return False
else:
return False
def changePIN(self):
        # fpr is an instance attribute, set in __init__ or during registration
fpr = self.fpr
challenge = json.dumps({'fingerprint': fpr})
headers = {"Content-type": "application/json"}
c = httplib.HTTPConnection('192.168.7.1:5000')
c.request("POST", "/start_pin_change", challenge, headers)
r = c.getresponse()
if r.status == 200:
d = r.read()
data = json.loads(d)
from binascii import unhexlify as unhex
challenge = data['challenge']
challenge = [ord(x) for x in list(unhex(challenge))]
self.avr.sendPinChal(challenge)
# AVR creates HMAC(key, challenge + pin) and sends it to ARM
from time import sleep
sleep(0.5)
hmac = []
while self.avr.get_status() != 0xA7:
# read in hmac
pass
for i in range(64):
hmac.append(self.avr.get_status())
#hmac.append(self.avr.getByte())
hmac = "".join([ hex(x)[2:] if len(hex(x)) == 4 else "0"+hex(x)[2] for x in hmac])
data = json.dumps({'hash': hmac})
headers = {"Content-type": "application/json"}
c = httplib.HTTPConnection('192.168.7.1:5000')
pin_change = json.dumps({'hash': hmac, 'fingerprint': fpr})
c.request("POST", '/pin_change', pin_change, headers)
r = c.getresponse()
if r.status == 200: #Server verifies the signature
return True #changePIN Complete
else:
return False
else:
return False
def genRandom(self, bits):
"""
Generate a random number with the specified number of bits
"""
_rand = 0
_bytes = bits // 8
while(_rand.bit_length() < bits):
# Python 2
_rand = int(os.urandom(_bytes).encode('hex'), 16)
return _rand
def genPrivateKey(self, bits):
"""
Generate a private key using a secure random number generator.
"""
return self.genRandom(bits)
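# Illustrative sketch (not called anywhere) of the Diffie-Hellman agreement that
# registration() performs: both sides exponentiate the generator (2) modulo the
# server-supplied prime, and the SHA-256 digest of the hex-encoded shared secret
# becomes the 32-byte key handed to the AVR via sendSecret(). The function name
# and arguments here are hypothetical.
def _dh_shared_key(server_public, private_key, prime):
    shared_secret = pow(server_public, private_key, prime)
    return hashlib.sha256(hex(shared_secret)[2:-1]).digest()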
class Logger(object):
"""
Logs information to connections on port 6000. Simple way of accessing logs
using netcat:
nc 192.168.7.2 6000
"""
LOGGER_PORT = 6000
def __init__(self):
self.listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.conns = []
self.thread = threading.Thread(target = self.accept_thread)
self.thread.start()
def accept_thread(self):
"""
Simple thread that waits for connections and appends them to the list
as they come in.
"""
self.listen_socket.bind(('', Logger.LOGGER_PORT))
self.listen_socket.listen(1)
while True:
try:
conn, _ = self.listen_socket.accept()
self.conns.append(conn)
except:
break
def close(self):
# forces accept() in accept_thread to raise an exception
self.listen_socket.shutdown(socket.SHUT_RDWR)
self.listen_socket.close()
self.thread.join()
def message(self, msg):
bad_conns = []
for conn in self.conns:
try:
conn.sendall(msg + "\n")
except socket.error:
bad_conns.append(conn)
for bad_conn in bad_conns:
self.conns.remove(bad_conn)
def info(self, msg):
self.message("INFO: " + msg)
def error(self, msg):
self.message("Error: " + msg)
def avr_indicate_success(avr):
"""
    Indicate a successful operation by turning on the LED for 3 seconds (via correct.sh).
"""
os.system('sh correct.sh')
def avr_indicate_failure(avr):
"""
    Indicate a failure by blinking the LED quickly 3 times (via incorrect.sh).
"""
os.system('sh incorrect.sh')
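# Status bytes polled from the AVR in main() below, as used throughout this file:
#   0xA1 - start registration      0xA2 - start unlock
#   0xA4 - unlock HMAC ready       0xA5 - start PIN change
#   0xA7 - PIN-change HMAC ready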
def main():
"""
    Main program loop. Sets up the AVR connection and the logger, then polls the
    AVR status byte and dispatches registration, unlock, and PIN-change requests.
"""
avr = AVRChip()
arm = ARM(avr)
#avr.led_off()
logger = Logger()
while (True):
c = avr.get_status()
if c == 0xA1:
print c
if arm.registration():
logger.info('Registration successful')
avr_indicate_success(avr)
else:
                logger.info('Registration unsuccessful')
avr_indicate_failure(avr)
avr.resetStatus()
if c == 0xA2:
print c
flag = arm.unlock()
if flag:
                print 'unlock success'
logger.info('Unlock successful %s' % flag)
avr_indicate_success(avr)
else:
print 'unlock failure'
logger.info('Unlock unsuccessful')
avr_indicate_failure(avr)
avr.resetStatus()
if c == 0xA5:
print c
if arm.changePIN():
                print 'change pin successful'
                logger.info('Pin change successful')
avr_indicate_success(avr)
else:
                logger.info('Pin change unsuccessful')
avr_indicate_failure(avr)
avr.resetStatus()
# except KeyboardInterrupt:
# pass
# finally:
# logger.close()
if __name__ == '__main__':
time.sleep(5)
os.system('sh program_avr.sh')
time.sleep(5)
while True:
try:
main()
except:
pass
|
|
import gzip
import json
import logging
import os
import tempfile
import zipfile
import pytest
import retrying
import test_helpers
from dcos_test_utils.diagnostics import Diagnostics
from dcos_test_utils.helpers import check_json
__maintainer__ = 'mnaboka'
__contact__ = '[email protected]'
# Expected latency for all dcos-diagnostics units to refresh after postflight plus
# another minute to allow for check-time to settle. See: DCOS_OSS-988
LATENCY = 120
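# With wait_fixed=2000 and stop_max_delay=LATENCY * 1000, the retrying decorators
# below re-run a failing check every 2 seconds for up to 120 seconds before giving up.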
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_health(dcos_api_session):
"""
test health endpoint /system/health/v1
"""
required_fields = ['units', 'hostname', 'ip', 'dcos_version', 'node_role', 'mesos_id', 'dcos_diagnostics_version']
required_fields_unit = ['id', 'health', 'output', 'description', 'help', 'name']
    # Check all masters' dcos-diagnostics instances on the base port, since this is an extra-cluster request (outside localhost)
for host in dcos_api_session.masters:
response = check_json(dcos_api_session.health.get('/', node=host))
assert len(response) == len(required_fields), 'response must have the following fields: {}'.format(
', '.join(required_fields)
)
# validate units
assert 'units' in response, 'units field not found'
assert isinstance(response['units'], list), 'units field must be a list'
assert len(response['units']) > 0, 'units field cannot be empty'
for unit in response['units']:
assert len(unit) == len(required_fields_unit), 'unit must have the following fields: {}'.format(
', '.join(required_fields_unit)
)
for required_field_unit in required_fields_unit:
                assert required_field_unit in unit, '{} must be in a unit response'.format(required_field_unit)
# id, health and description cannot be empty
assert unit['id'], 'id field cannot be empty'
assert unit['health'] in [0, 1], 'health field must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
# check all required fields but units
for required_field in required_fields[1:]:
assert required_field in response, '{} field not found'.format(required_field)
assert response[required_field], '{} cannot be empty'.format(required_field)
# Check all agents running dcos-diagnostics behind agent-adminrouter on 61001
for host in dcos_api_session.slaves:
response = check_json(dcos_api_session.health.get('/', node=host))
assert len(response) == len(required_fields), 'response must have the following fields: {}'.format(
', '.join(required_fields)
)
# validate units
assert 'units' in response, 'units field not found'
assert isinstance(response['units'], list), 'units field must be a list'
assert len(response['units']) > 0, 'units field cannot be empty'
for unit in response['units']:
assert len(unit) == len(required_fields_unit), 'unit must have the following fields: {}'.format(
', '.join(required_fields_unit)
)
for required_field_unit in required_fields_unit:
                assert required_field_unit in unit, '{} must be in a unit response'.format(required_field_unit)
# id, health and description cannot be empty
assert unit['id'], 'id field cannot be empty'
assert unit['health'] in [0, 1], 'health field must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
# check all required fields but units
for required_field in required_fields[1:]:
assert required_field in response, '{} field not found'.format(required_field)
assert response[required_field], '{} cannot be empty'.format(required_field)
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_nodes(dcos_api_session):
"""
test a list of nodes with statuses endpoint /system/health/v1/nodes
"""
for master in dcos_api_session.masters:
response = check_json(dcos_api_session.health.get('/nodes', node=master))
assert len(response) == 1, 'nodes response must have only one field: nodes'
assert 'nodes' in response
assert isinstance(response['nodes'], list)
assert len(response['nodes']) == len(dcos_api_session.masters + dcos_api_session.all_slaves), \
            ('the number of nodes in the response must be {}'.
format(len(dcos_api_session.masters + dcos_api_session.all_slaves)))
# test nodes
validate_node(response['nodes'])
@pytest.mark.supportedwindows
def test_dcos_diagnostics_nodes_node(dcos_api_session):
"""
    test a specific node endpoint /system/health/v1/nodes/<node>
"""
for master in dcos_api_session.masters:
# get a list of nodes
response = check_json(dcos_api_session.health.get('/nodes', node=master))
nodes = list(map(lambda node: node['host_ip'], response['nodes']))
for node in nodes:
node_response = check_json(dcos_api_session.health.get('/nodes/{}'.format(node), node=master))
validate_node([node_response])
@pytest.mark.supportedwindows
def test_dcos_diagnostics_nodes_node_units(dcos_api_session):
"""
test a list of units from a specific node, endpoint /system/health/v1/nodes/<node>/units
"""
for master in dcos_api_session.masters:
# get a list of nodes
response = check_json(dcos_api_session.health.get('/nodes', node=master))
nodes = list(map(lambda node: node['host_ip'], response['nodes']))
for node in nodes:
units_response = check_json(dcos_api_session.health.get('/nodes/{}/units'.format(node), node=master))
assert len(units_response) == 1, 'unit response should have only 1 field `units`'
assert 'units' in units_response
validate_units(units_response['units'])
@pytest.mark.supportedwindows
def test_dcos_diagnostics_nodes_node_units_unit(dcos_api_session):
"""
test a specific unit for a specific node, endpoint /system/health/v1/nodes/<node>/units/<unit>
"""
for master in dcos_api_session.masters:
response = check_json(dcos_api_session.health.get('/nodes', node=master))
nodes = list(map(lambda node: node['host_ip'], response['nodes']))
for node in nodes:
units_response = check_json(dcos_api_session.health.get('/nodes/{}/units'.format(node), node=master))
unit_ids = list(map(lambda unit: unit['id'], units_response['units']))
for unit_id in unit_ids:
validate_unit(
check_json(dcos_api_session.health.get('/nodes/{}/units/{}'.format(node, unit_id), node=master)))
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_units(dcos_api_session):
"""
test a list of collected units, endpoint /system/health/v1/units
"""
# get all unique unit names
all_units = set()
for node in dcos_api_session.masters:
node_response = check_json(dcos_api_session.health.get('/', node=node))
for unit in node_response['units']:
all_units.add(unit['id'])
for node in dcos_api_session.all_slaves:
node_response = check_json(dcos_api_session.health.get('/', node=node))
for unit in node_response['units']:
all_units.add(unit['id'])
# test against masters
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
validate_units(units_response['units'])
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
logging.info('collected units: {}'.format(pulled_units))
diff = set(pulled_units).symmetric_difference(all_units)
assert set(pulled_units) == all_units, ('not all units have been collected by dcos-diagnostics '
'puller, missing: {}'.format(diff))
@pytest.mark.supportedwindows
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_systemd_units_health(dcos_api_session):
"""
    test all units and make sure the units are healthy. This test will fail if any systemd unit is unhealthy,
    meaning it focuses on making sure the dcos_api_session is healthy, rather than testing dcos-diagnostics itself.
"""
unhealthy_output = []
assert dcos_api_session.masters, "Must have at least 1 master node"
report_response = check_json(dcos_api_session.health.get('/report', node=dcos_api_session.masters[0]))
assert 'Units' in report_response, "Missing `Units` field in response"
for unit_name, unit_props in report_response['Units'].items():
assert 'Health' in unit_props, "Unit {} missing `Health` field".format(unit_name)
if unit_props['Health'] != 0:
assert 'Nodes' in unit_props, "Unit {} missing `Nodes` field".format(unit_name)
            assert isinstance(unit_props['Nodes'], list), 'Field `Nodes` must be a list'
for node in unit_props['Nodes']:
assert 'Health' in node, 'Field `Health` is expected to be in nodes properties, got {}'.format(node)
if node['Health'] != 0:
assert 'Output' in node, 'Field `Output` is expected to be in nodes properties, got {}'.format(node)
assert isinstance(node['Output'], dict), 'Field `Output` must be a dict'
assert unit_name in node['Output'], 'unit {} must be in node Output, got {}'.format(unit_name,
node['Output'])
assert 'IP' in node, 'Field `IP` is expected to be in nodes properties, got {}'.format(node)
unhealthy_output.append(
'Unhealthy unit {} has been found on node {}, health status {}. journalctl output {}'.format(
unit_name, node['IP'], unit_props['Health'], node['Output'][unit_name]))
if unhealthy_output:
raise AssertionError('\n'.join(unhealthy_output))
@pytest.mark.supportedwindows
def test_dcos_diagnostics_units_unit(dcos_api_session):
"""
test a unit response in a right format, endpoint: /system/health/v1/units/<unit>
"""
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
for unit in pulled_units:
unit_response = check_json(dcos_api_session.health.get('/units/{}'.format(unit), node=master))
validate_units([unit_response])
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def test_dcos_diagnostics_units_unit_nodes(dcos_api_session):
"""
test a list of nodes for a specific unit, endpoint /system/health/v1/units/<unit>/nodes
"""
def get_nodes_from_response(response):
assert 'nodes' in response, 'response must have field `nodes`. Got {}'.format(response)
nodes_ip_map = make_nodes_ip_map(dcos_api_session)
nodes = []
for node in response['nodes']:
assert 'host_ip' in node, 'node response must have `host_ip` field. Got {}'.format(node)
            assert node['host_ip'] in nodes_ip_map, 'nodes_ip_map must have node {}. Got {}'.format(node['host_ip'],
nodes_ip_map)
nodes.append(nodes_ip_map.get(node['host_ip']))
return nodes
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
for unit in pulled_units:
nodes_response = check_json(dcos_api_session.health.get('/units/{}/nodes'.format(unit), node=master))
validate_node(nodes_response['nodes'])
# make sure dcos-mesos-master.service has master nodes and dcos-mesos-slave.service has agent nodes
master_nodes_response = check_json(
dcos_api_session.health.get('/units/dcos-mesos-master.service/nodes', node=master))
master_nodes = get_nodes_from_response(master_nodes_response)
assert len(master_nodes) == len(dcos_api_session.masters), \
'{} != {}'.format(master_nodes, dcos_api_session.masters)
assert set(master_nodes) == set(dcos_api_session.masters), 'a list of difference: {}'.format(
set(master_nodes).symmetric_difference(set(dcos_api_session.masters))
)
agent_nodes_response = check_json(
dcos_api_session.health.get('/units/dcos-mesos-slave.service/nodes', node=master))
agent_nodes = get_nodes_from_response(agent_nodes_response)
assert len(agent_nodes) == len(dcos_api_session.slaves), '{} != {}'.format(agent_nodes, dcos_api_session.slaves)
@pytest.mark.supportedwindows
def test_dcos_diagnostics_units_unit_nodes_node(dcos_api_session):
"""
test a specific node for a specific unit, endpoint /system/health/v1/units/<unit>/nodes/<node>
"""
required_node_fields = ['host_ip', 'health', 'role', 'output', 'help']
for master in dcos_api_session.masters:
units_response = check_json(dcos_api_session.health.get('/units', node=master))
pulled_units = list(map(lambda unit: unit['id'], units_response['units']))
for unit in pulled_units:
nodes_response = check_json(dcos_api_session.health.get('/units/{}/nodes'.format(unit), node=master))
pulled_nodes = list(map(lambda node: node['host_ip'], nodes_response['nodes']))
logging.info('pulled nodes: {}'.format(pulled_nodes))
for node in pulled_nodes:
node_response = check_json(
dcos_api_session.health.get('/units/{}/nodes/{}'.format(unit, node), node=master))
assert len(node_response) == len(required_node_fields), 'required fields: {}'.format(
                    ', '.join(required_node_fields)
)
for required_node_field in required_node_fields:
assert required_node_field in node_response, 'field {} must be set'.format(required_node_field)
# host_ip, health, role, help cannot be empty
assert node_response['host_ip'], 'host_ip field cannot be empty'
assert node_response['health'] in [0, 1], 'health must be 0 or 1'
assert node_response['role'], 'role field cannot be empty'
assert node_response['help'], 'help field cannot be empty'
@pytest.mark.supportedwindows
def test_dcos_diagnostics_report(dcos_api_session):
"""
test dcos-diagnostics report endpoint /system/health/v1/report
"""
for master in dcos_api_session.masters:
report_response = check_json(dcos_api_session.health.get('/report', node=master))
assert 'Units' in report_response
assert len(report_response['Units']) > 0
assert 'Nodes' in report_response
assert len(report_response['Nodes']) > 0
def test_dcos_diagnostics_bundle_create_download_delete(dcos_api_session):
"""
test bundle create, read, delete workflow
"""
app, test_uuid = test_helpers.marathon_test_app()
with dcos_api_session.marathon.deploy_and_cleanup(app):
bundle = _create_bundle(dcos_api_session)
_check_diagnostics_bundle_status(dcos_api_session)
_download_and_extract_bundle(dcos_api_session, bundle)
_download_and_extract_bundle_from_another_master(dcos_api_session, bundle)
_delete_bundle(dcos_api_session, bundle)
def _check_diagnostics_bundle_status(dcos_api_session):
# validate diagnostics job status response
diagnostics_bundle_status = check_json(dcos_api_session.health.get('/report/diagnostics/status/all'))
required_status_fields = ['is_running', 'status', 'last_bundle_dir', 'job_started',
'diagnostics_bundle_dir', 'diagnostics_job_timeout_min',
'journald_logs_since_hours', 'diagnostics_job_get_since_url_timeout_min',
'command_exec_timeout_sec', 'diagnostics_partition_disk_usage_percent',
'job_progress_percentage']
for _, properties in diagnostics_bundle_status.items():
for required_status_field in required_status_fields:
assert required_status_field in properties, 'property {} not found'.format(required_status_field)
def _create_bundle(dcos_api_session):
last_datapoint = {
'time': None,
'value': 0
}
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
create_response = diagnostics.start_diagnostics_job().json()
diagnostics.wait_for_diagnostics_job(last_datapoint=last_datapoint)
diagnostics.wait_for_diagnostics_reports()
bundles = diagnostics.get_diagnostics_reports()
assert len(bundles) == 1, 'bundle file not found'
assert bundles[0] == create_response['extra']['bundle_name']
return create_response['extra']['bundle_name']
def _delete_bundle(dcos_api_session, bundle):
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
bundles = diagnostics.get_diagnostics_reports()
assert bundle in bundles, 'not found {} in {}'.format(bundle, bundles)
dcos_api_session.health.post(os.path.join('/report/diagnostics/delete', bundle))
bundles = diagnostics.get_diagnostics_reports()
assert bundle not in bundles, 'found {} in {}'.format(bundle, bundles)
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def _download_and_extract_bundle(dcos_api_session, bundle):
_download_bundle_from_master(dcos_api_session, 0, bundle)
@retrying.retry(wait_fixed=2000, stop_max_delay=LATENCY * 1000)
def _download_and_extract_bundle_from_another_master(dcos_api_session, bundle):
if len(dcos_api_session.masters) > 1:
_download_bundle_from_master(dcos_api_session, 1, bundle)
def _download_bundle_from_master(dcos_api_session, master_index, bundle):
""" Download DC/OS diagnostics bundle from a master
:param dcos_api_session: dcos_api_session fixture
:param master_index: master index from dcos_api_session.masters array
:param bundle: bundle name to download from master
"""
assert len(dcos_api_session.masters) >= master_index + 1, '{} masters required. Got {}'.format(
master_index + 1, len(dcos_api_session.masters))
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
bundles = diagnostics.get_diagnostics_reports()
assert bundle in bundles, 'not found {} in {}'.format(bundle, bundles)
expected_common_files = ['dmesg_-T.output.gz',
'ip_addr.output.gz',
'ip_route.output.gz',
'ps_aux_ww_Z.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1vips.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1records.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdefault.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdns.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsmesos_listener.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricslashup.output.gz',
'timedatectl.output.gz',
'binsh_-c_cat etc*-release.output.gz',
'systemctl_list-units_dcos*.output.gz',
'sestatus.output.gz',
'iptables-save.output.gz',
'ip6tables-save.output.gz',
'ipset_list.output.gz',
'opt/mesosphere/active.buildinfo.full.json.gz',
'opt/mesosphere/etc/dcos-version.json.gz',
'opt/mesosphere/etc/expanded.config.json.gz',
'opt/mesosphere/etc/user.config.yaml.gz',
'dcos-diagnostics-health.json',
'var/lib/dcos/cluster-id.gz',
'proc/cmdline.gz',
'proc/cpuinfo.gz',
'proc/meminfo.gz',
'proc/self/mountinfo.gz',
'optmesospherebindetect_ip.output.gz',
'sysctl_-a.output.gz',
]
# these files are expected to be in archive for a master host
expected_master_files = [
'binsh_-c_cat proc`systemctl show dcos-mesos-master.service -p MainPID| cut -d\'=\' -f2`environ.output.gz',
'5050-quota.json',
'5050-overlay-master_state.json.gz',
'dcos-mesos-master.service.gz',
'var/lib/dcos/exhibitor/zookeeper/snapshot/myid.gz',
'var/lib/dcos/exhibitor/conf/zoo.cfg.gz',
'var/lib/dcos/mesos/log/mesos-master.log.gz',
'var/lib/dcos/mesos/log/mesos-master.log.1.gz',
'var/lib/dcos/mesos/log/mesos-master.log.2.gz.gz',
'var/lib/dcos/mesos/log/mesos-master.log.3.gz.gz',
] + expected_common_files
expected_agent_common_files = [
'5051-containers.json',
'5051-overlay-agent_overlay.json',
'var/log/mesos/mesos-agent.log.gz',
'docker_--version.output.gz',
'docker_ps.output.gz',
]
# for agent host
expected_agent_files = [
'dcos-mesos-slave.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
# for public agent host
expected_public_agent_files = [
'dcos-mesos-slave-public.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave-public.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
def _read_from_zip(z: zipfile.ZipFile, item: str, to_json=True):
# raises KeyError if item is not in zipfile.
item_content = z.read(item).decode()
if to_json:
# raises ValueError if cannot deserialize item_content.
return json.loads(item_content)
return item_content
def _get_dcos_diagnostics_health(z: zipfile.ZipFile, item: str):
# try to load dcos-diagnostics health report and validate the report is for this host
try:
_health_report = _read_from_zip(z, item)
except KeyError:
# we did not find a key in archive, let's take a look at items in archive and try to read
# diagnostics logs.
# namelist() gets a list of all items in a zip archive.
logging.info(z.namelist())
# summaryErrorsReport.txt and summaryReport.txt are diagnostic job log files.
for log in ('summaryErrorsReport.txt', 'summaryReport.txt'):
try:
log_data = _read_from_zip(z, log, to_json=False)
logging.info("{}:\n{}".format(log, log_data))
except KeyError:
logging.info("Could not read {}".format(log))
raise
except ValueError:
logging.info("Could not deserialize dcos-diagnostics-health")
raise
return _health_report
with tempfile.TemporaryDirectory() as tmp_dir:
bundle_full_location = os.path.join(tmp_dir, bundle)
with open(bundle_full_location, 'wb') as f:
r = dcos_api_session.health.get(os.path.join('/report/diagnostics/serve', bundle), stream=True,
node=dcos_api_session.masters[master_index])
for chunk in r.iter_content(1024):
f.write(chunk)
# validate bundle zip file.
assert zipfile.is_zipfile(bundle_full_location)
z = zipfile.ZipFile(bundle_full_location)
# get a list of all files in a zip archive.
archived_items = z.namelist()
# validate error log is empty
if 'summaryErrorsReport.txt' in archived_items:
log_data = _read_from_zip(z, 'summaryErrorsReport.txt', to_json=False)
raise AssertionError('summaryErrorsReport.txt must be empty. Got {}'.format(log_data))
# validate all files in zip archive are not empty
for item in archived_items:
assert z.getinfo(item).file_size, 'item {} is empty'.format(item)
# make sure all required log files for master node are in place.
for master_ip in dcos_api_session.masters:
master_folder = master_ip + '_master/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, master_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == master_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(master_folder + 'dcos-mesos-master.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(master_folder, archived_items, expected_master_files)
gzipped_state_output = z.open(master_folder + '5050-master_state.json.gz')
validate_state(gzipped_state_output)
# make sure all required log files for agent node are in place.
for slave_ip in dcos_api_session.slaves:
agent_folder = slave_ip + '_agent/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_folder + 'dcos-mesos-slave.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_folder, archived_items, expected_agent_files)
# make sure all required log files for public agent node are in place.
for public_slave_ip in dcos_api_session.public_slaves:
agent_public_folder = public_slave_ip + '_agent_public/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_public_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == public_slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_public_folder + 'dcos-mesos-slave-public.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_public_folder, archived_items, expected_public_agent_files)
def make_nodes_ip_map(dcos_api_session):
"""
a helper function to make a map detected_ip -> external_ip
"""
node_private_public_ip_map = {}
for node in dcos_api_session.masters:
detected_ip = check_json(dcos_api_session.health.get('/', node=node))['ip']
node_private_public_ip_map[detected_ip] = node
for node in dcos_api_session.all_slaves:
detected_ip = check_json(dcos_api_session.health.get('/', node=node))['ip']
node_private_public_ip_map[detected_ip] = node
return node_private_public_ip_map
def validate_node(nodes):
assert isinstance(nodes, list), 'input argument must be a list'
assert len(nodes) > 0, 'input argument cannot be empty'
required_fields = ['host_ip', 'health', 'role']
for node in nodes:
assert len(node) == len(required_fields), 'node should have the following fields: {}. Actual: {}'.format(
', '.join(required_fields), node)
for required_field in required_fields:
assert required_field in node, '{} must be in node. Actual: {}'.format(required_field, node)
# host_ip, health, role fields cannot be empty
assert node['health'] in [0, 1], 'health must be 0 or 1'
assert node['host_ip'], 'host_ip cannot be empty'
assert node['role'], 'role cannot be empty'
def validate_units(units):
assert isinstance(units, list), 'input argument must be list'
assert len(units) > 0, 'input argument cannot be empty'
required_fields = ['id', 'name', 'health', 'description']
for unit in units:
assert len(unit) == len(required_fields), 'a unit must have the following fields: {}. Actual: {}'.format(
', '.join(required_fields), unit)
for required_field in required_fields:
assert required_field in unit, 'unit response must have field: {}. Actual: {}'.format(required_field, unit)
# a unit must have all 3 fields not empty
assert unit['id'], 'id field cannot be empty'
assert unit['name'], 'name field cannot be empty'
assert unit['health'] in [0, 1], 'health must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
def validate_unit(unit):
assert isinstance(unit, dict), 'input argument must be a dict'
required_fields = ['id', 'health', 'output', 'description', 'help', 'name']
assert len(unit) == len(required_fields), 'unit must have the following fields: {}. Actual: {}'.format(
', '.join(required_fields), unit)
for required_field in required_fields:
assert required_field in unit, '{} must be in a unit. Actual: {}'.format(required_field, unit)
# id, name, health, description, help should not be empty
assert unit['id'], 'id field cannot be empty'
assert unit['name'], 'name field cannot be empty'
assert unit['health'] in [0, 1], 'health must be 0 or 1'
assert unit['description'], 'description field cannot be empty'
assert unit['help'], 'help field cannot be empty'
def validate_state(zip_state):
assert isinstance(zip_state, zipfile.ZipExtFile)
state_output = gzip.decompress(zip_state.read())
state = json.loads(state_output)
assert len(state["frameworks"]) > 1, "bundle must contain information about frameworks"
task_count = sum([len(f["tasks"]) for f in state["frameworks"]])
    assert task_count > 0, "bundle must contain information about tasks"
def verify_archived_items(folder, archived_items, expected_files):
for expected_file in expected_files:
expected_file = folder + expected_file
# We don't know in advance whether the file will be gzipped or not,
# because that depends on the size of the diagnostics file, which can
# be influenced by multiple factors that are not under our control
# here.
# Since we only want to check whether the file _exists_ and don't care
# about whether it's gzipped or not, we check for an optional `.gz`
# file type in case it wasn't explicitly specified in the assertion.
# For more context, see: https://jira.mesosphere.com/browse/DCOS_OSS-4531
if expected_file.endswith('.gz'):
assert expected_file in archived_items, ('expecting {} in {}'.format(expected_file, archived_items))
else:
expected_gzipped_file = (expected_file + '.gz')
unzipped_exists = expected_file in archived_items
gzipped_exists = expected_gzipped_file in archived_items
message = ('expecting {} or {} in {}'.format(expected_file, expected_gzipped_file, archived_items))
assert (unzipped_exists or gzipped_exists), message
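# For example, an expected name that already ends in '.gz' (such as
# 'timedatectl.output.gz') must appear in the archive exactly as given, while a
# bare name such as '5051-containers.json' is accepted either as-is or as
# '5051-containers.json.gz'.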
def verify_unit_response(zip_ext_file, min_lines):
assert isinstance(zip_ext_file, zipfile.ZipExtFile)
unit_output = gzip.decompress(zip_ext_file.read())
assert len(unit_output.decode().split('\n')) >= min_lines, 'Expect at least {} lines. Full unit output {}'.format(
min_lines, unit_output)
|
|
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
from typing import Any, List, Dict, Optional
import textwrap
from ezdxf.lldxf.types import (
render_tag,
DXFVertex,
GROUP_MARKERS,
POINTER_CODES,
)
from ezdxf.addons.xqt import QModelIndex, QAbstractTableModel, Qt
from ezdxf.addons.xqt import QStandardItemModel, QStandardItem, QColor
from .tags import compile_tags, Tags
__all__ = [
"DXFTagsModel",
"DXFStructureModel",
"EntityContainer",
"Entity",
"DXFTagsRole",
]
DXFTagsRole = Qt.UserRole + 1 # type: ignore
def name_fmt(handle, name: str) -> str:
if handle is None:
return name
else:
return f"<{handle}> {name}"
HEADER_LABELS = ["Group Code", "Data Type", "Content", "4", "5"]
def calc_line_numbers(start: int, tags: Tags) -> List[int]:
numbers = [start]
index = start
for tag in tags:
if isinstance(tag, DXFVertex):
index += len(tag.value) * 2
else:
index += 2
numbers.append(index)
return numbers
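# Each scalar tag occupies two lines in a DXF file (group code + value) and a
# compiled vertex occupies two lines per component, so e.g. a scalar tag followed
# by a 3-component DXFVertex with start=1 yields line numbers [1, 3, 9].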
class DXFTagsModel(QAbstractTableModel):
def __init__(
self, tags: Tags, start_line_number: int = 1, valid_handles=None
):
super().__init__()
self._tags = compile_tags(tags)
self._line_numbers = calc_line_numbers(start_line_number, self._tags)
self._valid_handles = valid_handles or set()
def data(self, index: QModelIndex, role: int = ...) -> Any: # type: ignore
def is_invalid_handle(tag):
if (
tag.code in POINTER_CODES
and not tag.value.upper() in self._valid_handles
):
return True
return False
if role == Qt.DisplayRole:
tag = self._tags[index.row()]
return render_tag(tag, index.column())
elif role == Qt.ForegroundRole:
tag = self._tags[index.row()]
if tag.code in GROUP_MARKERS:
return QColor("blue")
elif is_invalid_handle(tag):
return QColor("red")
elif role == DXFTagsRole:
return self._tags[index.row()]
elif role == Qt.ToolTipRole:
code, value = self._tags[index.row()]
if index.column() == 0: # group code column
return GROUP_CODE_TOOLTIPS_DICT.get(code)
code, value = self._tags[index.row()]
if code in POINTER_CODES:
if value.upper() in self._valid_handles:
return f"Double click to go to the referenced entity"
else:
return f"Handle does not exist"
elif code == 0:
return f"Double click to go to the DXF reference provided by Autodesk"
def headerData(
self, section: int, orientation: Qt.Orientation, role: int = ... # type: ignore
) -> Any:
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
return HEADER_LABELS[section]
elif role == Qt.TextAlignmentRole:
return Qt.AlignLeft
elif orientation == Qt.Vertical:
if role == Qt.DisplayRole:
return self._line_numbers[section]
elif role == Qt.ToolTipRole:
return "Line number in DXF file"
def rowCount(self, parent: QModelIndex = ...) -> int: # type: ignore
return len(self._tags)
def columnCount(self, parent: QModelIndex = ...) -> int: # type: ignore
return 3
def compiled_tags(self) -> Tags:
"""Returns the compiled tags. Only points codes are compiled, group
code 10, ...
"""
return self._tags
def line_number(self, row: int) -> int:
"""Return the DXF file line number of the widget-row."""
try:
return self._line_numbers[row]
except IndexError:
return 0
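# Minimal usage sketch of the model above (assumes a running Qt application and a
# QTableView named `view`; the handle "1F" is just an example value). Each compiled
# tag becomes one table row, with DXF line numbers shown in the vertical header:
#
#     model = DXFTagsModel(tags, start_line_number=1, valid_handles={"1F"})
#     view.setModel(model)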
class EntityContainer(QStandardItem):
def __init__(self, name: str, entities: List[Tags]):
super().__init__()
self.setEditable(False)
self.setText(name + f" ({len(entities)})")
self.setup_content(entities)
def setup_content(self, entities):
self.appendRows([Entity(e) for e in entities])
class Classes(EntityContainer):
def setup_content(self, entities):
self.appendRows([Class(e) for e in entities])
class AcDsData(EntityContainer):
def setup_content(self, entities):
self.appendRows([AcDsEntry(e) for e in entities])
class NamedEntityContainer(EntityContainer):
def setup_content(self, entities):
self.appendRows([NamedEntity(e) for e in entities])
class Tables(EntityContainer):
def setup_content(self, entities):
container = []
name = ""
for e in entities:
container.append(e)
dxftype = e.dxftype()
if dxftype == "TABLE":
try:
handle = e.get_handle()
except ValueError:
handle = None
name = e.get_first_value(2, default="UNDEFINED")
name = name_fmt(handle, name)
elif dxftype == "ENDTAB":
if container:
container.pop() # remove ENDTAB
self.appendRow(NamedEntityContainer(name, container))
container.clear()
class Blocks(EntityContainer):
def setup_content(self, entities):
container = []
name = "UNDEFINED"
for e in entities:
container.append(e)
dxftype = e.dxftype()
if dxftype == "BLOCK":
try:
handle = e.get_handle()
except ValueError:
handle = None
name = e.get_first_value(2, default="UNDEFINED")
name = name_fmt(handle, name)
elif dxftype == "ENDBLK":
if container:
self.appendRow(EntityContainer(name, container))
container.clear()
def get_section_name(section: List[Tags]) -> str:
if len(section) > 0:
header = section[0]
if len(header) > 1 and header[0].code == 0 and header[1].code == 2:
return header[1].value
return "INVALID SECTION HEADER!"
class Entity(QStandardItem):
def __init__(self, tags: Tags):
super().__init__()
self.setEditable(False)
self._tags = tags
self._handle: Optional[str]
try:
self._handle = tags.get_handle()
except ValueError:
self._handle = None
self.setText(self.entity_name())
def entity_name(self):
name = "INVALID ENTITY!"
tags = self._tags
if tags and tags[0].code == 0:
name = name_fmt(self._handle, tags[0].value)
return name
def data(self, role: int = ...) -> Any: # type: ignore
if role == DXFTagsRole:
return self._tags
else:
return super().data(role)
class Header(Entity):
def entity_name(self):
return "HEADER"
class ThumbnailImage(Entity):
def entity_name(self):
return "THUMBNAILIMAGE"
class NamedEntity(Entity):
def entity_name(self):
name = self._tags.get_first_value(2, "<noname>")
return name_fmt(str(self._handle), name)
class Class(Entity):
def entity_name(self):
tags = self._tags
name = "INVALID CLASS!"
if len(tags) > 1 and tags[0].code == 0 and tags[1].code == 1:
name = tags[1].value
return name
class AcDsEntry(Entity):
def entity_name(self):
return self._tags[0].value
class DXFStructureModel(QStandardItemModel):
def __init__(self, filename: str, doc):
super().__init__()
root = QStandardItem(filename)
root.setEditable(False)
self.appendRow(root)
row: Any
for section in doc.sections.values():
name = get_section_name(section)
if name == "HEADER":
row = Header(section[0])
elif name == "THUMBNAILIMAGE":
row = ThumbnailImage(section[0])
elif name == "CLASSES":
row = Classes(name, section[1:])
elif name == "TABLES":
row = Tables(name, section[1:])
elif name == "BLOCKS":
row = Blocks(name, section[1:])
elif name == "ACDSDATA":
row = AcDsData(name, section[1:])
else:
row = EntityContainer(name, section[1:])
root.appendRow(row)
def index_of_entity(self, entity: Tags) -> QModelIndex:
root = self.item(0, 0)
index = find_index(root, entity)
if index is None:
return root.index()
else:
return index
def find_index(item: QStandardItem, entity: Tags) -> Optional[QModelIndex]:
def _find(sub_item: QStandardItem):
for index in range(sub_item.rowCount()):
child = sub_item.child(index, 0)
tags = child.data(DXFTagsRole)
if tags and tags is entity:
return child.index()
if child.rowCount() > 0:
index2 = _find(child)
if index2 is not None:
return index2
return None
return _find(item)
GROUP_CODE_TOOLTIPS = [
(0, "Text string indicating the entity type (fixed)"),
(1, "Primary text value for an entity"),
(2, "Name (attribute tag, block name, and so on)"),
((3, 4), "Other text or name values"),
(5, "Entity handle; text string of up to 16 hexadecimal digits (fixed)"),
(6, "Linetype name (fixed)"),
(7, "Text style name (fixed)"),
(8, "Layer name (fixed)"),
(
9,
"DXF: variable name identifier (used only in HEADER section of the DXF file)",
),
(
10,
"Primary point; this is the start point of a line or text entity, center "
"of a circle, and so on DXF: X value of the primary point (followed by Y "
"and Z value codes 20 and 30) APP: 3D point (list of three reals)",
),
(
(11, 18),
"Other points DXF: X value of other points (followed by Y value codes "
"21-28 and Z value codes 31-38) APP: 3D point (list of three reals)",
),
(20, "DXF: Y value of the primary point"),
(30, "DXF: Z value of the primary point"),
((21, 28), "DXF: Y values of other points"),
((31, 37), "DXF: Z values of other points"),
(38, "DXF: entity's elevation if nonzero"),
(39, "Entity's thickness if nonzero (fixed)"),
(
(40, 47),
"Double-precision floating-point values (text height, scale factors, and so on)",
),
(48, "Linetype scale; default value is defined for all entity types"),
(
49,
"Multiple 49 groups may appear in one entity for variable-length tables "
"(such as the dash lengths in the LTYPE table). A 7x group always appears "
"before the first 49 group to specify the table length",
),
(
(50, 58),
"Angles (output in degrees to DXF files and radians through AutoLISP and ObjectARX applications)",
),
(
60,
"Entity visibility; absence or 0 indicates visibility; 1 indicates invisibility",
),
(62, "Color number (fixed)"),
(66, "Entities follow flag (fixed)"),
(67, "0 for model space or 1 for paper space (fixed)"),
(
68,
"APP: identifies whether viewport is on but fully off screen; is not active or is off",
),
(69, "APP: viewport identification number"),
((70, 79), "Integer values, such as repeat counts, flag bits, or modes"),
((90, 99), "32-bit integer values"),
(
100,
"Subclass data marker (with derived class name as a string). "
"Required for all objects and entity classes that are derived from "
"another concrete class. The subclass data marker segregates data defined by different "
"classes in the inheritance chain for the same object. This is in addition "
"to the requirement for DXF names for each distinct concrete class derived "
"from ObjectARX (see Subclass Markers)",
),
(101, "Embedded object marker"),
(
102,
"Control string, followed by '{arbitrary name' or '}'. Similar to the "
"xdata 1002 group code, except that when the string begins with '{', it "
"can be followed by an arbitrary string whose interpretation is up to the "
"application. The only other control string allowed is '}' as a group "
"terminator. AutoCAD does not interpret these strings except during d"
"rawing audit operations. They are for application use.",
),
(105, "Object handle for DIMVAR symbol table entry"),
(
110,
"UCS origin (appears only if code 72 is set to 1); DXF: X value; APP: 3D point",
),
(
111,
"UCS Y-axis (appears only if code 72 is set to 1); DXF: Y value; APP: 3D vector",
),
(
112,
"UCS Z-axis (appears only if code 72 is set to 1); DXF: Z value; APP: 3D vector",
),
((120, 122), "DXF: Y value of UCS origin, UCS X-axis, and UCS Y-axis"),
((130, 132), "DXF: Z value of UCS origin, UCS X-axis, and UCS Y-axis"),
(
(140, 149),
"Double-precision floating-point values (points, elevation, and DIMSTYLE settings, for example)",
),
(
(170, 179),
"16-bit integer values, such as flag bits representing DIMSTYLE settings",
),
(
210,
"Extrusion direction (fixed) "
+ "DXF: X value of extrusion direction "
+ "APP: 3D extrusion direction vector",
),
(220, "DXF: Y value of the extrusion direction"),
(230, "DXF: Z value of the extrusion direction"),
((270, 279), "16-bit integer values"),
((280, 289), "16-bit integer value"),
((290, 299), "Boolean flag value; 0 = False; 1 = True"),
((300, 309), "Arbitrary text strings"),
(
(310, 319),
"Arbitrary binary chunks with same representation and limits as 1004 "
"group codes: hexadecimal strings of up to 254 characters represent data "
"chunks of up to 127 bytes",
),
(
(320, 329),
"Arbitrary object handles; handle values that are taken 'as is'. They "
"are not translated during INSERT and XREF operations",
),
(
(330, 339),
"Soft-pointer handle; arbitrary soft pointers to other objects within "
"same DXF file or drawing. Translated during INSERT and XREF operations",
),
(
(340, 349),
"Hard-pointer handle; arbitrary hard pointers to other objects within "
"same DXF file or drawing. Translated during INSERT and XREF operations",
),
(
(350, 359),
"Soft-owner handle; arbitrary soft ownership links to other objects "
"within same DXF file or drawing. Translated during INSERT and XREF "
"operations",
),
(
(360, 369),
"Hard-owner handle; arbitrary hard ownership links to other objects within "
"same DXF file or drawing. Translated during INSERT and XREF operations",
),
(
(370, 379),
"Lineweight enum value (AcDb::LineWeight). Stored and moved around as a 16-bit integer. "
"Custom non-entity objects may use the full range, but entity classes only use 371-379 DXF "
"group codes in their representation, because AutoCAD and AutoLISP both always assume a 370 "
"group code is the entity's lineweight. This allows 370 to behave like other 'common' entity fields",
),
(
(380, 389),
"PlotStyleName type enum (AcDb::PlotStyleNameType). Stored and moved around as a 16-bit integer. "
"Custom non-entity objects may use the full range, but entity classes only use 381-389 "
"DXF group codes in their representation, for the same reason as the lineweight range",
),
(
(390, 399),
"String representing handle value of the PlotStyleName object, basically a hard pointer, but has "
"a different range to make backward compatibility easier to deal with. Stored and moved around "
"as an object ID (a handle in DXF files) and a special type in AutoLISP. Custom non-entity objects "
"may use the full range, but entity classes only use 391-399 DXF group codes in their representation, "
"for the same reason as the lineweight range",
),
((400, 409), "16-bit integers"),
((410, 419), "String"),
(
(420, 427),
"32-bit integer value. When used with True Color; a 32-bit integer representing a 24-bit color value. "
"The high-order byte (8 bits) is 0, the low-order byte an unsigned char holding the Blue value (0-255), "
"then the Green value, and the next-to-high order byte is the Red Value. Converting this integer value to "
"hexadecimal yields the following bit mask: 0x00RRGGBB. "
"For example, a true color with Red==200, Green==100 and Blue==50 is 0x00C86432, and in DXF, in decimal, 13132850",
),
(
(430, 437),
"String; when used for True Color, a string representing the name of the color",
),
(
(440, 447),
"32-bit integer value. When used for True Color, the transparency value",
),
((450, 459), "Long"),
((460, 469), "Double-precision floating-point value"),
((470, 479), "String"),
(
(480, 481),
"Hard-pointer handle; arbitrary hard pointers to other objects within same DXF file or drawing. "
"Translated during INSERT and XREF operations",
),
(
999,
"DXF: The 999 group code indicates that the line following it is a comment string. SAVEAS does "
"not include such groups in a DXF output file, but OPEN honors them and ignores the comments. "
"You can use the 999 group to include comments in a DXF file that you have edited",
),
(1000, "ASCII string (up to 255 bytes long) in extended data"),
(
1001,
"Registered application name (ASCII string up to 31 bytes long) for extended data",
),
(1002, "Extended data control string ('{' or '}')"),
(1003, "Extended data layer name"),
(1004, "Chunk of bytes (up to 127 bytes long) in extended data"),
(
1005,
"Entity handle in extended data; text string of up to 16 hexadecimal digits",
),
(
1010,
"A point in extended data; DXF: X value (followed by 1020 and 1030 groups); APP: 3D point",
),
(1020, "DXF: Y values of a point"),
(1030, "DXF: Z values of a point"),
(
1011,
"A 3D world space position in extended data "
"DXF: X value (followed by 1021 and 1031 groups) "
"APP: 3D point",
),
(1021, "DXF: Y value of a world space position"),
(1031, "DXF: Z value of a world space position"),
(
1012,
"A 3D world space displacement in extended data "
"DXF: X value (followed by 1022 and 1032 groups) "
"APP: 3D vector",
),
(1022, "DXF: Y value of a world space displacement"),
(1032, "DXF: Z value of a world space displacement"),
(
1013,
"A 3D world space direction in extended data "
"DXF: X value (followed by 1022 and 1032 groups) "
"APP: 3D vector",
),
(1023, "DXF: Y value of a world space direction"),
(1033, "DXF: Z value of a world space direction"),
(1040, "Extended data double-precision floating-point value"),
(1041, "Extended data distance value"),
(1042, "Extended data scale factor"),
(1070, "Extended data 16-bit signed integer"),
(1071, "Extended data 32-bit signed long"),
]
def build_group_code_tooltip_dict() -> Dict[int, str]:
tooltips = dict()
for code, tooltip in GROUP_CODE_TOOLTIPS:
tooltip = "\n".join(textwrap.wrap(tooltip, width=80))
if isinstance(code, int):
tooltips[code] = tooltip
elif isinstance(code, tuple):
s, e = code
for group_code in range(s, e + 1):
tooltips[group_code] = tooltip
else:
raise ValueError(type(code))
return tooltips
GROUP_CODE_TOOLTIPS_DICT = build_group_code_tooltip_dict()
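# A minimal sketch (not part of the original module) illustrating the
# 0x00RRGGBB true-color layout described in the 420-427 tooltip above, plus a
# lookup in GROUP_CODE_TOOLTIPS_DICT. The helper name `true_color` is
# hypothetical and exists only for this example.
def true_color(red: int, green: int, blue: int) -> int:
    """Pack 8-bit RGB components into a 24-bit DXF true-color integer."""
    return (red << 16) | (green << 8) | blue


if __name__ == "__main__":
    # Red=200, Green=100, Blue=50 -> 0x00C86432 == 13132850 (see the 420 tooltip).
    assert true_color(200, 100, 50) == 0x00C86432 == 13132850
    # Every concrete group code in a documented range maps to its tooltip text.
    print(GROUP_CODE_TOOLTIPS_DICT[330].splitlines()[0])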
|
|
'''
This module contains the command-line parser. All imports are kept inside the
functions so that no code is executed before the parser is created, and so that
nothing extra runs when Pyg is used as a library.
'''
ITERABLE_T = (list, tuple)
COMMANDS = set(['install', 'remove', 'bundle', 'pack', 'download', 'update',
'search', 'list', 'site', 'check', 'shell', 'completion', 'help'])
def load_options():
import os.path
from pyg.core import args_manager
from pyg.locations import CFG_FILES
from pyg.log import logger
if CFG_FILES:
for cfg in CFG_FILES:
if os.path.exists(cfg):
logger.debug('Loading options from {0}', cfg)
## This is for potential warnings
logger.indent = 8
args_manager.load(cfg)
logger.indent = 0
break
def init_parser(version=None):
import os
import _opts as opts
from pyg.locations import INSTALL_DIR, USER_SITE
from pyg.parser.formatter import _formatter
from pyg.core import args_manager
from argh import ArghParser, arg, command
parser = ArghParser(prog='pyg')
parser.add_argument('-d', '--debug', action='store_true', help='Set logger to DEBUG level')
parser.add_argument('--verbose', action='store_true', help='Set logger to VERBOSE level')
if version is not None:
parser.add_argument('-v', '--version', action='version', version=version)
parser.add_argument('--no-colors', action='store_true', help='Disable colors')
#parser.add_argument('-i', '--index-url', default='http://pypi.python.org', metavar="<url>", help='Base URL of Python Package Index (default to %(default)s)')
@arg('packname', nargs='*')
@arg('-e', '--editable', action='store_true', help='Install a package from an online repository in editable mode')
@arg('-r', '--req-file', metavar='<path>', action='append', help='Install packages from the specified requirement file')
@arg('-U', '--upgrade', action='store_true', help='If the package is already installed, reinstall it')
@arg('-A', '--upgrade-all', action='store_true', help='Like -U, --upgrade, but also reinstall dependencies')
@arg('-n', '--no-deps', action='store_true', help='Do not install dependencies')
@arg('-g', '--ignore', action='store_true', help='Ignore local files or directories')
@arg('-i', '--index-url', default='http://pypi.python.org', metavar='<url>', help='Base URL of Python Package Index (defaults to %(default)s)')
@arg('-d', '--install-dir', default=INSTALL_DIR, metavar='<path>', help='Base installation directory')
@arg('-u', '--user', action='store_true', help='Install to user site')
@arg('--no-scripts', action='store_true', help='Do not install scripts')
@arg('--no-data', action='store_true', help='Do not install data files')
@arg('--force-egg-install', action='store_true', help='Allow installing eggs with a different Python version')
def install(args):
'''
Install a package
'''
if args.no_deps:
args_manager['install']['no_deps'] = True
if args.upgrade:
args_manager['install']['upgrade'] = True
if args.no_scripts:
args_manager['install']['no_scripts'] = True
if args.no_data:
args_manager['install']['no_data'] = True
if args.ignore:
args_manager['install']['ignore'] = True
if args.force_egg_install:
args_manager['install']['force_egg_install'] = True
if isinstance(args.index_url, ITERABLE_T):
args.index_url = args.index_url[0]
args_manager['install']['packages_url'] = args.index_url + '/simple'
args_manager['install']['index_url'] = args.index_url + '/pypi'
if args.upgrade_all:
args_manager['install']['upgrade_all'] = True
args_manager['install']['upgrade'] = True
if args.user:
args_manager['install']['user'] = True
args_manager['install']['install_dir'] = USER_SITE
if args.install_dir != INSTALL_DIR:
dir = os.path.abspath(args.install_dir)
args_manager['install']['install_dir'] = dir
if any(os.path.basename(dir) == p for p in args.packname):
## Automatically set ignore=True when INSTALL_DIR has the same
## name of one of the packages to install
args_manager['install']['ignore'] = True
opts.install_func(args.packname, args.req_file, args.editable,
args_manager['install']['ignore'])
@arg('packname', nargs='+')
@arg('-r', '--req-file', metavar='<path>', help='Uninstall all the packages listed in the given requirement file')
@arg('-y', '--yes', action='store_true', help='Do not ask confirmation of uninstall deletions')
@arg('-l', '--local', action='store_true', help='Also delete local files (adds them to the list of files to delete)')
@arg('-i', '--info', action='store_true', help='Only list files to delete')
def remove(args):
'''
Remove a package
'''
if args.yes:
args_manager['remove']['yes'] = True
if args.info:
args_manager['remove']['info'] = True
if args.local:
args_manager['remove']['local'] = True
opts.remove_func(args.packname, args.req_file,
args_manager['remove']['yes'], args_manager['remove']['info'],
args_manager['remove']['local'])
@arg('packname', nargs=1)
@arg('-i', '--index-url', nargs=1, default='http://pypi.python.org', metavar='<url>', help='Base URL of Python Package Index (defaults to %(default)s)')
def list(args):
'''
List all versions for a package
'''
if isinstance(args.index_url, ITERABLE_T):
args.index_url = args.index_url[0]
args_manager['install']['packages_url'] = args.index_url + '/simple'
args_manager['install']['index_url'] = args.index_url + '/pypi'
opts.list_func(args.packname[0])
@arg('-c', '--count', action='store_true', help='Only return the requirements count')
@arg('-n', '--no-info', action='store_true', help='Do not add site information')
@arg('-f', '--file', metavar='<path>', help='Write requirements to the specified file')
def site(args):
'''
Show installed packages and some site information
'''
if args.count:
args_manager['site']['count'] = True
if args.no_info:
args_manager['site']['no_info'] = True
if args.file:
args_manager['site']['file'] = args.file
count, no_info, file = args_manager['site']['count'], \
args_manager['site']['no_info'], args_manager['site']['file']
opts.site_func(count, no_info, file)
@arg('query', nargs='+')
@arg('-i', '--index-url', default='http://pypi.python.org', metavar='<url>', help='Base URL of Python Package Index (defaults to %(default)s)')
@arg('-e', '--exact', action='store_true', help='List only exact hits')
@arg('-n', '--max-num', type=int, default=None, help='List at most <num> results')
@arg('-a', '--all', action='store_true', help='Show all versions for specified package')
def search(args):
'''
Search PyPI
'''
if isinstance(args.index_url, ITERABLE_T):
args.index_url = args.index_url[0]
args_manager['install']['packages_url'] = args.index_url + '/simple'
args_manager['install']['index_url'] = args.index_url + '/pypi'
opts.search_func(args.query, args.exact, args.all, args.max_num)
@arg('packname')
@arg('-i', '--info', action='store_true', help='Show info for the specified package')
def check(args):
'''
Check if a package is installed
'''
opts.check_func(args.packname, args.info)
@arg('packname')
@arg('-i', '--index-url', default='http://pypi.python.org', metavar='<url>', help='Base URL of Python Package Index (defaults to %(default)s)')
@arg('-u', '--unpack', action='store_true', help='Once downloaded, unpack the package')
@arg('-d', '--download-dir', default=os.path.curdir, metavar='<path>', help='The destination directory')
@arg('-p', '--prefer', metavar='<ext>', help='The preferred file type for the download')
@arg('-m', '--md5', action='store_true', help='Show md5 sum & link after download')
@arg('-n', '--dry', action='store_true', help='Dry run, just display information')
def download(args):
'''
Download a package
'''
if isinstance(args.index_url, ITERABLE_T):
args.index_url = args.index_url[0]
args_manager['install']['packages_url'] = args.index_url + '/simple'
args_manager['install']['index_url'] = args.index_url + '/pypi'
if args.download_dir != args_manager['download']['download_dir']:
args_manager['download']['download_dir'] = args.download_dir
if args.prefer != args_manager['download']['prefer']:
args_manager['download']['prefer'] = args.prefer
args_manager['download']['unpack'] = bool(args.unpack)
args_manager['download']['md5'] = bool(args.md5)
if args.dry:
args_manager['download']['download_dir'] = None
opts.download_func(args)
@arg('-i', '--index-url', default='http://pypi.python.org', metavar='<url>', help='Base URL of Python Package Index (defaults to %(default)s)')
@arg('-y', '--yes', action='store_true', help='Do not ask confirmation for the upgrade')
def update(args):
'''
Check for updates for installed packages
'''
if isinstance(args.index_url, ITERABLE_T):
args.index_url = args.index_url[0]
args_manager['install']['packages_url'] = args.index_url + '/simple'
args_manager['install']['index_url'] = args.index_url + '/pypi'
if args.yes:
args_manager['update']['yes'] = True
opts.update_func()
@command
def shell():
'''
Fire up Pyg Shell
'''
opts.shell_func()
@arg('bundlename', help='Name of the bundle to create')
@arg('packages', nargs='*', help='Name of the package(s) to bundle')
@arg('-i', '--index-url', default='http://pypi.python.org', metavar='<url>', help='Base URL of Python Package Index (defaults to %(default)s)')
@arg('-r', '--req-file', action='append', metavar='<path>', help='Requirement files containing the packages to bundle')
@arg('-e', '--exclude', action='append', default=[], metavar='<requirement>', help='Exclude packages matching `requirement`')
@arg('-d', '--use-develop', action='store_true', help='Look for local packages before downloading them')
def bundle(args):
'''
Create bundles (like Pip's)
'''
if isinstance(args.index_url, ITERABLE_T):
args.index_url = args.index_url[0]
args_manager['install']['packages_url'] = args.index_url + '/simple'
args_manager['install']['index_url'] = args.index_url + '/pypi'
if args.exclude:
args_manager['bundle']['exclude'] = args.exclude
if args.use_develop:
args_manager['bundle']['use_develop'] = True
exclude, use_develop = args_manager['bundle']['exclude'], args_manager['bundle']['use_develop']
opts.bundle_func(args.packages, args.bundlename, exclude, args.req_file, use_develop)
@arg('packname', help='Name of the pack to create')
@arg('package', help='Name of the package to pack')
@arg('-i', '--index-url', default='http://pypi.python.org', metavar='<url>', help='Base URL of Python Package Index (defaults to %(default)s)')
@arg('-d', '--use-develop', action='store_true', help='Look for local packages before downloading them')
@arg('-e', '--exclude', action='append', default=[], metavar='<requirement>', help='Exclude packages matching `requirement`')
def pack(args):
'''
Create packs
'''
if isinstance(args.index_url, ITERABLE_T):
args.index_url = args.index_url[0]
args_manager['install']['packages_url'] = args.index_url + '/simple'
args_manager['install']['index_url'] = args.index_url + '/pypi'
# XXX: Duplication is evil. (See above.)
if args.exclude:
args_manager['pack']['exclude'] = args.exclude
if args.use_develop:
args_manager['pack']['use_develop'] = True
exclude, use_develop = args_manager['pack']['exclude'], args_manager['pack']['use_develop']
return opts.pack_func(args.package, args.packname, exclude, use_develop)
@arg('-f', '--file', metavar='<path>', help='Write code for completion into the specified file. Defaults to %(default)r')
def completion(args):
'''
Generate bash code for Pyg completion
'''
return opts.completion_func(COMMANDS, args.file)
@command
def help():
'''
Show this help and exit
'''
return
parser.add_commands([locals()[cmd] for cmd in COMMANDS])
parser.formatter_class = _formatter(parser)
return parser
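# A minimal sketch (not part of the original module) of how the duplicated
# index-url handling flagged above ("XXX: Duplication is evil") could be
# factored out. The helper name `_set_index_url` is hypothetical.
def _set_index_url(args_manager, index_url):
    # argparse can yield a one-element list (e.g. with nargs=1), so normalize
    # it first, exactly as each command currently does inline.
    if isinstance(index_url, ITERABLE_T):
        index_url = index_url[0]
    args_manager['install']['packages_url'] = index_url + '/simple'
    args_manager['install']['index_url'] = index_url + '/pypi'
    return index_url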
|
|
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for group_snapshot code.
"""
import ddt
import mock
from oslo_policy import policy as oslo_policy
from six.moves import http_client
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import group_snapshots as v3_group_snapshots
from cinder import context
from cinder import db
from cinder import exception
from cinder.group import api as group_api
from cinder import objects
from cinder.objects import fields
from cinder.policies import base as base_policy
from cinder.policies import group_snapshots as group_snapshots_policy
from cinder import policy
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
import cinder.volume
@ddt.ddt
class GroupSnapshotsAPITestCase(test.TestCase):
"""Test Case for group_snapshots API."""
def setUp(self):
super(GroupSnapshotsAPITestCase, self).setUp()
self.controller = v3_group_snapshots.GroupSnapshotsController()
self.volume_api = cinder.volume.API()
self.context = context.get_admin_context()
self.context.project_id = fake.PROJECT_ID
self.context.user_id = fake.USER_ID
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
self.group = utils.create_group(self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID])
self.volume = utils.create_volume(self.context,
group_id=self.group.id,
volume_type_id=fake.VOLUME_TYPE_ID)
self.g_snapshots_array = [
utils.create_group_snapshot(
self.context,
group_id=self.group.id,
group_type_id=self.group.group_type_id) for _ in range(3)]
self.addCleanup(self._cleanup)
def _cleanup(self):
for snapshot in self.g_snapshots_array:
snapshot.destroy()
self.volume.destroy()
self.group.destroy()
def test_show_group_snapshot(self):
group_snapshot = utils.create_group_snapshot(
self.context, group_id=self.group.id)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=mv.GROUP_SNAPSHOTS)
res_dict = self.controller.show(req, group_snapshot.id)
self.assertEqual(1, len(res_dict))
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshot']['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshot']['name'])
self.assertEqual(fields.GroupSnapshotStatus.CREATING,
res_dict['group_snapshot']['status'])
group_snapshot.destroy()
@ddt.data(True, False)
def test_list_group_snapshots_with_limit(self, is_detail):
url = '/v3/%s/group_snapshots?limit=1' % fake.PROJECT_ID
if is_detail:
url = '/v3/%s/group_snapshots/detail?limit=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url,
version=mv.GROUP_SNAPSHOT_PAGINATION)
if is_detail:
res_dict = self.controller.detail(req)
else:
res_dict = self.controller.index(req)
self.assertEqual(2, len(res_dict))
self.assertEqual(1, len(res_dict['group_snapshots']))
self.assertEqual(self.g_snapshots_array[2].id,
res_dict['group_snapshots'][0]['id'])
next_link = (
'http://localhost/v3/%s/group_snapshots?limit='
'1&marker=%s' %
(fake.PROJECT_ID, res_dict['group_snapshots'][0]['id']))
self.assertEqual(next_link,
res_dict['group_snapshot_links'][0]['href'])
if is_detail:
self.assertIn('description', res_dict['group_snapshots'][0].keys())
else:
self.assertNotIn('description',
res_dict['group_snapshots'][0].keys())
@ddt.data(True, False)
def test_list_group_snapshot_with_offset(self, is_detail):
url = '/v3/%s/group_snapshots?offset=1' % fake.PROJECT_ID
if is_detail:
url = '/v3/%s/group_snapshots/detail?offset=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url,
version=mv.GROUP_SNAPSHOT_PAGINATION)
if is_detail:
res_dict = self.controller.detail(req)
else:
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(2, len(res_dict['group_snapshots']))
self.assertEqual(self.g_snapshots_array[1].id,
res_dict['group_snapshots'][0]['id'])
self.assertEqual(self.g_snapshots_array[0].id,
res_dict['group_snapshots'][1]['id'])
if is_detail:
self.assertIn('description', res_dict['group_snapshots'][0].keys())
else:
self.assertNotIn('description',
res_dict['group_snapshots'][0].keys())
@ddt.data(True, False)
def test_list_group_snapshot_with_offset_out_of_range(self, is_detail):
url = ('/v3/%s/group_snapshots?offset=234523423455454' %
fake.PROJECT_ID)
if is_detail:
url = ('/v3/%s/group_snapshots/detail?offset=234523423455454' %
fake.PROJECT_ID)
req = fakes.HTTPRequest.blank(url,
version=mv.GROUP_SNAPSHOT_PAGINATION)
if is_detail:
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail,
req)
else:
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index,
req)
@ddt.data(False, True)
def test_list_group_snapshot_with_limit_and_offset(self, is_detail):
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=self.group.id,
group_type_id=self.group.group_type_id)
url = '/v3/%s/group_snapshots?limit=2&offset=1' % fake.PROJECT_ID
if is_detail:
url = ('/v3/%s/group_snapshots/detail?limit=2&offset=1' %
fake.PROJECT_ID)
req = fakes.HTTPRequest.blank(url,
version=mv.GROUP_SNAPSHOT_PAGINATION)
if is_detail:
res_dict = self.controller.detail(req)
else:
res_dict = self.controller.index(req)
self.assertEqual(2, len(res_dict))
self.assertEqual(2, len(res_dict['group_snapshots']))
self.assertEqual(self.g_snapshots_array[2].id,
res_dict['group_snapshots'][0]['id'])
self.assertEqual(self.g_snapshots_array[1].id,
res_dict['group_snapshots'][1]['id'])
self.assertIsNotNone(res_dict['group_snapshot_links'][0]['href'])
if is_detail:
self.assertIn('description', res_dict['group_snapshots'][0].keys())
else:
self.assertNotIn('description',
res_dict['group_snapshots'][0].keys())
group_snapshot.destroy()
@ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER),
mv.RESOURCE_FILTER,
mv.LIKE_FILTER)
@mock.patch('cinder.api.common.reject_invalid_filters')
def test_group_snapshot_list_with_general_filter(self,
version, mock_update):
url = '/v3/%s/group_snapshots' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url,
version=version,
use_admin_context=False)
self.controller.index(req)
if version != mv.get_prior_version(mv.RESOURCE_FILTER):
support_like = True if version == mv.LIKE_FILTER else False
mock_update.assert_called_once_with(req.environ['cinder.context'],
mock.ANY, 'group_snapshot',
support_like)
@ddt.data(False, True)
def test_list_group_snapshot_with_filter(self, is_detail):
url = ('/v3/%s/group_snapshots?'
'all_tenants=True&id=%s') % (fake.PROJECT_ID,
self.g_snapshots_array[0].id)
if is_detail:
url = ('/v3/%s/group_snapshots/detail?'
'all_tenants=True&id=%s') % (fake.PROJECT_ID,
self.g_snapshots_array[0].id)
req = fakes.HTTPRequest.blank(url,
version=mv.GROUP_SNAPSHOT_PAGINATION,
use_admin_context=True)
if is_detail:
res_dict = self.controller.detail(req)
else:
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(1, len(res_dict['group_snapshots']))
self.assertEqual(self.g_snapshots_array[0].id,
res_dict['group_snapshots'][0]['id'])
if is_detail:
self.assertIn('description', res_dict['group_snapshots'][0].keys())
else:
self.assertNotIn('description',
res_dict['group_snapshots'][0].keys())
@ddt.data({'is_detail': True, 'version': mv.GROUP_SNAPSHOTS},
{'is_detail': False, 'version': mv.GROUP_SNAPSHOTS},
{'is_detail': True, 'version': mv.POOL_FILTER},
{'is_detail': False, 'version': mv.POOL_FILTER},)
@ddt.unpack
def test_list_group_snapshot_with_filter_previous_version(self, is_detail,
version):
url = ('/v3/%s/group_snapshots?'
'all_tenants=True&id=%s') % (fake.PROJECT_ID,
self.g_snapshots_array[0].id)
if is_detail:
url = ('/v3/%s/group_snapshots/detail?'
'all_tenants=True&id=%s') % (fake.PROJECT_ID,
self.g_snapshots_array[0].id)
req = fakes.HTTPRequest.blank(url, version=version,
use_admin_context=True)
if is_detail:
res_dict = self.controller.detail(req)
else:
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['group_snapshots']))
@ddt.data(False, True)
def test_list_group_snapshot_with_sort(self, is_detail):
url = '/v3/%s/group_snapshots?sort=id:asc' % fake.PROJECT_ID
if is_detail:
url = ('/v3/%s/group_snapshots/detail?sort=id:asc' %
fake.PROJECT_ID)
req = fakes.HTTPRequest.blank(url,
version=mv.GROUP_SNAPSHOT_PAGINATION)
expect_result = [snapshot.id for snapshot in self.g_snapshots_array]
expect_result.sort()
if is_detail:
res_dict = self.controller.detail(req)
else:
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['group_snapshots']))
self.assertEqual(expect_result[0],
res_dict['group_snapshots'][0]['id'])
self.assertEqual(expect_result[1],
res_dict['group_snapshots'][1]['id'])
self.assertEqual(expect_result[2],
res_dict['group_snapshots'][2]['id'])
if is_detail:
self.assertIn('description', res_dict['group_snapshots'][0].keys())
else:
self.assertNotIn('description',
res_dict['group_snapshots'][0].keys())
def test_show_group_snapshot_with_group_snapshot_not_found(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID),
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.show,
req, fake.WILL_NOT_BE_FOUND_ID)
@ddt.data(True, False)
def test_list_group_snapshots_json(self, is_detail):
if is_detail:
request_url = '/v3/%s/group_snapshots/detail'
else:
request_url = '/v3/%s/group_snapshots'
req = fakes.HTTPRequest.blank(request_url % fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
if is_detail:
res_dict = self.controller.detail(req)
else:
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['group_snapshots']))
for index, snapshot in enumerate(self.g_snapshots_array):
self.assertEqual(snapshot.id,
res_dict['group_snapshots'][2 - index]['id'])
self.assertIsNotNone(
res_dict['group_snapshots'][2 - index]['name'])
if is_detail:
self.assertIn('description',
res_dict['group_snapshots'][2 - index].keys())
else:
self.assertNotIn('description',
res_dict['group_snapshots'][2 - index].keys())
@mock.patch('cinder.db.volume_type_get')
@mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve')
def test_create_group_snapshot_json(self, mock_quota, mock_vol_type):
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": self.group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
res_dict = self.controller.create(req, body=body)
self.assertEqual(1, len(res_dict))
self.assertIn('id', res_dict['group_snapshot'])
group_snapshot = objects.GroupSnapshot.get_by_id(
context.get_admin_context(), res_dict['group_snapshot']['id'])
group_snapshot.destroy()
@mock.patch('cinder.db.volume_type_get')
def test_create_group_snapshot_when_volume_in_error_status(
self, mock_vol_type):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
status='error',
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body=body)
group.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
def test_create_group_snapshot_with_no_body(self):
# omit body from the request
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=None)
def test_create_group_snapshot_with_empty_body(self):
# empty body in the request
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
body = {"group_snapshot": {}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
@mock.patch.object(group_api.API, 'create_group_snapshot',
side_effect=exception.InvalidGroupSnapshot(
reason='Invalid group snapshot'))
def test_create_with_invalid_group_snapshot(self, mock_create_group_snap):
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": self.group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body=body)
@mock.patch.object(group_api.API, 'create_group_snapshot',
side_effect=exception.GroupSnapshotNotFound(
group_snapshot_id='invalid_id'))
def test_create_with_group_snapshot_not_found(self, mock_create_grp_snap):
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": self.group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.create,
req, body=body)
def test_create_group_snapshot_from_empty_group(self):
empty_group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID])
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": empty_group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body=body)
empty_group.destroy()
def test_delete_group_snapshot_available(self):
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=self.group.id,
status=fields.GroupSnapshotStatus.AVAILABLE)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=mv.GROUP_SNAPSHOTS)
res_dict = self.controller.delete(req, group_snapshot.id)
group_snapshot = objects.GroupSnapshot.get_by_id(self.context,
group_snapshot.id)
self.assertEqual(http_client.ACCEPTED, res_dict.status_int)
self.assertEqual(fields.GroupSnapshotStatus.DELETING,
group_snapshot.status)
group_snapshot.destroy()
def test_delete_group_snapshot_available_used_as_source(self):
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=self.group.id,
status=fields.GroupSnapshotStatus.AVAILABLE)
group2 = utils.create_group(
self.context, status='creating',
group_snapshot_id=group_snapshot.id,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, group_snapshot.id)
group_snapshot.destroy()
group2.destroy()
def test_delete_group_snapshot_with_group_snapshot_NotFound(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID),
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.delete,
req, fake.WILL_NOT_BE_FOUND_ID)
def test_delete_group_snapshot_with_invalid_group_snapshot(self):
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=self.group.id,
status='invalid')
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=mv.GROUP_SNAPSHOTS)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, group_snapshot.id)
group_snapshot.destroy()
def test_delete_group_snapshot_policy_not_authorized(self):
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=self.group.id,
status=fields.GroupSnapshotStatus.AVAILABLE)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/' %
(fake.PROJECT_ID, group_snapshot.id),
version=mv.GROUP_SNAPSHOTS,
use_admin_context=False)
rules = {
group_snapshots_policy.DELETE_POLICY: base_policy.RULE_ADMIN_API
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.addCleanup(policy.reset)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete,
req, group_snapshot.id)
@ddt.data((mv.GROUP_TYPE, 'fake_snapshot_001',
fields.GroupSnapshotStatus.AVAILABLE,
exception.VersionNotFoundForAPIMethod),
(mv.get_prior_version(mv.GROUP_SNAPSHOT_RESET_STATUS),
'fake_snapshot_001',
fields.GroupSnapshotStatus.AVAILABLE,
exception.VersionNotFoundForAPIMethod),
(mv.GROUP_SNAPSHOT_RESET_STATUS, 'fake_snapshot_001',
fields.GroupSnapshotStatus.AVAILABLE,
exception.GroupSnapshotNotFound))
@ddt.unpack
def test_reset_group_snapshot_status_illegal(self, version,
group_snapshot_id,
status, exceptions):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' %
(fake.PROJECT_ID, group_snapshot_id),
version=version)
body = {"reset_status": {
"status": status
}}
self.assertRaises(exceptions,
self.controller.reset_status,
req, group_snapshot_id, body=body)
def test_reset_group_snapshot_status_invalid_status(self):
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=self.group.id,
status=fields.GroupSnapshotStatus.CREATING)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' %
(fake.PROJECT_ID, group_snapshot.id),
version=mv.GROUP_SNAPSHOT_RESET_STATUS)
body = {"reset_status": {
"status": "invalid_test_status"
}}
self.assertRaises(exception.InvalidGroupSnapshotStatus,
self.controller.reset_status,
req, group_snapshot.id, body=body)
group_snapshot.destroy()
def test_reset_group_snapshot_status(self):
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=self.group.id,
status=fields.GroupSnapshotStatus.CREATING)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' %
(fake.PROJECT_ID, group_snapshot.id),
version=mv.GROUP_SNAPSHOT_RESET_STATUS)
body = {"reset_status": {
"status": fields.GroupSnapshotStatus.AVAILABLE
}}
response = self.controller.reset_status(req, group_snapshot.id,
body=body)
g_snapshot = objects.GroupSnapshot.get_by_id(self.context,
group_snapshot.id)
self.assertEqual(http_client.ACCEPTED, response.status_int)
self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE,
g_snapshot.status)
group_snapshot.destroy()
@mock.patch('cinder.db.volume_type_get')
@mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve')
def test_create_group_snapshot_with_null_validate(
self, mock_quota, mock_vol_type):
body = {"group_snapshot": {"name": None,
"description": None,
"group_id": self.group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
self.context.project_id,
version=mv.GROUP_SNAPSHOTS)
res_dict = self.controller.create(req, body=body)
self.assertIn('group_snapshot', res_dict)
self.assertIsNone(res_dict['group_snapshot']['name'])
group_snapshot = objects.GroupSnapshot.get_by_id(
context.get_admin_context(), res_dict['group_snapshot']['id'])
group_snapshot.destroy()
|
|
"""This module implements the Scraper component which parses responses and
extracts information from them"""
from collections import deque
from twisted.python.failure import Failure
from twisted.internet import defer
from scrapy.utils.defer import defer_result, defer_succeed, parallel, iter_errback
from scrapy.utils.spider import iterate_spider_output
from scrapy.utils.misc import load_object
from scrapy.exceptions import CloseSpider, DropItem
from scrapy import signals
from scrapy.http import Request, Response
from scrapy.item import BaseItem
from scrapy.core.spidermw import SpiderMiddlewareManager
from scrapy import log
class Slot(object):
"""Scraper slot (one per running spider)"""
MIN_RESPONSE_SIZE = 1024
def __init__(self, max_active_size=5000000):
self.max_active_size = max_active_size
self.queue = deque()
self.active = set()
self.active_size = 0
self.itemproc_size = 0
self.closing = None
def add_response_request(self, response, request):
deferred = defer.Deferred()
self.queue.append((response, request, deferred))
if isinstance(response, Response):
self.active_size += max(len(response.body), self.MIN_RESPONSE_SIZE)
else:
self.active_size += self.MIN_RESPONSE_SIZE
return deferred
def next_response_request_deferred(self):
response, request, deferred = self.queue.popleft()
self.active.add(request)
return response, request, deferred
def finish_response(self, response, request):
self.active.remove(request)
if isinstance(response, Response):
self.active_size -= max(len(response.body), self.MIN_RESPONSE_SIZE)
else:
self.active_size -= self.MIN_RESPONSE_SIZE
def is_idle(self):
return not (self.queue or self.active)
def needs_backout(self):
return self.active_size > self.max_active_size
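# A minimal usage sketch (not part of the original Scrapy source) showing how
# the Slot above accounts for in-flight response size and signals backout. The
# URL, sizes, and function name are illustrative only.
def _slot_accounting_example():
    slot = Slot(max_active_size=2048)
    request = Request('http://example.com')
    response = Response('http://example.com', body=b'x' * 1500, request=request)

    slot.add_response_request(response, request)
    # The body (1500 bytes) exceeds MIN_RESPONSE_SIZE, so it is counted in full.
    assert slot.active_size == 1500
    assert not slot.needs_backout()

    slot.add_response_request(response, request)
    # 3000 bytes of active responses now exceed max_active_size (2048 bytes).
    assert slot.needs_backout()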
class Scraper(object):
def __init__(self, crawler):
self.slots = {}
self.spidermw = SpiderMiddlewareManager.from_crawler(crawler)
itemproc_cls = load_object(crawler.settings['ITEM_PROCESSOR'])
self.itemproc = itemproc_cls.from_crawler(crawler)
self.concurrent_items = crawler.settings.getint('CONCURRENT_ITEMS')
self.crawler = crawler
self.signals = crawler.signals
self.logformatter = crawler.logformatter
@defer.inlineCallbacks
def open_spider(self, spider):
"""Open the given spider for scraping and allocate resources for it"""
assert spider not in self.slots, "Spider already opened: %s" % spider
self.slots[spider] = Slot()
yield self.itemproc.open_spider(spider)
def close_spider(self, spider):
"""Close a spider being scraped and release its resources"""
assert spider in self.slots, "Spider not opened: %s" % spider
slot = self.slots[spider]
slot.closing = defer.Deferred()
slot.closing.addCallback(self.itemproc.close_spider)
self._check_if_closing(spider, slot)
return slot.closing
def is_idle(self):
"""Return True if there isn't any more spiders to process"""
return not self.slots
def _check_if_closing(self, spider, slot):
if slot.closing and slot.is_idle():
del self.slots[spider]
slot.closing.callback(spider)
def enqueue_scrape(self, response, request, spider):
slot = self.slots[spider]
dfd = slot.add_response_request(response, request)
def finish_scraping(_):
slot.finish_response(response, request)
self._check_if_closing(spider, slot)
self._scrape_next(spider, slot)
return _
dfd.addBoth(finish_scraping)
dfd.addErrback(log.err, 'Scraper bug processing %s' % request, \
spider=spider)
self._scrape_next(spider, slot)
return dfd
def _scrape_next(self, spider, slot):
while slot.queue:
response, request, deferred = slot.next_response_request_deferred()
self._scrape(response, request, spider).chainDeferred(deferred)
def _scrape(self, response, request, spider):
"""Handle the downloaded response or failure trough the spider
callback/errback"""
assert isinstance(response, (Response, Failure))
dfd = self._scrape2(response, request, spider) # returns the spider's processed output
dfd.addErrback(self.handle_spider_error, request, response, spider)
dfd.addCallback(self.handle_spider_output, request, response, spider)
return dfd
def _scrape2(self, request_result, request, spider):
"""Handle the diferent cases of request's result been a Response or a
Failure"""
if not isinstance(request_result, Failure):
return self.spidermw.scrape_response(self.call_spider, \
request_result, request, spider)
else:
# FIXME: don't ignore errors in spider middleware
dfd = self.call_spider(request_result, request, spider)
return dfd.addErrback(self._log_download_errors, \
request_result, request, spider)
def call_spider(self, result, request, spider):
result.request = request
dfd = defer_result(result)
dfd.addCallbacks(request.callback or spider.parse, request.errback)
return dfd.addCallback(iterate_spider_output)
def handle_spider_error(self, _failure, request, response, spider):
exc = _failure.value
if isinstance(exc, CloseSpider):
self.crawler.engine.close_spider(spider, exc.reason or 'cancelled')
return
log.err(_failure, "Spider error processing %s" % request, spider=spider)
self.signals.send_catch_log(signal=signals.spider_error, failure=_failure, response=response, \
spider=spider)
self.crawler.stats.inc_value("spider_exceptions/%s" % _failure.value.__class__.__name__, \
spider=spider)
def handle_spider_output(self, result, request, response, spider):
if not result:
return defer_succeed(None)
it = iter_errback(result, self.handle_spider_error, request, response, spider)
dfd = parallel(it, self.concurrent_items,
self._process_spidermw_output, request, response, spider)
return dfd
def _process_spidermw_output(self, output, request, response, spider):
"""Process each Request/Item (given in the output parameter) returned
from the given spider
"""
if isinstance(output, Request):
self.signals.send_catch_log(signal=signals.request_received, request=output, \
spider=spider)
self.crawler.engine.crawl(request=output, spider=spider)
elif isinstance(output, BaseItem):
self.slots[spider].itemproc_size += 1
dfd = self.itemproc.process_item(output, spider)
dfd.addBoth(self._itemproc_finished, output, response, spider)
return dfd
elif output is None:
pass
else:
typename = type(output).__name__
log.msg(format='Spider must return Request, BaseItem or None, '
'got %(typename)r in %(request)s',
level=log.ERROR, spider=spider, request=request, typename=typename)
def _log_download_errors(self, spider_failure, download_failure, request, spider):
"""Log and silence errors that come from the engine (typically download
errors that got propagated through here)
"""
if spider_failure is download_failure:
errmsg = spider_failure.getErrorMessage()
if errmsg:
log.msg(format='Error downloading %(request)s: %(errmsg)s',
level=log.ERROR, spider=spider, request=request, errmsg=errmsg)
return
return spider_failure
def _itemproc_finished(self, output, item, response, spider):
"""ItemProcessor finished for the given ``item`` and returned ``output``
"""
self.slots[spider].itemproc_size -= 1
if isinstance(output, Failure):
ex = output.value
if isinstance(ex, DropItem):
logkws = self.logformatter.dropped(item, ex, response, spider)
log.msg(spider=spider, **logkws)
return self.signals.send_catch_log_deferred(signal=signals.item_dropped, \
item=item, spider=spider, exception=output.value)
else:
log.err(output, 'Error processing %s' % item, spider=spider)
else:
logkws = self.logformatter.scraped(output, response, spider)
log.msg(spider=spider, **logkws)
return self.signals.send_catch_log_deferred(signal=signals.item_scraped, \
item=output, response=response, spider=spider)
|
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from . import model, modification, test_common, test_config
mock = test_common.import_mock()
def plist_read(*args):
bundle_id = test_config.TestConfig().base_bundle_id
plists = {
'/$W/App Product.app/Contents/Info.plist': {
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID-full': '-full',
},
'/$W/App Product Canary.app/Contents/Frameworks/Product Framework.framework/Helpers/Product Helper (Alerts).app/Contents/Info.plist':
{
'CFBundleIdentifier': bundle_id + '.AlertNotificationService'
},
'/$W/App Product Canary.app/Contents/Frameworks/Product Framework.framework/Helpers/Product Helper (Alerts).app/Contents/Resources/base.lproj/InfoPlist.strings':
{
'CFBundleDisplayName': 'Product'
},
'/$W/app-entitlements.plist': {
'com.apple.application-identifier': bundle_id
},
'/$W/helper-renderer-entitlements.plist': {},
'/$W/helper-gpu-entitlements.plist': {},
'/$W/helper-plugin-entitlements.plist': {},
'/$W/App Product Canary.app/Contents/Resources/test.signing.bundle_id.canary.manifest/Contents/Resources/test.signing.bundle_id.canary.manifest':
{
'pfm_domain': bundle_id
}
}
plists['/$W/App Product Canary.app/Contents/Info.plist'] = plists[
'/$W/App Product.app/Contents/Info.plist']
return plists[args[0]]
def plist_read_with_architecture(*args):
plist = plist_read(*args)
plist.update({'KSChannelID': 'arm64', 'KSChannelID-full': 'arm64-full'})
return plist
@mock.patch('signing.commands.read_plist', side_effect=plist_read)
@mock.patch.multiple(
'signing.commands', **{
m: mock.DEFAULT for m in ('copy_files', 'move_file', 'make_dir',
'write_file', 'run_command', 'write_plist')
})
class TestModification(unittest.TestCase):
def setUp(self):
self.paths = model.Paths('/$I', '/$O', '/$W')
self.config = test_config.TestConfig()
def _is_framework_unchanged(self, mocks):
# Determines whether any modifications were made within the framework
# according to calls to any of the mocked calls in |mocks|. This is done
# by examining the calls' arguments for a substring pointing into the
# framework.
def _do_mock_calls_mention_framework(mock_calls):
for call in mock_calls:
for tup in call:
for arg in tup:
# Don't anchor this substring in a particular directory
# because it may appear in any of /$I, /$O, or /$W.
# Don't anchor it with App Product.app either, because
# it may be renamed (to App Product Canary.app).
if 'Contents/Frameworks/Product Framework.framework' in arg:
return True
return False
for mocked in mocks.values():
if _do_mock_calls_mention_framework(mocked.mock_calls):
return False
return True
def test_base_distribution(self, read_plist, **kwargs):
dist = model.Distribution()
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID-full': '-full'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
self.assertEqual(0, kwargs['move_file'].call_count)
self.assertEqual(0, kwargs['write_file'].call_count)
self.assertTrue(self._is_framework_unchanged(kwargs))
def test_distribution_with_architecture(self, read_plist, **kwargs):
read_plist.side_effect = plist_read_with_architecture
dist = model.Distribution()
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID': 'arm64',
'KSChannelID-full': 'arm64-full'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
self.assertEqual(0, kwargs['move_file'].call_count)
self.assertEqual(0, kwargs['write_file'].call_count)
self.assertTrue(self._is_framework_unchanged(kwargs))
def test_distribution_with_brand(self, read_plist, **kwargs):
dist = model.Distribution(branding_code='MOO')
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSBrandID': 'MOO',
'KSChannelID-full': '-full'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
self.assertEqual(0, kwargs['move_file'].call_count)
self.assertTrue(self._is_framework_unchanged(kwargs))
def test_distribution_with_channel(self, read_plist, **kwargs):
dist = model.Distribution(channel='dev')
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID': 'dev',
'KSChannelID-full': 'dev-full'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
self.assertEqual(0, kwargs['move_file'].call_count)
self.assertEqual(0, kwargs['write_file'].call_count)
self.assertTrue(self._is_framework_unchanged(kwargs))
def test_distribution_with_architecture_and_channel(self, read_plist,
**kwargs):
read_plist.side_effect = plist_read_with_architecture
dist = model.Distribution(channel='dev')
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID': 'arm64-dev',
'KSChannelID-full': 'arm64-dev-full'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
self.assertEqual(0, kwargs['move_file'].call_count)
self.assertEqual(0, kwargs['write_file'].call_count)
self.assertTrue(self._is_framework_unchanged(kwargs))
def test_distribution_with_product_dirname(self, read_plist, **kwargs):
dist = model.Distribution(product_dirname='Farmland/Cows')
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID-full': '-full',
'CrProductDirName': 'Farmland/Cows'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
self.assertEqual(0, kwargs['move_file'].call_count)
self.assertEqual(0, kwargs['write_file'].call_count)
self.assertTrue(self._is_framework_unchanged(kwargs))
def test_distribution_with_creator_code(self, read_plist, **kwargs):
dist = model.Distribution(creator_code='Mooo')
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID-full': '-full',
'CFBundleSignature': 'Mooo'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
kwargs['write_file'].assert_called_once_with(
'/$W/App Product.app/Contents/PkgInfo', 'APPLMooo')
self.assertEqual(0, kwargs['move_file'].call_count)
def test_distribution_with_brand_and_channel(self, read_plist, **kwargs):
dist = model.Distribution(channel='beta', branding_code='RAWR')
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(1, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_called_with(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID': 'beta',
'KSChannelID-full': 'beta-full',
'KSBrandID': 'RAWR'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1')
self.assertEqual(4, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
])
self.assertEqual(0, kwargs['move_file'].call_count)
self.assertEqual(0, kwargs['write_file'].call_count)
def test_customize_channel(self, read_plist, **kwargs):
dist = model.Distribution(
channel='canary',
app_name_fragment='Canary',
product_dirname='Acme/Product Canary',
creator_code='Mooo',
channel_customize=True)
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
# Order of file moves is significant.
self.assertEqual(kwargs['move_file'].mock_calls, [
mock.call('/$W/App Product.app', '/$W/App Product Canary.app'),
mock.call(
'/$W/App Product Canary.app/Contents/MacOS/App Product',
'/$W/App Product Canary.app/Contents/MacOS/App Product Canary'),
mock.call(
'/$W/App Product Canary.app/Contents/Resources/test.signing.bundle_id.manifest/Contents/Resources/test.signing.bundle_id.manifest',
'/$W/App Product Canary.app/Contents/Resources/test.signing.bundle_id.manifest/Contents/Resources/test.signing.bundle_id.canary.manifest'
),
mock.call(
'/$W/App Product Canary.app/Contents/Resources/test.signing.bundle_id.manifest',
'/$W/App Product Canary.app/Contents/Resources/test.signing.bundle_id.canary.manifest'
),
])
self.assertEqual(7, kwargs['copy_files'].call_count)
kwargs['copy_files'].assert_has_calls([
mock.call('/$I/Product Packaging/app-entitlements.plist',
'/$W/app-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-gpu-entitlements.plist',
'/$W/helper-gpu-entitlements.plist'),
mock.call('/$I/Product Packaging/helper-plugin-entitlements.plist',
'/$W/helper-plugin-entitlements.plist'),
mock.call(
'/$I/Product Packaging/helper-renderer-entitlements.plist',
'/$W/helper-renderer-entitlements.plist'),
mock.call('/$I/Product Packaging/app_canary.icns',
'/$W/App Product Canary.app/Contents/Resources/app.icns'),
mock.call(
'/$I/Product Packaging/document_canary.icns',
'/$W/App Product Canary.app/Contents/Resources/document.icns'),
mock.call(
'/$I/Product Packaging/app_canary.icns',
'/$W/App Product Canary.app/Contents/Frameworks/Product Framework.framework/Helpers/Product Helper (Alerts).app/Contents/Resources/app.icns'
)
])
kwargs['write_file'].assert_called_once_with(
'/$W/App Product Canary.app/Contents/PkgInfo', 'APPLMooo')
self.assertEqual(8, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_has_calls([
mock.call(
{
'CFBundleIdentifier':
'test.signing.bundle_id.canary.AlertNotificationService'
},
'/$W/App Product Canary.app/Contents/Frameworks/Product Framework.framework/Helpers/Product Helper (Alerts).app/Contents/Info.plist',
'xml1'),
mock.call({
'CFBundleDisplayName': 'Product Canary'
}, '/$W/App Product Canary.app/Contents/Frameworks/Product Framework.framework/Helpers/Product Helper (Alerts).app/Contents/Resources/base.lproj/InfoPlist.strings',
'binary1'),
mock.call(
{
'CFBundleDisplayName': 'Product Canary',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleExecutable': config.app_product,
'CFBundleName': 'Product Canary',
'KSProductID': 'test.ksproduct.canary',
'KSChannelID': 'canary',
'KSChannelID-full': 'canary-full',
'CrProductDirName': 'Acme/Product Canary',
'CFBundleSignature': 'Mooo'
}, '/$W/App Product Canary.app/Contents/Info.plist', 'xml1'),
mock.call(
{
'com.apple.application-identifier':
'test.signing.bundle_id.canary'
}, '/$W/app-entitlements.plist', 'xml1'),
mock.call({}, '/$W/helper-gpu-entitlements.plist', 'xml1'),
mock.call({}, '/$W/helper-plugin-entitlements.plist', 'xml1'),
mock.call({}, '/$W/helper-renderer-entitlements.plist', 'xml1'),
mock.call({
'pfm_domain': 'test.signing.bundle_id.canary'
}, '/$W/App Product Canary.app/Contents/Resources/test.signing.bundle_id.canary.manifest/Contents/Resources/test.signing.bundle_id.canary.manifest',
'xml1')
])
self.assertFalse(self._is_framework_unchanged(kwargs))
def test_get_task_allow_no_channel_customize(self, read_plist, **kwargs):
dist = model.Distribution()
self.config = test_config.TestConfigInjectGetTaskAllow()
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(5, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_has_calls([
mock.call(
{
'CFBundleDisplayName': 'Product',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleName': 'Product',
'KSProductID': 'test.ksproduct',
'KSChannelID-full': '-full'
}, '/$W/App Product.app/Contents/Info.plist', 'xml1'),
mock.call(
{
'com.apple.security.get-task-allow': True,
'com.apple.application-identifier': config.base_bundle_id
}, '/$W/app-entitlements.plist', 'xml1'),
mock.call({'com.apple.security.get-task-allow': True},
'/$W/helper-gpu-entitlements.plist', 'xml1'),
mock.call({'com.apple.security.get-task-allow': True},
'/$W/helper-plugin-entitlements.plist', 'xml1'),
mock.call({'com.apple.security.get-task-allow': True},
'/$W/helper-renderer-entitlements.plist', 'xml1'),
])
def test_get_task_allow_customize_channel(self, read_plist, **kwargs):
dist = model.Distribution(
channel='canary',
app_name_fragment='Canary',
product_dirname='Acme/Product Canary',
creator_code='Mooo',
channel_customize=True)
self.config = test_config.TestConfigInjectGetTaskAllow()
config = dist.to_config(self.config)
modification.customize_distribution(self.paths, dist, config)
self.assertEqual(8, kwargs['write_plist'].call_count)
kwargs['write_plist'].assert_has_calls([
mock.call(
{
'CFBundleIdentifier':
'test.signing.bundle_id.canary.AlertNotificationService'
},
'/$W/App Product Canary.app/Contents/Frameworks/Product Framework.framework/Helpers/Product Helper (Alerts).app/Contents/Info.plist',
'xml1'),
mock.call({
'CFBundleDisplayName': 'Product Canary'
}, '/$W/App Product Canary.app/Contents/Frameworks/Product Framework.framework/Helpers/Product Helper (Alerts).app/Contents/Resources/base.lproj/InfoPlist.strings',
'binary1'),
mock.call(
{
'CFBundleDisplayName': 'Product Canary',
'CFBundleIdentifier': config.base_bundle_id,
'CFBundleExecutable': config.app_product,
'CFBundleName': 'Product Canary',
'KSProductID': 'test.ksproduct.canary',
'KSChannelID': 'canary',
'KSChannelID-full': 'canary-full',
'CrProductDirName': 'Acme/Product Canary',
'CFBundleSignature': 'Mooo'
}, '/$W/App Product Canary.app/Contents/Info.plist', 'xml1'),
mock.call(
{
'com.apple.security.get-task-allow':
True,
'com.apple.application-identifier':
'test.signing.bundle_id.canary'
}, '/$W/app-entitlements.plist', 'xml1'),
mock.call({'com.apple.security.get-task-allow': True},
'/$W/helper-gpu-entitlements.plist', 'xml1'),
mock.call({'com.apple.security.get-task-allow': True},
'/$W/helper-plugin-entitlements.plist', 'xml1'),
mock.call({'com.apple.security.get-task-allow': True},
'/$W/helper-renderer-entitlements.plist', 'xml1'),
mock.call({
'pfm_domain': 'test.signing.bundle_id.canary'
}, '/$W/App Product Canary.app/Contents/Resources/test.signing.bundle_id.canary.manifest/Contents/Resources/test.signing.bundle_id.canary.manifest',
'xml1')
])
|
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snapshot
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manage snapshots of the running states of selected features.
description:
- Create snapshots of the running states of selected features, add
new show commands for snapshot creation, delete and compare
existing snapshots.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(transport=cli) may cause timeout errors.
  - The C(element_key1) and C(element_key2) parameters specify the tags used
    to distinguish among row entries. In most cases, only the element_key1
    parameter needs to be specified to be able to distinguish among row entries.
- C(action=compare) will always store a comparison report on a local file.
options:
action:
description:
- Define what snapshot action the module would perform.
required: true
    choices: ['create','add','compare','delete','delete_all']
snapshot_name:
description:
- Snapshot name, to be used when C(action=create)
or C(action=delete).
required: false
default: null
description:
description:
- Snapshot description to be used when C(action=create).
required: false
default: null
snapshot1:
description:
- First snapshot to be used when C(action=compare).
required: false
default: null
snapshot2:
description:
- Second snapshot to be used when C(action=compare).
required: false
default: null
comparison_results_file:
description:
      - Name of the file where the snapshot comparison will be stored.
required: false
default: null
compare_option:
description:
- Snapshot options to be used when C(action=compare).
required: false
default: null
choices: ['summary','ipv4routes','ipv6routes']
section:
description:
- Used to name the show command output, to be used
when C(action=add).
required: false
default: null
show_command:
description:
- Specify a new show command, to be used when C(action=add).
required: false
default: null
row_id:
description:
- Specifies the tag of each row entry of the show command's
XML output, to be used when C(action=add).
required: false
default: null
element_key1:
description:
- Specify the tags used to distinguish among row entries,
to be used when C(action=add).
required: false
default: null
element_key2:
description:
- Specify the tags used to distinguish among row entries,
to be used when C(action=add).
required: false
default: null
save_snapshot_locally:
description:
      - Specify to locally store a newly created snapshot,
to be used when C(action=create).
required: false
default: false
choices: ['true','false']
path:
description:
      - Specify the path of the file where the newly created snapshot or
        snapshot comparison will be stored, to be used when
C(action=create) and C(save_snapshot_locally=true) or
C(action=compare).
required: false
default: './'
'''
EXAMPLES = '''
# Create a snapshot and store it locally
- nxos_snapshot:
action: create
snapshot_name: test_snapshot
description: Done with Ansible
save_snapshot_locally: true
path: /home/user/snapshots/
# Delete a snapshot
- nxos_snapshot:
action: delete
snapshot_name: test_snapshot
# Delete all existing snapshots
- nxos_snapshot:
action: delete_all
# Add a show command for snapshot creation
- nxos_snapshot:
    action: add
    section: myshow
show_command: show ip interface brief
row_id: ROW_intf
element_key1: intf-name
# Compare two snapshots
- nxos_snapshot:
action: compare
snapshot1: pre_snapshot
snapshot2: post_snapshot
comparison_results_file: compare_snapshots.txt
compare_option: summary
path: '../snapshot_reports/'
'''
RETURN = '''
commands:
description: commands sent to the device
returned: verbose mode
type: list
sample: ["snapshot create post_snapshot Post-snapshot"]
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
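# Show commands are issued with plain-text output so that the raw CLI text can
# be parsed with the regular expressions used below.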
def execute_show_command(command, module):
command = [{
'command': command,
'output': 'text',
}]
return run_commands(module, command)
def get_existing(module):
existing = []
command = 'show snapshots'
body = execute_show_command(command, module)[0]
if body:
split_body = body.splitlines()
        snapshot_regex = (r'(?P<name>\S+)\s+(?P<date>\w+\s+\w+\s+\d+\s+\d+'
                          r':\d+:\d+\s+\d+)\s+(?P<description>.*)')
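        # Each line of 'show snapshots' output is expected to hold a snapshot
        # name, a timestamp and a free-form description; lines that do not
        # match are skipped.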
for snapshot in split_body:
temp = {}
try:
match_snapshot = re.match(snapshot_regex, snapshot, re.DOTALL)
snapshot_group = match_snapshot.groupdict()
temp['name'] = snapshot_group['name']
temp['date'] = snapshot_group['date']
temp['description'] = snapshot_group['description']
existing.append(temp)
except AttributeError:
pass
return existing
def action_create(module, existing_snapshots):
commands = list()
exist = False
for snapshot in existing_snapshots:
if module.params['snapshot_name'] == snapshot['name']:
exist = True
if exist is False:
commands.append('snapshot create {0} {1}'.format(
module.params['snapshot_name'], module.params['description']))
return commands
def action_add(module, existing_snapshots):
commands = list()
command = 'show snapshot sections'
sections = []
body = execute_show_command(command, module)[0]
if body:
        section_regex = r'.*\[(?P<section>\S+)\].*'
split_body = body.split('\n\n')
for section in split_body:
temp = {}
for line in section.splitlines():
try:
match_section = re.match(section_regex, section, re.DOTALL)
temp['section'] = match_section.groupdict()['section']
except (AttributeError, KeyError):
pass
if 'show command' in line:
temp['show_command'] = line.split('show command: ')[1]
elif 'row id' in line:
temp['row_id'] = line.split('row id: ')[1]
elif 'key1' in line:
temp['element_key1'] = line.split('key1: ')[1]
elif 'key2' in line:
temp['element_key2'] = line.split('key2: ')[1]
if temp:
sections.append(temp)
proposed = {
'section': module.params['section'],
'show_command': module.params['show_command'],
'row_id': module.params['row_id'],
'element_key1': module.params['element_key1'],
'element_key2': module.params['element_key2'] or '-',
}
if proposed not in sections:
if module.params['element_key2']:
commands.append('snapshot section add {0} "{1}" {2} {3} {4}'.format(
module.params['section'], module.params['show_command'],
module.params['row_id'], module.params['element_key1'],
module.params['element_key2']))
else:
commands.append('snapshot section add {0} "{1}" {2} {3}'.format(
module.params['section'], module.params['show_command'],
module.params['row_id'], module.params['element_key1']))
return commands
def action_compare(module, existing_snapshots):
command = 'show snapshot compare {0} {1}'.format(
module.params['snapshot1'], module.params['snapshot2'])
if module.params['compare_option']:
command += ' {0}'.format(module.params['compare_option'])
body = execute_show_command(command, module)[0]
return body
def action_delete(module, existing_snapshots):
commands = list()
exist = False
for snapshot in existing_snapshots:
if module.params['snapshot_name'] == snapshot['name']:
exist = True
if exist:
commands.append('snapshot delete {0}'.format(
module.params['snapshot_name']))
return commands
def action_delete_all(module, existing_snapshots):
commands = list()
if existing_snapshots:
commands.append('snapshot delete all')
return commands
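# Generic dispatcher: looks the named function up in this module's globals, so
# invoke('action_%s' % action, module, existing) resolves to the matching
# action_* handler above; unknown names simply return None.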
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_snapshot(module):
command = 'show snapshot dump {0}'.format(module.params['snapshot_name'])
body = execute_show_command(command, module)[0]
return body
def write_on_file(content, filename, module):
path = module.params['path']
if path[-1] != '/':
path += '/'
filepath = '{0}{1}'.format(path, filename)
try:
report = open(filepath, 'w')
report.write(content)
report.close()
except:
module.fail_json(msg="Error while writing on file.")
return filepath
def main():
argument_spec = dict(
action=dict(required=True, choices=['create', 'add', 'compare', 'delete', 'delete_all']),
snapshot_name=dict(type='str'),
description=dict(type='str'),
snapshot1=dict(type='str'),
snapshot2=dict(type='str'),
compare_option=dict(choices=['summary', 'ipv4routes', 'ipv6routes']),
comparison_results_file=dict(type='str'),
section=dict(type='str'),
show_command=dict(type='str'),
row_id=dict(type='str'),
element_key1=dict(type='str'),
element_key2=dict(type='str'),
save_snapshot_locally=dict(type='bool', default=False),
path=dict(type='str', default='./')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
action = module.params['action']
comparison_results_file = module.params['comparison_results_file']
CREATE_PARAMS = ['snapshot_name', 'description']
ADD_PARAMS = ['section', 'show_command', 'row_id', 'element_key1']
COMPARE_PARAMS = ['snapshot1', 'snapshot2', 'comparison_results_file']
if not os.path.isdir(module.params['path']):
module.fail_json(msg='{0} is not a valid directory name.'.format(
module.params['path']))
if action == 'create':
for param in CREATE_PARAMS:
if not module.params[param]:
module.fail_json(msg='snapshot_name and description are '
'required when action=create')
elif action == 'add':
for param in ADD_PARAMS:
if not module.params[param]:
module.fail_json(msg='section, show_command, row_id '
'and element_key1 are required '
'when action=add')
elif action == 'compare':
for param in COMPARE_PARAMS:
if not module.params[param]:
                module.fail_json(msg='snapshot1 and snapshot2 are required '
                                     'when action=compare')
elif action == 'delete' and not module.params['snapshot_name']:
module.fail_json(msg='snapshot_name is required when action=delete')
existing_snapshots = invoke('get_existing', module)
action_results = invoke('action_%s' % action, module, existing_snapshots)
result = {'changed': False, 'commands': []}
if not module.check_mode:
if action == 'compare':
result['commands'] = []
else:
if action_results:
load_config(module, action_results)
result['commands'] = action_results
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
from __future__ import print_function
from django.test import LiveServerTestCase
from selenium import webdriver
import os
from django.contrib.auth.models import User
# from imager_images.models import Photo, Album
import factory
import time
SCREEN_DUMP_LOCATION = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'screendumps'
)
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
django_get_or_create = ('username',)
username = 'bob'
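    # PostGenerationMethodCall invokes user.set_password('password') after the
    # user is built, so the stored password is properly hashed.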
password = factory.PostGenerationMethodCall('set_password', 'password')
class TestLogin(LiveServerTestCase):
def setUp(self):
self.driver1 = webdriver.Firefox()
self.driver2 = webdriver.Chrome()
super(TestLogin, self).setUp()
self.bob = UserFactory.create()
self.bob_profile = self.bob.profile
self.alice = UserFactory.create(username='alice')
self.alice_profile = self.alice.profile
def tearDown(self):
self.driver1.quit()
self.driver2.quit()
super(TestLogin, self).tearDown()
def test_chatroom_simple(self):
# bob finds his way to homepage, and logs in
self.driver1.get(self.live_server_url)
self.assertIn('Login', self.driver1.page_source)
self.driver1.find_element_by_link_text("Login").click()
form = self.driver1.find_element_by_tag_name("form")
username_field = self.driver1.find_element_by_id("id_username")
username_field.send_keys("bob")
password_field = self.driver1.find_element_by_id("id_password")
password_field.send_keys("password")
form.submit()
# having logged in, bob sees his profile page.
self.assertIn('Logout', self.driver1.page_source)
self.assertIn('Welcome, bob!', self.driver1.page_source)
# wanting to socialize with other nerds, bob goes back
# to the homepage to look for chatrooms.
self.driver1.find_element_by_link_text("Home").click()
self.assertIn('Super Smash Brothers', self.driver1.page_source)
self.assertIn('Destiny', self.driver1.page_source)
# having seen a chatroom bob'd like to visit, bob
# enters the chat list, and looks for rooms.
self.driver1.find_element_by_link_text("Destiny").click()
self.assertIn('Available Chat Rooms', self.driver1.page_source)
self.assertIn('No chats are available.', self.driver1.page_source)
        # seeing no chat rooms available, bob decides to create one.
form = self.driver1.find_element_by_tag_name("form")
chat_name = self.driver1.find_element_by_name("Enter a New Room Name")
chat_name.send_keys("I like Destiny's Child")
form.submit()
# inside the chatroom, bob sees that he is the only
# subscriber. Bob sadly writes messages to himself.
form = self.driver1.find_element_by_tag_name("form")
message_field = self.driver1.find_element_by_id("messageinput")
message_field.send_keys("I like Destinys Child!")
form.submit()
time.sleep(1)
self.assertIn('<li>bob: I like Destinys Child!</li>',
self.driver1.page_source)
# sad, lonely, and unfulfilled, bob logs out.
self.driver1.find_element_by_link_text("Logout").click()
self.assertIn('Login', self.driver1.page_source)
def test_dueling_drivers(self):
# bob finds his way to homepage, and logs in
self.driver1.get(self.live_server_url)
self.assertIn('Login', self.driver1.page_source)
self.driver1.find_element_by_link_text("Login").click()
form = self.driver1.find_element_by_tag_name("form")
username_field = self.driver1.find_element_by_id("id_username")
username_field.send_keys("bob")
password_field = self.driver1.find_element_by_id("id_password")
password_field.send_keys("password")
form.submit()
        # having logged in, bob sees his profile page.
self.assertIn('Logout', self.driver1.page_source)
self.assertIn('Welcome, bob!', self.driver1.page_source)
# wanting to socialize with other nerds, bob goes back
# to the homepage to look for chatrooms.
self.driver1.find_element_by_link_text("Home").click()
self.assertIn('Super Smash Brothers', self.driver1.page_source)
self.assertIn('Destiny', self.driver1.page_source)
# having seen a chatroom bob'd like to visit, he
# enters the chat list, and looks for rooms.
self.driver1.find_element_by_link_text("Destiny").click()
self.assertIn('Available Chat Rooms', self.driver1.page_source)
self.assertIn('No chats are available.', self.driver1.page_source)
        # seeing no chat rooms available, bob decides to create one.
form = self.driver1.find_element_by_tag_name("form")
chat_name = self.driver1.find_element_by_name("Enter a New Room Name")
chat_name.send_keys("destinychat")
form.submit()
# alice goes to elleffgee.com, looking for someone to chat with.
self.driver2.get(self.live_server_url)
self.assertIn('Login', self.driver2.page_source)
self.driver2.find_element_by_link_text("Login").click()
form = self.driver2.find_element_by_tag_name("form")
username_field = self.driver2.find_element_by_id("id_username")
username_field.send_keys("alice")
password_field = self.driver2.find_element_by_id("id_password")
password_field.send_keys("password")
form.submit()
# having logged in, alice sees her profile page.
self.assertIn('Logout', self.driver2.page_source)
self.assertIn('Welcome, alice!', self.driver2.page_source)
# wanting to socialize with other nerds, alice goes back
# to the homepage to look for chatrooms.
self.driver2.find_element_by_link_text("Home").click()
self.assertIn('Super Smash Brothers', self.driver2.page_source)
self.assertIn('Destiny', self.driver2.page_source)
# having seen a chatroom alice would like to visit, she
# enters the chat list, and looks for rooms.
self.driver2.find_element_by_link_text("Destiny").click()
self.assertIn('destinychat', self.driver2.page_source)
self.driver2.find_element_by_partial_link_text("destinychat").click()
time.sleep(2)
        # seeing someone else, alice sends a message:
form = self.driver2.find_element_by_tag_name("form")
message_field = self.driver2.find_element_by_id("messageinput")
message_field.send_keys("Hello?")
form.submit()
# bob sees message, and responds:
time.sleep(2)
self.assertIn('<li>alice: Hello?</li>', self.driver1.page_source)
form = self.driver1.find_element_by_tag_name("form")
message_field = self.driver1.find_element_by_id("messageinput")
message_field.send_keys("Hello!")
form.submit()
# alice sees bob's message, and responds:
time.sleep(2)
self.assertIn('<li>bob: Hello!</li>', self.driver2.page_source)
form = self.driver2.find_element_by_tag_name("form")
message_field = self.driver2.find_element_by_id("messageinput")
message_field.send_keys("asl")
form.submit()
# bob, offended, leaves to block alice:
time.sleep(2)
self.assertIn('<li>alice: asl</li>', self.driver1.page_source)
self.driver1.find_element_by_link_text("Profile List").click()
self.driver1.find_element_by_link_text("alice").click()
self.driver1.find_element_by_link_text("Block User").click()
# alice tries again:
form = self.driver2.find_element_by_tag_name("form")
message_field = self.driver2.find_element_by_id("messageinput")
message_field.send_keys("seriously asl")
form.submit()
        # bob goes back to the home page and finds that all of alice's
        # messages are blocked
self.driver1.find_element_by_link_text("Home").click()
self.driver1.find_element_by_link_text("Destiny").click()
self.driver1.find_element_by_partial_link_text("destinychat").click()
time.sleep(2)
# alice tries one more time:
form = self.driver2.find_element_by_tag_name("form")
message_field = self.driver2.find_element_by_id("messageinput")
message_field.send_keys("asl stands for age sex location")
form.submit()
        # bob sees that alice's messages are now blocked
time.sleep(2)
self.assertIn('<li>alice: blocked</li>', self.driver1.page_source)
time.sleep(5)
|
|
import sublime
import os
import time
import math
from .action_history import ActionHistory
from .java_utils import JavaUtils
from .settings import Settings
from .state_property import StateProperty
from .status_manager import StatusManager
from .thread_progress import MultiThreadProgress
class _BuildSystem:
"""
A multi-thread build system
"""
@classmethod
def instance(cls):
if not hasattr(cls, "_instance"):
cls._instance = cls()
return cls._instance
def __init__(self):
self.log_view = None
self.reset()
def reset(self):
"""
Reset the instance variables to clear the build
"""
self.failed = False
self.building = False
self.builders = []
self.create_log = False
self.finish_callback = None
self.cancel_callback = None
def create_builder(self, files=None, macro_data=None):
"""
Creates and run a builder thread for specified files
@param files: a list of file paths
"""
if not files:
return
from ..threads import BuilderThread
macro_data = macro_data or {}
builder = BuilderThread(self, files, macro_data, files)
self.progress.add(builder, "")
if not self.progress.running:
self.progress.run()
self.builders.append(builder)
def on_builder_complete(self, total_files, elapse_time, data, ret, params):
"""
A callback for the builder thread
        @param total_files: the total number of files passed to the builder
        @param elapse_time: the total time taken to build the files
        @param data: the data returned from the process
        @param ret: the return code from the process
        @param params: additional parameters passed to the builder
"""
if self.create_log and (
not self.log_view or not self.log_view.id()
):
self.cancel_build()
return
if ret != 0:
self.failed = True
else:
self.update_cache_for_files(params)
self.current_progress += total_files
self.progress.set_message("Building %s of %s file%s... %.2f%%" % (
self.current_progress,
self.total_progress,
"s" if self.total_progress > 1 else "",
self.current_progress * 100 / self.total_progress
))
if data:
if not self.create_log and not self.log_view:
target_group, target_index = Settings().get_view_index(
"build_log_target_group"
)
self.create_log = True
self.log_view = self.window.new_file()
self.window.set_view_index(
self.log_view,
target_group,
target_index
)
self.log_view.set_name("Preparing build log...")
self.log_view.set_syntax_file(
"Packages/Javatar/syntax/JavaCompilationError.tmLanguage"
)
                # Delay access to the view while it is being created, which
                # would otherwise cause a duplicate view to be created
time.sleep(Settings().get("build_log_delay"))
self.log_view.set_scratch(True)
self.log_view.run_command("javatar_utils", {
"util_type": "add",
"text": data
})
def on_build_complete(self):
"""
        A callback invoked when the build process has finished
"""
if self.create_log and (
not self.log_view or not self.log_view.id()
):
StatusManager().show_notification("Building Cancelled")
StatusManager().show_status("Building Cancelled", target="build")
ActionHistory().add_action(
"javatar.core.build_system.on_build_complete",
"Building Cancelled"
)
if self.cancel_callback:
self.cancel_callback()
return
if self.failed:
message = "Building Failed [{0:.2f}s]"
elif self.create_log:
message = "Building Finished with Warning [{0:.2f}s]"
else:
message = "Building Finished [{0:.2f}s]"
time_diff = time.time() - self.start_time
StatusManager().show_notification(message.format(time_diff))
self.build_size = -1
if self.log_view:
self.log_view.set_name(message.format(time_diff))
StatusManager().show_status(message.format(time_diff), target="build")
ActionHistory().add_action(
"javatar.core.build_system.on_build_complete",
message.format(time_diff)
)
if self.finish_callback:
self.finish_callback(self.failed)
self.reset()
def cancel_build(self):
"""
Cancels all running builders
"""
if not self.building:
return
for builder in self.builders:
builder.cancel()
self.building = False
def trim_extension(self, file_path):
"""
Remove a file extension from the file path
@param file_path: a file path to remove an extension
"""
filename, ext = os.path.splitext(os.path.basename(file_path))
for extension in Settings().get("java_extensions"):
if ext == extension:
return file_path[:-len(ext)]
return file_path
def update_cache_for_files(self, files):
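        """
        Records the last modified time of each built file in the build cache
        so that unchanged files can be skipped by is_file_changed on the
        next build
        @param files: a list of file paths that were built successfully
        """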
if Settings().get("always_rebuild"):
return
cache = StateProperty().load_cache()
if "build_cache" not in cache:
cache["build_cache"] = {}
for file_path in files:
modified_time = int(os.path.getmtime(file_path))
full_class_path = JavaUtils().to_package(
self.trim_extension(file_path)
).as_class_path()
cache["build_cache"][full_class_path] = modified_time
StateProperty().save_cache(cache)
def is_file_changed(self, file_path):
"""
Returns whether the specified file path has been modified or not
        @param file_path: a file path to check (must exist)
"""
if Settings().get("always_rebuild"):
return True
modified_time = int(os.path.getmtime(file_path))
cache = StateProperty().load_cache()
if "build_cache" not in cache:
cache["build_cache"] = {}
full_class_path = JavaUtils().to_package(
self.trim_extension(file_path)
).as_class_path()
if full_class_path not in cache["build_cache"]:
return True
return cache["build_cache"][full_class_path] != modified_time
def build_files(self, files=None, window=None):
"""
        Calculates and assigns file paths to builder threads
@param files: a list of file paths
"""
self.log_view = None
self.window = window or sublime.active_window()
if self.building:
self.cancel_build()
if not files:
return "No class to build"
self.start_time = time.time()
if not Settings().get("always_rebuild"):
files = [
file_path
for file_path in files
if self.is_file_changed(file_path)
]
            if not files:
                self.on_build_complete()
                return None
from .jdk_manager import JDKManager
macro_data = {}
executable_name = JDKManager().get_executable("build")
if not executable_name:
return "Build executable is not found"
self.building = True
self.progress = MultiThreadProgress(
"Preparing build",
on_all_complete=self.on_build_complete,
target="build"
)
self.current_progress = 0
self.total_progress = len(files)
per_thread = math.ceil(
len(files) / Settings().get("builder_threads", 1)
)
self.progress.set_message("Building %s of %s file%s... %.2f%%" % (
self.current_progress,
self.total_progress,
"s" if self.total_progress > 1 else "",
self.current_progress * 100 / self.total_progress
if self.total_progress > 0 else 0
))
while files:
self.create_builder(files[:per_thread], macro_data=macro_data)
files = files[per_thread:]
return None
def build_dir(self, dir_path=None, window=None):
"""
Builds all files within a specified directory
@param dir_path: a directory path
"""
if not dir_path:
return False
return self.build_files(self.get_files(dir_path), window=window)
def build_dirs(self, dir_paths=None, window=None):
"""
Builds all files within specified directories
        @param dir_paths: a list of directory paths
"""
if not dir_paths:
return False
files = []
for dir_path in dir_paths:
files += self.get_files(dir_path)
if not files:
return False
return self.build_files(files, window=window)
def get_files(self, dir_path=None):
"""
        Returns a list of file paths in the specified directory and its
        sub-directories
@param dir_path: a directory path
"""
if not dir_path:
return []
files = []
for name in os.listdir(dir_path):
path_name = os.path.join(dir_path, name)
if (os.path.isdir(path_name) and
name not in Settings().get_sublime(
"folder_exclude_patterns", []
)):
files += self.get_files(path_name)
elif (os.path.isfile(path_name) and
JavaUtils().is_java_file(path_name)):
files.append(path_name)
return files
def BuildSystem():
return _BuildSystem.instance()
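# A minimal usage sketch (illustrative; the source path is a placeholder and an
# active Sublime window is assumed):
#
#     error = BuildSystem().build_dir(
#         "/path/to/java/sources", window=sublime.active_window()
#     )
#     if error:
#         sublime.error_message(error)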
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class min_max_link_delay(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/min-max-link-delay. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines min/max link delay.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "min-max-link-delay"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"min-max-link-delay",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/min_max_link_delay/state (container)
YANG Description: State parameters of IS Extended Reachability sub-TLV 34.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/min_max_link_delay/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IS Extended Reachability sub-TLV 34.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class min_max_link_delay(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/min-max-link-delay. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines min/max link delay.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "min-max-link-delay"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"min-max-link-delay",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/min_max_link_delay/state (container)
YANG Description: State parameters of IS Extended Reachability sub-TLV 34.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/min_max_link_delay/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IS Extended Reachability sub-TLV 34.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
|
|
###############################################################################
# Name: doctools.py #
# Purpose: Tools for managing document services #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
Provides helper functions and classes for managing documents and their services.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: doctools.py 70230 2012-01-01 01:47:42Z CJP $"
__revision__ = "$Revision: 70230 $"
#--------------------------------------------------------------------------#
# Imports
import os
import sys
# Editra Libraries
import util
from profiler import Profile_Get
import ebmlib
#--------------------------------------------------------------------------#
class DocPositionMgr(object):
"""Object for managing the saving and setting of a collection of
documents positions between sessions. Through the use of an in memory
dictionary during run time and on disk dictionary to use when starting
and stopping the editor.
@note: saves config to ~/.Editra/cache/
"""
_poscache = ebmlib.HistoryCache(100)
def __init__(self):
"""Creates the position manager object"""
super(DocPositionMgr, self).__init__()
# Attributes
self._init = False
self._book = None
self._records = dict()
def InitPositionCache(self, book_path):
"""Initialize and load the on disk document position cache.
@param book_path: path to on disk cache
"""
self._init = True
self._book = book_path
if Profile_Get('SAVE_POS'):
self.LoadBook(book_path)
@classmethod
def AddNaviPosition(cls, fname, pos):
"""Add a new position to the navigation cache
@param fname: file name
@param pos: position
"""
# Don't put two identical positions in the cache next to each other
pre = cls._poscache.PeekPrevious()
next = cls._poscache.PeekNext()
if (fname, pos) in (pre, next):
return
cls._poscache.PutItem((fname, pos))
def AddRecord(self, vals):
"""Adds a record to the dictionary from a list of the
filename vals[0] and the position value vals[1].
@param vals: (file path, cursor position)
"""
if len(vals) == 2:
self._records[vals[0]] = vals[1]
return True
else:
return False
@classmethod
def CanNavigateNext(cls):
"""Are there more cached navigation positions?
@param cls: Class
@return: bool
"""
return cls._poscache.HasNext()
@classmethod
def CanNavigatePrev(cls):
"""Are there previous cached navigation positions?
@param cls: Class
@return: bool
"""
return cls._poscache.HasPrevious()
@classmethod
def FlushNaviCache(cls):
"""Clear the navigation cache"""
cls._poscache.Clear()
@classmethod
def GetNaviCacheSize(cls):
return cls._poscache.GetSize()
def GetBook(self):
"""Returns the current book used by this object
@return: path to book used by this manager
"""
return self._book
@classmethod
def GetNextNaviPos(cls, fname=None):
"""Get the next stored navigation position
The optional fname parameter will get the next found position for
the given file.
@param cls: Class
@param fname: filename (note currently not supported)
@return: int or None
@note: fname is currently not used
"""
item = cls._poscache.GetNextItem()
return item
@classmethod
def GetPreviousNaviPos(cls, fname=None):
"""Get the last stored navigation position
The optional fname parameter will get the last found position for
the given file.
@param cls: Class
@param fname: filename (note currently not supported)
@return: int or None
@note: fname is currently not used
"""
item = cls._poscache.GetPreviousItem()
return item
def GetPos(self, name):
"""Get the position record for a given filename
returns 0 if record is not found.
@param name: file name
@return: position value for the given filename
"""
return self._records.get(name, 0)
def IsInitialized(self):
"""Has the cache been initialized
@return: bool
"""
return self._init
def LoadBook(self, book):
"""Loads a set of records from an on disk dictionary
the entries are formated as key=value with one entry
per line in the file.
@param book: path to saved file
@return: whether book was loaded or not
"""
# If file does not exist create it and return
if not os.path.exists(book):
try:
tfile = util.GetFileWriter(book)
tfile.close()
except (IOError, OSError):
util.Log("[docpositionmgr][err] failed to load book: %s" % book)
return False
except AttributeError:
util.Log("[docpositionmgr][err] Failed to create: %s" % book)
return False
reader = util.GetFileReader(book, sys.getfilesystemencoding())
if reader != -1:
lines = list()
try:
lines = reader.readlines()
except:
reader.close()
return False
else:
reader.close()
for line in lines:
line = line.strip()
vals = line.rsplit(u'=', 1)
if len(vals) != 2 or not os.path.exists(vals[0]):
continue
try:
vals[1] = int(vals[1])
except (TypeError, ValueError), msg:
util.Log("[docpositionmgr][err] %s" % str(msg))
continue
else:
self._records[vals[0]] = vals[1]
util.Log("[docpositionmgr][info] successfully loaded book")
return True
@classmethod
def PeekNavi(cls, pre=False):
"""Peek into the navigation cache
@param cls: Class
@keyword pre: bool
"""
if pre:
if cls._poscache.HasPrevious():
return cls._poscache.PeekPrevious()
else:
if cls._poscache.HasNext():
return cls._poscache.PeekNext()
return None, None
def WriteBook(self):
"""Writes the collection of files=pos to the config file
@postcondition: in memory doc data is written out to disk
"""
writer = util.GetFileWriter(self.GetBook(), sys.getfilesystemencoding())
if writer != -1:
try:
for key, val in self._records.iteritems():
try:
writer.write(u"%s=%d\n" % (key, val))
except UnicodeDecodeError:
continue
writer.close()
except IOError, msg:
util.Log("[docpositionmgr][err] %s" % str(msg))
else:
util.Log("[docpositionmgr][err] Failed to open %s" % self.GetBook())
|
|
import unittest
from ometa.runtime import OMetaBase, ParseError, expected, eof
class RuntimeTests(unittest.TestCase):
"""
Tests for L{pymeta.runtime}.
"""
def test_anything(self):
"""
L{OMetaBase.rule_anything} returns each item from the input
along with its position.
"""
data = "foo"
o = OMetaBase(data)
for i, c in enumerate(data):
v, e = o.rule_anything()
self.assertEqual((c, i), (v, e[0]))
def test_exactly(self):
"""
L{OMetaBase.rule_exactly} returns the requested item from the input
string along with its position, if it's there.
"""
data = "foo"
o = OMetaBase(data)
v, e = o.rule_exactly("f")
self.assertEqual(v, "f")
self.assertEqual(e[0], 0)
def test_exactly_multi(self):
"""
L{OMetaBase.rule_exactly} returns the requested item from the input
string along with its position, if it's there.
"""
data = "foo"
o = OMetaBase(data)
v, e = o.rule_exactly("fo")
self.assertEqual(v, "fo")
self.assertEqual(e[0], 0)
def test_exactlyFail(self):
"""
L{OMetaBase.rule_exactly} raises L{ParseError} when the requested item
doesn't match the input. The error contains info on what was expected
and the position.
"""
data = "foo"
o = OMetaBase(data)
with self.assertRaises(ParseError) as e:
o.rule_exactly("g")
self.assertEquals(e.exception[1], expected(None, "g"))
self.assertEquals(e.exception[0], 0)
def test_token(self):
"""
L{OMetaBase.rule_token} matches all the characters in the given string
plus any preceding whitespace.
"""
data = " foo bar"
o = OMetaBase(data)
v, e = o.rule_token("foo")
self.assertEqual(v, "foo")
self.assertEqual(e[0], 4)
v, e = o.rule_token("bar")
self.assertEqual(v, "bar")
self.assertEqual(e[0], 8)
def test_tokenFailed(self):
"""
On failure, L{OMetaBase.rule_token} produces an error indicating the
position where match failure occurred and the expected character.
"""
data = "foozle"
o = OMetaBase(data)
with self.assertRaises(ParseError) as e:
o.rule_token("fog")
self.assertEqual(e.exception[0], 2)
self.assertEqual(e.exception[1], expected("token", "fog"))
def test_many(self):
"""
L{OMetaBase.many} returns a list of parsed values and the error that
caused the end of the loop.
"""
data = "ooops"
o = OMetaBase(data)
self.assertEqual(o.many(lambda: o.rule_exactly('o')),
(['o'] * 3, ParseError(o.input, 3,
expected(None, 'o'))))
def test_or(self):
"""
L{OMetaBase._or} returns the result of the first of its
arguments to succeed.
"""
data = "a"
o = OMetaBase(data)
called = [False, False, False]
targets = ['b', 'a', 'c']
matchers = []
for i, m in enumerate(targets):
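            # Default arguments bind the current loop values, so each matcher
            # remembers its own index and target character.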
def match(i=i, m=m):
called[i] = True
return o.exactly(m)
matchers.append(match)
v, e = o._or(matchers)
self.assertEqual(called, [True, True, False])
self.assertEqual(v, 'a')
self.assertEqual(e[0], 0)
def test_orSimpleFailure(self):
"""
When none of the alternatives passed to L{OMetaBase._or} succeed, the
one that got the furthest is returned.
"""
data = "foozle"
o = OMetaBase(data)
with self.assertRaises(ParseError) as e:
o._or([lambda: o.token("fog"),
lambda: o.token("foozik"),
lambda: o.token("woozle")])
self.assertEqual(e.exception[0], 4)
self.assertEqual(e.exception[1], expected("token", "foozik"))
def test_orFalseSuccess(self):
"""
When a failing branch of L{OMetaBase._or} gets further than a
succeeding one, its error is returned instead of the success branch's.
"""
data = "foozle"
o = OMetaBase(data)
v, e = o._or( [lambda: o.token("fog"),
lambda: o.token("foozik"),
lambda: o.token("f")])
self.assertEqual(e[0], 4)
self.assertEqual(e[1], expected("token", "foozik"))
def test_orErrorTie(self):
"""
When branches of L{OMetaBase._or} produce errors that tie for rightmost
position, they are merged.
"""
data = "foozle"
o = OMetaBase(data)
v, e = o._or( [lambda: o.token("fog"),
lambda: o.token("foz"),
lambda: o.token("f")])
self.assertEqual(e[0], 2)
self.assertEqual(e[1], [expected("token", "fog")[0],
expected("token", "foz")[0]])
def test_notError(self):
"""
When L{OMetaBase._not} fails, its error contains the current
input position and no error info.
"""
data = "xy"
o = OMetaBase(data)
with self.assertRaises(ParseError) as e:
o._not(lambda: o.exactly("x"))
self.assertEqual(e.exception[0], 1)
self.assertEqual(e.exception[1], None)
def test_spaces(self):
"""
L{OMetaBase.rule_spaces} provides error information.
"""
data = " xyz"
o = OMetaBase(data)
v, e = o.rule_spaces()
self.assertEqual(e[0], 2)
def test_predSuccess(self):
"""
L{OMetaBase.pred} returns True and empty error info on success.
"""
o = OMetaBase("")
v, e = o.pred(lambda: (True, ParseError(o.input, 0, None)))
self.assertEqual((v, e), (True, ParseError(o.input, 0, None)))
def test_predFailure(self):
"""
        L{OMetaBase.pred} raises L{ParseError} when the predicate fails.
"""
o = OMetaBase("")
with self.assertRaises(ParseError) as e:
o.pred(lambda: (False, ParseError(o.input, 0, None)))
self.assertEqual(e.exception, ParseError(o.input, 0, None))
def test_end(self):
"""
L{OMetaBase.rule_end} matches the end of input and raises L{ParseError}
if input is left.
"""
o = OMetaBase("abc")
with self.assertRaises(ParseError) as e:
o.rule_end()
self.assertEqual(e.exception, ParseError(o.input, 1, None))
o.many(o.rule_anything)
self.assertEqual(o.rule_end(), (True, ParseError("abc", 3, None)))
def test_label(self):
"""
        L{OMetaBase.label} attaches the given label to the error raised when
        the wrapped rule fails.
"""
data = "ooops"
label = 'CustomLabel'
o = OMetaBase(data)
with self.assertRaises(ParseError) as e:
o.label(lambda: o.rule_exactly('x'), label)
self.assertEqual(e.exception,
ParseError(o.input, 0, expected(label)).withMessage([("Custom Exception:", label, None)]))
def test_letter(self):
"""
L{OMetaBase.rule_letter} matches letters.
"""
o = OMetaBase("a1")
v, e = o.rule_letter()
self.assertEqual((v, e), ("a", ParseError(o.input, 0, None)))
with self.assertRaises(ParseError) as e:
o.rule_letter()
self.assertEqual(e.exception, ParseError(o.input, 1,
expected("letter")))
def test_letterOrDigit(self):
"""
L{OMetaBase.rule_letterOrDigit} matches alphanumerics.
"""
o = OMetaBase("a1@")
v, e = o.rule_letterOrDigit()
self.assertEqual((v, e), ("a", ParseError(None, 0, None)))
v, e = o.rule_letterOrDigit()
self.assertEqual((v, e), ("1", ParseError(None, 1, None)))
with self.assertRaises(ParseError) as e:
o.rule_letterOrDigit()
self.assertEqual(e.exception,
ParseError(o.input, 2, expected("letter or digit")))
def test_digit(self):
"""
L{OMetaBase.rule_digit} matches digits.
"""
o = OMetaBase("1a")
v, e = o.rule_digit()
self.assertEqual((v, e), ("1", ParseError("1a", 0, None)))
with self.assertRaises(ParseError) as e:
o.rule_digit()
self.assertEqual(e.exception, ParseError(o.input, 1, expected("digit")))
def test_listpattern(self):
"""
L{OMetaBase.rule_listpattern} matches contents of lists.
"""
o = OMetaBase([["a"]], tree=True)
v, e = o.listpattern(lambda: o.exactly("a"))
self.assertEqual((v, e), (["a"], ParseError("a", 0, None)))
|
|
# -*- coding: utf-8 -*-
"""
h2/settings
~~~~~~~~~~~
This module contains an HTTP/2 settings object. This object provides a simple
API for manipulating HTTP/2 settings, keeping track of both the current active
state of the settings and the unacknowledged future values of the settings.
"""
import collections
from hyperframe.frame import SettingsFrame
from h2.errors import ErrorCodes
from h2.exceptions import InvalidSettingsValueError
# Aliases for all the settings values.
#: Allows the sender to inform the remote endpoint of the maximum size of the
#: header compression table used to decode header blocks, in octets.
HEADER_TABLE_SIZE = SettingsFrame.HEADER_TABLE_SIZE
#: This setting can be used to disable server push. To disable server push on
#: a client, set this to 0.
ENABLE_PUSH = SettingsFrame.ENABLE_PUSH
#: Indicates the maximum number of concurrent streams that the sender will
#: allow.
MAX_CONCURRENT_STREAMS = SettingsFrame.MAX_CONCURRENT_STREAMS
#: Indicates the sender's initial window size (in octets) for stream-level flow
#: control.
INITIAL_WINDOW_SIZE = SettingsFrame.INITIAL_WINDOW_SIZE
#: Indicates the size of the largest frame payload that the sender is willing
#: to receive, in octets.
MAX_FRAME_SIZE = None
#: This advisory setting informs a peer of the maximum size of header list that
#: the sender is prepared to accept, in octets. The value is based on the
#: uncompressed size of header fields, including the length of the name and
#: value in octets plus an overhead of 32 octets for each header field.
#:
#: .. versionadded:: 2.5.0
MAX_HEADER_LIST_SIZE = None
try: # Platform-specific: Hyperframe < 4.0.0
MAX_FRAME_SIZE = SettingsFrame.SETTINGS_MAX_FRAME_SIZE
except AttributeError: # Platform-specific: Hyperframe >= 4.0.0
MAX_FRAME_SIZE = SettingsFrame.MAX_FRAME_SIZE
try: # Platform-specific: Hyperframe < 4.0.0
MAX_HEADER_LIST_SIZE = SettingsFrame.SETTINGS_MAX_HEADER_LIST_SIZE
except AttributeError: # Platform-specific: Hyperframe >= 4.0.0
MAX_HEADER_LIST_SIZE = SettingsFrame.MAX_HEADER_LIST_SIZE
#: A value structure for storing changed settings.
ChangedSetting = collections.namedtuple(
'ChangedSetting', ['setting', 'original_value', 'new_value']
)
class Settings(collections.MutableMapping):
"""
An object that encapsulates HTTP/2 settings state.
HTTP/2 Settings are a complex beast. Each party, remote and local, has its
own settings and a view of the other party's settings. When a settings
frame is emitted by a peer it cannot assume that the new settings values
are in place until the remote peer acknowledges the setting. In principle,
multiple settings changes can be "in flight" at the same time, all with
different values.
This object encapsulates this mess. It provides a dict-like interface to
settings, which return the *current* values of the settings in question.
Additionally, it keeps track of the stack of proposed values: each time an
acknowledgement is sent/received, it updates the current values with the
stack of proposed values. On top of all that, it validates the values to
make sure they're allowed, and raises :class:`InvalidSettingsValueError
<h2.exceptions.InvalidSettingsValueError>` if they are not.
Finally, this object understands what the default values of the HTTP/2
settings are, and sets those defaults appropriately.
.. versionchanged:: 2.2.0
Added the ``initial_values`` parameter.
.. versionchanged:: 2.5.0
Added the ``max_header_list_size`` property.
:param client: (optional) Whether these settings should be defaulted for a
client implementation or a server implementation. Defaults to ``True``.
:type client: ``bool``
:param initial_values: (optional) Any initial values the user would like
set, rather than RFC 7540's defaults.
    :type initial_values: ``MutableMapping``
"""
def __init__(self, client=True, initial_values=None):
# Backing object for the settings. This is a dictionary of
# (setting: [list of values]), where the first value in the list is the
# current value of the setting. Strictly this doesn't use lists but
# instead uses collections.deque to avoid repeated memory allocations.
#
# This contains the default values for HTTP/2.
self._settings = {
HEADER_TABLE_SIZE: collections.deque([4096]),
ENABLE_PUSH: collections.deque([int(client)]),
INITIAL_WINDOW_SIZE: collections.deque([65535]),
MAX_FRAME_SIZE: collections.deque([16384]),
}
if initial_values is not None:
for key, value in initial_values.items():
invalid = _validate_setting(key, value)
if invalid:
raise InvalidSettingsValueError(
"Setting %d has invalid value %d" % (key, value),
error_code=invalid
)
self._settings[key] = collections.deque([value])
def acknowledge(self):
"""
The settings have been acknowledged, either by the user (remote
settings) or by the remote peer (local settings).
:returns: A dict of {setting: ChangedSetting} that were applied.
"""
changed_settings = {}
# If there is more than one setting in the list, we have a setting
# value outstanding. Update them.
for k, v in self._settings.items():
if len(v) > 1:
old_setting = v.popleft()
new_setting = v[0]
changed_settings[k] = ChangedSetting(
k, old_setting, new_setting
)
return changed_settings
# Provide easy-access to well known settings.
@property
def header_table_size(self):
"""
The current value of the :data:`HEADER_TABLE_SIZE
<h2.settings.HEADER_TABLE_SIZE>` setting.
"""
return self[HEADER_TABLE_SIZE]
@header_table_size.setter
def header_table_size(self, value):
self[HEADER_TABLE_SIZE] = value
@property
def enable_push(self):
"""
The current value of the :data:`ENABLE_PUSH <h2.settings.ENABLE_PUSH>`
setting.
"""
return self[ENABLE_PUSH]
@enable_push.setter
def enable_push(self, value):
self[ENABLE_PUSH] = value
@property
def initial_window_size(self):
"""
The current value of the :data:`INITIAL_WINDOW_SIZE
<h2.settings.INITIAL_WINDOW_SIZE>` setting.
"""
return self[INITIAL_WINDOW_SIZE]
@initial_window_size.setter
def initial_window_size(self, value):
self[INITIAL_WINDOW_SIZE] = value
@property
def max_frame_size(self):
"""
The current value of the :data:`MAX_FRAME_SIZE
<h2.settings.MAX_FRAME_SIZE>` setting.
"""
return self[MAX_FRAME_SIZE]
@max_frame_size.setter
def max_frame_size(self, value):
self[MAX_FRAME_SIZE] = value
@property
def max_concurrent_streams(self):
"""
The current value of the :data:`MAX_CONCURRENT_STREAMS
<h2.settings.MAX_CONCURRENT_STREAMS>` setting.
"""
return self.get(MAX_CONCURRENT_STREAMS, 2**32+1)
@max_concurrent_streams.setter
def max_concurrent_streams(self, value):
self[MAX_CONCURRENT_STREAMS] = value
@property
def max_header_list_size(self):
"""
The current value of the :data:`MAX_HEADER_LIST_SIZE
<h2.settings.MAX_HEADER_LIST_SIZE>` setting. If not set, returns
``None``, which means unlimited.
.. versionadded:: 2.5.0
"""
return self.get(MAX_HEADER_LIST_SIZE, None)
@max_header_list_size.setter
def max_header_list_size(self, value):
self[MAX_HEADER_LIST_SIZE] = value
# Implement the MutableMapping API.
def __getitem__(self, key):
val = self._settings[key][0]
# Things that were created when a setting was received should stay
# KeyError'd.
if val is None:
raise KeyError
return val
def __setitem__(self, key, value):
invalid = _validate_setting(key, value)
if invalid:
raise InvalidSettingsValueError(
"Setting %d has invalid value %d" % (key, value),
error_code=invalid
)
try:
items = self._settings[key]
except KeyError:
items = collections.deque([None])
self._settings[key] = items
items.append(value)
def __delitem__(self, key):
del self._settings[key]
def __iter__(self):
return self._settings.__iter__()
def __len__(self):
return len(self._settings)
def __eq__(self, other):
if isinstance(other, Settings):
return self._settings == other._settings
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, Settings):
return not self == other
else:
return NotImplemented
def _validate_setting(setting, value):
"""
Confirms that a specific setting has a well-formed value. If the setting is
invalid, returns an error code. Otherwise, returns 0 (NO_ERROR).
"""
if setting == ENABLE_PUSH:
if value not in (0, 1):
return ErrorCodes.PROTOCOL_ERROR
elif setting == INITIAL_WINDOW_SIZE:
if not 0 <= value <= 2147483647: # 2^31 - 1
return ErrorCodes.FLOW_CONTROL_ERROR
elif setting == MAX_FRAME_SIZE:
if not 16384 <= value <= 16777215: # 2^14 and 2^24 - 1
return ErrorCodes.PROTOCOL_ERROR
elif setting == MAX_HEADER_LIST_SIZE:
if value < 0:
return ErrorCodes.PROTOCOL_ERROR
return 0
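# A minimal usage sketch (not part of the module above; Settings,
# INITIAL_WINDOW_SIZE and ChangedSetting are the names defined in this file).
# It illustrates how a changed value stays pending until acknowledge() pops
# the old value and reports the change.
def _settings_usage_example():
    settings = Settings(client=True)
    settings.initial_window_size = 131072           # queued behind the current value
    assert settings.initial_window_size == 65535    # still the old value until ACKed
    changed = settings.acknowledge()                # {INITIAL_WINDOW_SIZE: ChangedSetting(...)}
    assert settings.initial_window_size == 131072
    return changed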
|
|
"""
SQL functions reference lists:
https://web.archive.org/web/20130407175746/http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.0.0.html
http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.2.1.html
"""
import re
import sys
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.utils import six
from django.utils.functional import cached_property
class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations):
name = 'spatialite'
spatialite = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
Adapter = SpatiaLiteAdapter
area = 'Area'
centroid = 'Centroid'
collect = 'Collect'
contained = 'MbrWithin'
difference = 'Difference'
distance = 'Distance'
envelope = 'Envelope'
extent = 'Extent'
geojson = 'AsGeoJSON'
gml = 'AsGML'
intersection = 'Intersection'
kml = 'AsKML'
length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
makeline = 'MakeLine'
num_geom = 'NumGeometries'
num_points = 'NumPoints'
point_on_surface = 'PointOnSurface'
scale = 'ScaleCoords'
svg = 'AsSVG'
sym_difference = 'SymDifference'
transform = 'Transform'
translate = 'ShiftCoords'
union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
unionagg = 'GUnion'
from_text = 'GeomFromText'
from_wkb = 'GeomFromWKB'
select = 'AsText(%s)'
gis_operators = {
'equals': SpatialOperator(func='Equals'),
'disjoint': SpatialOperator(func='Disjoint'),
'dwithin': SpatialOperator(func='PtDistWithin'),
'touches': SpatialOperator(func='Touches'),
'crosses': SpatialOperator(func='Crosses'),
'within': SpatialOperator(func='Within'),
'overlaps': SpatialOperator(func='Overlaps'),
'contains': SpatialOperator(func='Contains'),
'intersects': SpatialOperator(func='Intersects'),
'relate': SpatialOperator(func='Relate'),
# Returns true if B's bounding box completely contains A's bounding box.
'contained': SpatialOperator(func='MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains': SpatialOperator(func='MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps': SpatialOperator(func='MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as': SpatialOperator(func='Equals'),
'exact': SpatialOperator(func='Equals'),
'distance_gt': SpatialOperator(func='Distance', op='>'),
'distance_gte': SpatialOperator(func='Distance', op='>='),
'distance_lt': SpatialOperator(func='Distance', op='<'),
'distance_lte': SpatialOperator(func='Distance', op='<='),
}
disallowed_aggregates = (aggregates.Extent3D,)
@cached_property
def function_names(self):
return {
'Length': 'ST_Length',
'Reverse': 'ST_Reverse',
'Scale': 'ScaleCoords',
'Translate': 'ST_Translate',
'Union': 'ST_Union',
}
@cached_property
def unsupported_functions(self):
unsupported = {'BoundingCircle', 'ForceRHR', 'IsValid', 'MakeValid', 'MemSize'}
if not self.lwgeom_version():
unsupported.add('GeoHash')
return unsupported
@cached_property
def spatial_version(self):
"""Determine the version of the SpatiaLite library."""
try:
version = self.spatialite_version_tuple()[1:]
except Exception as msg:
new_msg = (
'Cannot determine the SpatiaLite version for the "%s" '
'database (error was "%s"). Was the SpatiaLite initialization '
'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
if version < (4, 0, 0):
raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions 4.0.0 and above.')
return version
def convert_extent(self, box, srid):
"""
Convert the polygon data received from SpatiaLite to min/max values.
"""
if box is None:
return None
shell = Geometry(box, srid).shell
xmin, ymin = shell[0][:2]
xmax, ymax = shell[2][:2]
return (xmin, ymin, xmax, ymax)
def convert_geom(self, wkt, geo_field):
"""
Converts geometry WKT returned from a SpatiaLite aggregate.
"""
if wkt:
return Geometry(wkt, geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Returns None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type, **kwargs):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type. SpatiaLite only supports regular
cartesian-based queries (no spheroid/sphere calculations for point
geometries like PostGIS).
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError('SpatiaLite does not support distance queries on '
'geometry fields with a geodetic coordinate system. '
'Pass a numeric value of your distance '
'in degrees instead of a Distance object.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
Transform() and GeomFromText() function call(s).
"""
def transform_value(value, srid):
return not (value is None or value.srid == srid)
if hasattr(value, 'as_sql'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
sql, _ = compiler.compile(value)
return placeholder % sql
else:
if transform_value(value, f.srid):
# Adding Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
else:
return '%s(%%s,%s)' % (self.from_text, f.srid)
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
Any error occurring in this method should be handled by the caller.
"""
cursor = self.connection._cursor()
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
finally:
cursor.close()
return row[0]
def geos_version(self):
"Returns the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Returns the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def lwgeom_version(self):
"""Return the version of LWGEOM library used by SpatiaLite."""
return self._get_spatialite_func('lwgeom_version()')
def spatialite_version(self):
"Returns the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Returns the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
version = self.spatialite_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse SpatiaLite version string: %s' % version)
return (version, major, minor1, minor2)
def spatial_aggregate_name(self, agg_name):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns
return SpatialiteGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys
return SpatialiteSpatialRefSys
def get_db_converters(self, expression):
converters = super(SpatiaLiteOperations, self).get_db_converters(expression)
if hasattr(expression.output_field, 'geom_type'):
converters.append(self.convert_geometry)
return converters
def convert_geometry(self, value, expression, connection, context):
if value:
value = Geometry(value)
if 'transformed_srid' in context:
value.srid = context['transformed_srid']
return value
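# A small standalone sketch (not part of Django) showing how the
# version_regex defined above splits a SpatiaLite version string into the
# numeric parts used by spatialite_version_tuple(); '4.3.0' is an
# illustrative value only.
if __name__ == '__main__':
    _m = SpatiaLiteOperations.version_regex.match('4.3.0')
    print((int(_m.group('major')), int(_m.group('minor1')), int(_m.group('minor2'))))
    # -> (4, 3, 0)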
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for ssh module.
"""
from __future__ import print_function
import os
import random
import socket
import subprocess
from helpers import unittest
import target_test
from luigi.contrib.ssh import RemoteContext, RemoteFileSystem, RemoteTarget
from luigi.target import MissingParentDirectory, FileAlreadyExists
working_ssh_host = os.environ.get('SSH_TEST_HOST', 'localhost')
# set SSH_TEST_HOST to a working ssh host string (default: "localhost") to activate these integration tests
# The following tests require a working ssh server at `working_ssh_host`
# the test runner can ssh into using password-less authentication
# since `nc` has different syntax on different platforms
# we use a short python command to start
# a 'hello'-server on the remote machine
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall(b'hello')
"""
try:
x = subprocess.check_output(
"ssh %s -S none -o BatchMode=yes 'echo 1'" % working_ssh_host,
shell=True
)
if x != b'1\n':
raise unittest.SkipTest('Not able to connect to ssh server')
except Exception:
raise unittest.SkipTest('Not able to connect to ssh server')
class TestRemoteContext(unittest.TestCase):
def setUp(self):
self.context = RemoteContext(working_ssh_host)
def tearDown(self):
try:
self.remote_server_handle.terminate()
except Exception:
pass
def test_check_output(self):
""" Test check_output ssh
Assumes the running user can ssh to working_ssh_host
"""
output = self.context.check_output(["echo", "-n", "luigi"])
self.assertEqual(output, b"luigi")
def test_tunnel(self):
print("Setting up remote listener...")
self.remote_server_handle = self.context.Popen([
"python", "-c", '"{0}"'.format(HELLO_SERVER_CMD)
], stdout=subprocess.PIPE)
print("Setting up tunnel")
with self.context.tunnel(2135, 2134):
print("Tunnel up!")
# hack to make sure the listener process is up
# and running before we write to it
server_output = self.remote_server_handle.stdout.read(5)
self.assertEqual(server_output, b"ready")
print("Connecting to server via tunnel")
s = socket.socket()
s.connect(("localhost", 2135))
print("Receiving...",)
response = s.recv(5)
self.assertEqual(response, b"hello")
print("Closing connection")
s.close()
print("Waiting for listener...")
output, _ = self.remote_server_handle.communicate()
self.assertEqual(self.remote_server_handle.returncode, 0)
print("Closing tunnel")
class TestRemoteTarget(unittest.TestCase):
""" These tests assume RemoteContext working
in order for setUp and tearDown to work
"""
def setUp(self):
self.ctx = RemoteContext(working_ssh_host)
self.filepath = "/tmp/luigi_remote_test.dat"
self.target = RemoteTarget(
self.filepath,
working_ssh_host,
)
self.ctx.check_output(["rm", "-rf", self.filepath])
self.ctx.check_output(["echo -n 'hello' >", self.filepath])
def tearDown(self):
self.ctx.check_output(["rm", "-rf", self.filepath])
def test_exists(self):
self.assertTrue(self.target.exists())
no_file = RemoteTarget(
"/tmp/_file_that_doesnt_exist_",
working_ssh_host,
)
self.assertFalse(no_file.exists())
def test_remove(self):
self.target.remove()
self.assertRaises(
subprocess.CalledProcessError,
self.ctx.check_output,
["cat", self.filepath]
)
def test_open(self):
f = self.target.open('r')
file_content = f.read()
f.close()
self.assertEqual(file_content, "hello")
self.assertTrue(self.target.fs.exists(self.filepath))
self.assertFalse(self.target.fs.isdir(self.filepath))
def test_context_manager(self):
with self.target.open('r') as f:
file_content = f.read()
self.assertEqual(file_content, "hello")
class TestRemoteFilesystem(unittest.TestCase):
def setUp(self):
self.fs = RemoteFileSystem(working_ssh_host)
self.root = '/tmp/luigi-remote-test'
self.directory = self.root + '/dir'
self.filepath = self.directory + '/file'
self.target = RemoteTarget(
self.filepath,
working_ssh_host,
)
self.fs.remote_context.check_output(['rm', '-rf', self.root])
self.addCleanup(self.fs.remote_context.check_output, ['rm', '-rf', self.root])
def test_mkdir(self):
self.assertFalse(self.fs.isdir(self.directory))
self.assertRaises(MissingParentDirectory, self.fs.mkdir, self.directory, parents=False)
self.fs.mkdir(self.directory)
self.assertTrue(self.fs.isdir(self.directory))
# Shouldn't throw
self.fs.mkdir(self.directory)
self.assertRaises(FileAlreadyExists, self.fs.mkdir, self.directory, raise_if_exists=True)
def test_list(self):
with self.target.open('w'):
pass
self.assertEquals([self.target.path], list(self.fs.listdir(self.directory)))
class TestRemoteTargetAtomicity(unittest.TestCase, target_test.FileSystemTargetTestMixin):
path = '/tmp/luigi_remote_atomic_test.txt'
ctx = RemoteContext(working_ssh_host)
def create_target(self, format=None):
return RemoteTarget(self.path, working_ssh_host, format=format)
def _exists(self, path):
try:
self.ctx.check_output(["test", "-e", path])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
return True
def assertCleanUp(self, tp):
self.assertFalse(self._exists(tp))
def setUp(self):
self.ctx.check_output(["rm", "-rf", self.path])
self.local_file = '/tmp/local_luigi_remote_atomic_test.txt'
if os.path.exists(self.local_file):
os.remove(self.local_file)
def tearDown(self):
self.ctx.check_output(["rm", "-rf", self.path])
if os.path.exists(self.local_file):
os.remove(self.local_file)
def test_put(self):
f = open(self.local_file, 'w')
f.write('hello')
f.close()
t = RemoteTarget(self.path, working_ssh_host)
t.put(self.local_file)
self.assertTrue(self._exists(self.path))
def test_get(self):
self.ctx.check_output(["echo -n 'hello' >", self.path])
t = RemoteTarget(self.path, working_ssh_host)
t.get(self.local_file)
f = open(self.local_file, 'r')
file_content = f.read()
self.assertEqual(file_content, 'hello')
class TestRemoteTargetCreateDirectories(TestRemoteTargetAtomicity):
path = '/tmp/%s/xyz/luigi_remote_atomic_test.txt' % random.randint(0, 999999999)
class TestRemoteTargetRelative(TestRemoteTargetAtomicity):
path = 'luigi_remote_atomic_test.txt'
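# A minimal usage sketch outside the test classes, assuming a reachable ssh
# host with password-less login; the path used here is a placeholder.
def _remote_target_example(host=working_ssh_host, path='/tmp/luigi_remote_example.txt'):
    target = RemoteTarget(path, host)
    with target.open('w') as f:
        f.write('hello')
    with target.open('r') as f:
        return f.read()  # -> 'hello'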
|
|
"""
flask_oauthlib.contrib.apps
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The bundle of remote app factories for famous third platforms.
Usage::
from flask import Flask
from flask_oauthlib.client import OAuth
from flask_oauthlib.contrib.apps import github
app = Flask(__name__)
oauth = OAuth(app)
github.register_to(oauth, scope=['user:email'])
github.register_to(oauth, name='github2')
Of course, it requires consumer keys in your config::
GITHUB_CONSUMER_KEY = ''
GITHUB_CONSUMER_SECRET = ''
GITHUB2_CONSUMER_KEY = ''
GITHUB2_CONSUMER_SECRET = ''
Some apps using OAuth 1.0a, such as Twitter, do not accept the ``scope``
argument.
Contributed by: tonyseek
"""
import copy
from oauthlib.common import unicode_type, bytes_type
__all__ = ['douban', 'dropbox', 'facebook', 'github', 'google', 'linkedin',
'twitter', 'weibo']
class RemoteAppFactory(object):
"""The factory to create remote app and bind it to given extension.
:param default_name: the default name which will be used for registering.
:param kwargs: the pre-defined kwargs.
:param docstring: the docstring of factory.
"""
def __init__(self, default_name, kwargs, docstring=''):
assert 'name' not in kwargs
assert 'register' not in kwargs
self.default_name = default_name
self.kwargs = kwargs
self._kwargs_processor = None
self.__doc__ = docstring.lstrip()
def register_to(self, oauth, name=None, **kwargs):
"""Creates a remote app and registers it."""
kwargs = self._process_kwargs(
name=(name or self.default_name), **kwargs)
return oauth.remote_app(**kwargs)
def create(self, oauth, **kwargs):
"""Creates a remote app only."""
kwargs = self._process_kwargs(
name=self.default_name, register=False, **kwargs)
return oauth.remote_app(**kwargs)
def kwargs_processor(self, fn):
"""Sets a function to process kwargs before creating any app."""
self._kwargs_processor = fn
return fn
def _process_kwargs(self, **kwargs):
final_kwargs = copy.deepcopy(self.kwargs)
# merges with pre-defined kwargs
final_kwargs.update(copy.deepcopy(kwargs))
# use name as app key
final_kwargs.setdefault('app_key', final_kwargs['name'].upper())
# processes by pre-defined function
if self._kwargs_processor is not None:
final_kwargs = self._kwargs_processor(**final_kwargs)
return final_kwargs
def make_scope_processor(default_scope):
def processor(**kwargs):
# request_token_params
scope = kwargs.pop('scope', [default_scope]) # default scope
if not isinstance(scope, (unicode_type, bytes_type)):
scope = ','.join(scope) # allows list-style scope
request_token_params = kwargs.setdefault('request_token_params', {})
request_token_params.setdefault('scope', scope) # doesn't override
return kwargs
return processor
douban = RemoteAppFactory('douban', {
'base_url': 'https://api.douban.com/v2/',
'request_token_url': None,
'access_token_url': 'https://www.douban.com/service/auth2/token',
'authorize_url': 'https://www.douban.com/service/auth2/auth',
'access_token_method': 'POST',
}, """
The OAuth app for douban.com API.
:param scope: optional. default: ``['douban_basic_common']``.
see also: http://developers.douban.com/wiki/?title=oauth2
""")
douban.kwargs_processor(make_scope_processor('douban_basic_common'))
dropbox = RemoteAppFactory('dropbox', {
'base_url': 'https://www.dropbox.com/1/',
'request_token_url': None,
'access_token_url': 'https://api.dropbox.com/1/oauth2/token',
'authorize_url': 'https://www.dropbox.com/1/oauth2/authorize',
'access_token_method': 'POST',
'request_token_params': {},
}, """The OAuth app for Dropbox API.""")
facebook = RemoteAppFactory('facebook', {
'request_token_params': {'scope': 'email'},
'base_url': 'https://graph.facebook.com',
'request_token_url': None,
'access_token_url': '/oauth/access_token',
'authorize_url': 'https://www.facebook.com/dialog/oauth',
}, """
The OAuth app for Facebook API.
:param scope: optional. default: ``['email']``.
""")
facebook.kwargs_processor(make_scope_processor('email'))
github = RemoteAppFactory('github', {
'base_url': 'https://api.github.com/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://github.com/login/oauth/access_token',
'authorize_url': 'https://github.com/login/oauth/authorize',
}, """
The OAuth app for GitHub API.
:param scope: optional. default: ``['user:email']``.
""")
github.kwargs_processor(make_scope_processor('user:email'))
google = RemoteAppFactory('google', {
'base_url': 'https://www.googleapis.com/oauth2/v1/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
}, """
The OAuth app for Google API.
:param scope: optional.
default: ``['email']``.
""")
google.kwargs_processor(make_scope_processor(
'email'))
twitter = RemoteAppFactory('twitter', {
'base_url': 'https://api.twitter.com/1.1/',
'request_token_url': 'https://api.twitter.com/oauth/request_token',
'access_token_url': 'https://api.twitter.com/oauth/access_token',
'authorize_url': 'https://api.twitter.com/oauth/authenticate',
}, """The OAuth app for Twitter API.""")
weibo = RemoteAppFactory('weibo', {
'base_url': 'https://api.weibo.com/2/',
'authorize_url': 'https://api.weibo.com/oauth2/authorize',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://api.weibo.com/oauth2/access_token',
# weibo's response does not follow the standard, so we force the content to be parsed as JSON
'content_type': 'application/json',
}, """
The OAuth app for weibo.com API.
:param scope: optional. default: ``['email']``
""")
weibo.kwargs_processor(make_scope_processor('email'))
def change_weibo_header(uri, headers, body):
"""Since weibo is a rubbish server, it does not follow the standard,
we need to change the authorization header for it."""
auth = headers.get('Authorization')
if auth:
auth = auth.replace('Bearer', 'OAuth2')
headers['Authorization'] = auth
return uri, headers, body
weibo.pre_request = change_weibo_header
linkedin = RemoteAppFactory('linkedin', {
'request_token_params': {'state': 'RandomString'},
'base_url': 'https://api.linkedin.com/v1/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://www.linkedin.com/uas/oauth2/accessToken',
'authorize_url': 'https://www.linkedin.com/uas/oauth2/authorization',
}, """
The OAuth app for LinkedIn API.
:param scope: optional. default: ``['r_basicprofile']``
""")
linkedin.kwargs_processor(make_scope_processor('r_basicprofile'))
def change_linkedin_query(uri, headers, body):
auth = headers.pop('Authorization')
headers['x-li-format'] = 'json'
if auth:
auth = auth.replace('Bearer', '').strip()
if '?' in uri:
uri += '&oauth2_access_token=' + auth
else:
uri += '?oauth2_access_token=' + auth
return uri, headers, body
linkedin.pre_request = change_linkedin_query
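# A small sketch (illustrative values only) of how make_scope_processor()
# folds a ``scope`` keyword into ``request_token_params`` before a remote
# app is created.
def _scope_processor_example():
    processor = make_scope_processor('user:email')
    kwargs = processor(name='github', scope=['repo', 'user:email'])
    # kwargs['request_token_params'] == {'scope': 'repo,user:email'}
    return kwargs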
|
|
#!/usr/bin/env python
#
# Test cases for tournament.py
from tournament import *
def test_delete_all_event(test_num):
delete_all_events()
c = count_events()
if type(c) is not long:
raise TypeError(
"count_events() should return long value.")
if c != 0:
raise ValueError("After deleting, count_events should return zero.")
print ("{}. All events can be deleted.").format(test_num)
def test_delete_one_event(test_num):
delete_all_events()
event_id = register_event("Blitz Tournament", "2015/12/30")
delete_event(event_id)
event_id = register_event("Blitz Tournament2", "2015/12/30")
c = count_events()
if type(c) is not long:
raise TypeError(
"count_events() should return long value.")
if c != 1:
raise ValueError("After deleting, count_events should return one.")
print ("{}. One event can be deleted.").format(test_num)
def test_register_event(test_num):
delete_all_events()
register_event("Blitz Tournament", "2015/12/30")
c = count_events()
if type(c) is not long:
raise TypeError(
"count_events() should return a long value.")
if c != 1:
raise ValueError("After one event registered, count_events should \
return one.")
print ("{}. After registering an event, count_events() returns 1.")\
.format(test_num)
def test_delete_players(test_num):
delete_all_events()
delete_players()
c = count_players()
if type(c) is not long:
raise TypeError(
"count_players() should return long value.")
if c != 0:
raise ValueError("After deleting, count_players should return zero.")
print ("{}. All players can be deleted.").format(test_num)
def test_register_player(test_num):
delete_all_events()
delete_players()
register_player("Aristoteles", "Nunez")
c = count_players()
if type(c) is not long:
raise TypeError(
"count_players() should return long value.")
if c != 1:
raise ValueError(
"After one player registers, count_players() should be 1.")
print ("{}. After registering a player, count_players() returns 1.")\
.format(test_num)
def test_add_player_to_event(test_num):
delete_all_events()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player_id = register_player("Aristoteles", "Nunez")
add_player_to_event(event_id, player_id)
c = count_players_in_event(event_id)
if type(c) is not long:
raise TypeError(
"count_players() should return long value.")
if c != 1:
raise ValueError(
"After one player adds to an event, count_players_in_event() should be 1.")
print ("{}. After adding a player, count_players_in_event() returns 1.")\
.format(test_num)
def test_remove_player_from_event(test_num):
delete_all_events()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player_id = register_player("Aristoteles", "Nunez")
add_player_to_event(event_id, player_id)
remove_player_from_event(event_id, player_id)
c = count_players_in_event(event_id)
if type(c) is not long:
raise TypeError(
"count_players() should return long value.")
if c != 0:
raise ValueError(
"count_players_in_event() should be 0.")
print ("{}. After removing a player, count_players_in_event() returns 0.")\
.format(test_num)
def test_delete_all_matches(test_num):
delete_all_events()
delete_all_matches()
print ("{}. All matches can be deleted.").format(test_num)
def test_delete_matches_from_event(test_num):
delete_all_events()
event_id = register_event("Blitz Tournament", "2015/12/30")
delete_matches_from_event(event_id)
print ("{}. All matches from event can be deleted.").format(test_num)
def test_register_count_delete(test_num):
delete_all_events()
delete_all_matches()
delete_players()
register_player("Markov", "Chaney")
register_player("Joe", "Malik")
register_player("Mao", "Tsu-hsi")
register_player("Atlanta", "Hope")
c = count_players()
if c != 4:
raise ValueError(
"After registering four players, count_players should be 4.")
delete_players()
c = count_players()
if c != 0:
raise ValueError("After deleting, count_players should return zero.")
print ("{}. Players can be registered and deleted.").format(test_num)
def test_standings_before_matches(test_num):
delete_all_events()
delete_all_matches()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player1_id = register_player("Melpomene", "Murray")
player2_id = register_player("Randy", "Schwartz")
player3_id = register_player("Aristoteles", "Nunez")
player4_id = register_player("Gary", "Nunez")
add_player_to_event(event_id, player1_id)
add_player_to_event(event_id, player4_id)
standings = player_standings(event_id)
if len(standings) < 2:
raise ValueError("Players should appear in player_standings even before "
"they have played any matches.")
elif len(standings) > 2:
raise ValueError("Only registered players should appear in standings.")
if len(standings[0]) != 4:
raise ValueError("Each player_standings row should have four columns.")
[(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings
if matches1 != 0 or matches2 != 0 or wins1 != 0 or wins2 != 0:
raise ValueError(
"Newly registered players should have no matches or wins.")
if set([name1, name2]) != set(["Melpomene Murray", "Gary Nunez"]):
raise ValueError("Registered players' names should appear in standings, "
"even if they have no matches played.")
print ("{}. Newly registered players appear in the standings with no matches.")\
.format(test_num)
def test_report_matches(test_num):
delete_all_events()
delete_all_matches()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player1_id = register_player("Melpomene", "Murray")
player2_id = register_player("Randy", "Schwartz")
player3_id = register_player("Aristoteles", "Nunez")
player4_id = register_player("Gary", "Nunez")
add_player_to_event(event_id, player1_id)
add_player_to_event(event_id, player2_id)
add_player_to_event(event_id, player3_id)
add_player_to_event(event_id, player4_id)
standings = player_standings(event_id)
[id1, id2, id3, id4] = [row[0] for row in standings]
report_match(event_id, 1, id1, 1.0, id2, 0.0)
report_match(event_id, 1, id3, 0.0, id4, 1.0)
standings = player_standings(event_id)
for (i, n, w, m) in standings:
if m != 1:
raise ValueError("Each player should have one match recorded.")
if i in (id1, id4) and w < 1:
raise ValueError("Each match winner should have one win recorded.")
elif i in (id2, id3) and w > 0:
raise ValueError("Each match loser should have zero wins recorded.")
print ("{}. After a match, players have updated standings.")\
.format(test_num)
def test_pairings(test_num):
delete_all_events()
delete_all_matches()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player1_id = register_player("Twilight", "Sparkle")
player2_id = register_player("Flutter", "Shy")
player3_id = register_player("Aristoteles", "Nunez")
player4_id = register_player("Gary", "Nunez")
add_player_to_event(event_id, player1_id)
add_player_to_event(event_id, player2_id)
add_player_to_event(event_id, player3_id)
add_player_to_event(event_id, player4_id)
standings = player_standings(event_id)
[id1, id2, id3, id4] = [row[0] for row in standings]
report_match(event_id, 1, id1, 1.0, id2, 0.0)
report_match(event_id, 1, id3, 1.0, id4, 0.0)
pairings = swiss_pairings(event_id, 1)
if len(pairings) != 2:
raise ValueError(
"For four players, swissPairings should return two pairs.")
[(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4)] = pairings
correct_pairs = set([frozenset([id1, id3]), frozenset([id2, id4])])
actual_pairs = set([frozenset([pid1, pid2]), frozenset([pid3, pid4])])
if correct_pairs != actual_pairs:
raise ValueError(
"After one match, players with one win should be paired.")
print ("{}. After one match, players with one win are paired.")\
.format(test_num)
def test_tournament (test_num):
delete_all_events()
delete_all_matches()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player1_id = register_player("Twilight", "Sparkle")
player2_id = register_player("Flutter", "Shy")
player3_id = register_player("Aristoteles", "Nunez")
player4_id = register_player("Gary", "Nunez")
player5_id = register_player("Federico", "Juarez")
player6_id = register_player("Sahadi", "Urbina")
player7_id = register_player("Itzel", "Lopez")
player8_id = register_player("Vladimir", "Kramnik")
player9_id = register_player("Bobby", "Fisher")
player10_id = register_player("Magnus", "Carlsen")
player11_id = register_player("Emanuel", "Lasker")
player12_id = register_player("Raul", "Capablanca")
player13_id = register_player("Boris", "Spasky")
player14_id = register_player("Anand", "Viswanathan")
player15_id = register_player("Gary", "Kasparov")
player16_id = register_player("Anatoli", "Karpov")
add_player_to_event(event_id, player1_id)
add_player_to_event(event_id, player2_id)
add_player_to_event(event_id, player3_id)
add_player_to_event(event_id, player4_id)
add_player_to_event(event_id, player5_id)
add_player_to_event(event_id, player6_id)
add_player_to_event(event_id, player7_id)
add_player_to_event(event_id, player8_id)
add_player_to_event(event_id, player9_id)
add_player_to_event(event_id, player10_id)
add_player_to_event(event_id, player11_id)
add_player_to_event(event_id, player12_id)
add_player_to_event(event_id, player13_id)
add_player_to_event(event_id, player14_id)
add_player_to_event(event_id, player15_id)
add_player_to_event(event_id, player16_id)
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
pairings = swiss_pairings(event_id, 1)
for pair in pairings:
(id1, name1, id2, name2) = pair
report_match(event_id, 1, id1, 1.0, id2, 0.0)
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
pairings = swiss_pairings(event_id, 2)
for pair in pairings:
(id1, name1, id2, name2) = pair
report_match(event_id, 2, id1, 1.0, id2, 0.0)
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
pairings = swiss_pairings(event_id, 3)
for pair in pairings:
(id1, name1, id2, name2) = pair
report_match(event_id, 3, id1, 1.0, id2, 0.0)
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
pairings = swiss_pairings(event_id, 4)
for pair in pairings:
(id1, name1, id2, name2) = pair
report_match(event_id, 4, id1, 0.0, id2, 1.0)
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
for (i, n, p, m) in standings:
if m != 4:
raise ValueError("Each player should have 4 matches recorded.")
(i1, n1, p1, m1) = standings[0]
(i16, n16, p16, m16) = standings[len(standings)-1]
if p1 < 4.0:
raise ValueError("In this case winner must have 4 points")
if p16 > 0.0:
raise ValueError("In this case the last player must have 0 points")
print ("{}. After 4 rounds we have a winner")\
.format(test_num)
def test_prevent_rematches (test_num):
delete_all_events()
delete_all_matches()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player1_id = register_player("Twilight", "Sparkle")
player2_id = register_player("Flutter", "Shy")
player3_id = register_player("Aristoteles", "Nunez")
player4_id = register_player("Gary", "Nunez")
player5_id = register_player("Vladimir", "Kramnik")
player6_id = register_player("Sahadi", "Urbina")
player7_id = register_player("Itzel", "Lopez")
player8_id = register_player("Vladimir", "Kramnik")
add_player_to_event(event_id, player1_id)
add_player_to_event(event_id, player2_id)
add_player_to_event(event_id, player3_id)
add_player_to_event(event_id, player4_id)
add_player_to_event(event_id, player5_id)
add_player_to_event(event_id, player6_id)
add_player_to_event(event_id, player7_id)
add_player_to_event(event_id, player8_id)
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
# Pairings with score 0
pairings = swiss_pairings(event_id, 1)
(p1id1, p1name1, p1id2, p1name2) = pairings[0]
for pair in pairings:
(id1, name1, id2, name2) = pair
report_match(event_id, 1, id1, 0.5, id2, 0.5)
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
# After everybody ties, the pairings must prevent rematches
pairings = swiss_pairings(event_id, 2)
#print ("\n{}\n".format(pairings))
(p2id1, p2name1, p2id2, p2name2) = pairings[0]
round_one = set([p1id1, p1id2])
round_two = set([p2id1, p2id2])
if round_one == round_two:
raise ValueError(
"After one match players do not rematch.")
print ("{}. Preventing rematches between players")\
.format(test_num)
def test_odd_players (test_num):
delete_all_events()
delete_all_matches()
delete_players()
event_id = register_event("Blitz Tournament", "2015/12/30")
player1_id = register_player("Twilight", "Sparkle")
player2_id = register_player("Flutter", "Shy")
player3_id = register_player("Aristoteles", "Nunez")
add_player_to_event(event_id, player1_id)
add_player_to_event(event_id, player2_id)
add_player_to_event(event_id, player3_id)
standings = player_standings(event_id)
pairings = swiss_pairings(event_id, 1)
#print ("\n{}\n".format(pairings))
standings = player_standings(event_id)
#print ("\n{}\n".format(standings))
if len(standings) < 4:
raise ValueError("In this case there must be 4 players")
print ("{}. Player Bye Added when odd number of players")\
.format(test_num)
if __name__ == '__main__':
test_delete_all_event(1)
test_delete_one_event(2)
test_register_event(3)
test_delete_players(4)
test_register_player(5)
test_add_player_to_event(6)
test_remove_player_from_event(7)
test_delete_all_matches(8)
test_delete_matches_from_event(9)
test_register_count_delete(10)
test_standings_before_matches(11)
test_report_matches(12)
test_pairings(13)
test_tournament (14)
test_prevent_rematches(15)
test_odd_players(16)
print ("Success! All tests pass!")
|
|
# -*- coding: utf-8 -*-
"""
tipfy
~~~~~
Minimalist WSGI application and utilities for App Engine.
:copyright: 2010 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
import logging
import os
from wsgiref.handlers import CGIHandler
# Werkzeug swiss knife.
# Need to import werkzeug first otherwise py_zipimport fails.
import werkzeug
from werkzeug import (cached_property, escape, import_string, redirect,
Request as WerkzeugRequest, Response as WerkzeugResponse, url_quote)
from werkzeug.exceptions import (abort, BadGateway, BadRequest, Forbidden,
Gone, HTTPException, InternalServerError, LengthRequired,
MethodNotAllowed, NotAcceptable, NotFound, NotImplemented,
PreconditionFailed, RequestEntityTooLarge, RequestTimeout,
RequestURITooLarge, ServiceUnavailable, Unauthorized,
UnsupportedMediaType)
from werkzeug.routing import (BaseConverter, EndpointPrefix, Map,
RequestRedirect, Rule as WerkzeugRule, RuleFactory, RuleTemplate,
Subdomain, Submount)
try:
# We declare the namespace to be used outside of App Engine, so that
# we can distribute and install separate extensions.
__import__('pkg_resources').declare_namespace(__name__)
except ImportError, e:
pass
__version__ = '0.6.3'
__version_info__ = tuple(int(n) for n in __version__.split('.'))
#: Default configuration values for this module. Keys are:
#:
#: apps_installed
#: A list of active app modules as a string. Default is an empty list.
#:
#: apps_entry_points
#: URL entry points for the installed apps, in case their URLs are mounted
#: using base paths.
#:
#: middleware
#: A list of middleware classes for the WSGIApplication. The classes can
#: be defined as strings. They define hooks that plug into the application
#: to initialize stuff when the app is built, at the start or end of a
#: request or to handle exceptions. Default is an empty list.
#:
#: server_name
#: A server name hint, used to calculate current subdomain.
#: If you plan to use dynamic subdomains, you must define the main domain
#: here so that the subdomain can be extracted and applied to URL rules.
#:
#: subdomain
#: Force this subdomain to be used instead of extracting
#: the subdomain from the current url.
#:
#: dev
#: True if this is the development server, False otherwise.
#: Default is the value of ``os.environ['SERVER_SOFTWARE']``.
#:
#: app_id
#: The application id. Default is the value of
#: ``os.environ['APPLICATION_ID']``.
#:
#: version_id
#: The current deployment version id. Default is the value
#: of ``os.environ['CURRENT_VERSION_ID']``.
default_config = {
'apps_installed': [],
'apps_entry_points': {},
'middleware': [],
'server_name': None,
'subdomain': None,
'dev': os.environ.get('SERVER_SOFTWARE', '').startswith('Dev'),
'app_id': os.environ.get('APPLICATION_ID', None),
'version_id': os.environ.get('CURRENT_VERSION_ID', '1'),
}
# Allowed request methods.
ALLOWED_METHODS = frozenset(['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT',
'TRACE'])
# Value used for required values.
REQUIRED_VALUE = object()
# Value used for missing default values.
DEFAULT_VALUE = object()
class RequestHandler(object):
"""Base class to handle requests. Implements the minimal interface
required by :class:`Tipfy`.
The dispatch method implements a middleware system to execute hooks before
and after processing a request and to handle exceptions.
"""
#: A list of middleware classes or callables. A middleware can implement
#: three methods that are called before and after the current request
#: method is executed, or if an exception occurs:
#:
#: pre_dispatch(handler)
#: Called before the requested method is
#: executed. If it returns a response, the middleware chain is stopped
#: and that response is used, without calling the requested method.
#:
#: post_dispatch(handler, response)
#: Called after the requested method is executed. Must always return
#: a response. All *post_dispatch* middleware are always executed.
#:
#: handle_exception(exception, handler)
#: Called if an exception occurs.
middleware = []
def __init__(self, app, request):
"""Initializes the handler.
:param app:
A :class:`Tipfy` instance.
:param request:
A :class:`Request` instance.
"""
self.app = app
self.request = request
def __call__(self, _method, *args, **kwargs):
"""Executes a handler method. This is called by :class:`Tipfy` and
must return a :class:`Response` object.
:param _method:
The method to be dispatched, normally the request method in
lower case, e.g., 'get', 'post', 'head' or 'put'.
:param kwargs:
Keyword arguments from the matched :class:`Rule`.
:return:
A :class:`Response` instance.
"""
method = getattr(self, _method, None)
if method is None:
# 405 Method Not Allowed.
# The response MUST include an Allow header containing a
# list of valid methods for the requested resource.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.6
self.abort(405, valid_methods=get_valid_methods(self))
if not self.middleware:
# No middleware is set: just execute the method.
return method(*args, **kwargs)
# Get all middleware for this handler.
middleware = self.app.get_middleware(self, self.middleware)
# Execute pre_dispatch middleware.
for hook in middleware.get('pre_dispatch', []):
response = hook(self)
if response is not None:
break
else:
try:
# Execute the requested method.
response = method(*args, **kwargs)
except Exception, e:
# Execute handle_exception middleware.
for hook in middleware.get('handle_exception', []):
response = hook(e, self)
if response is not None:
break
else:
raise
# Make sure we have a response object.
response = self.app.make_response(self.request, response)
# Execute post_dispatch middleware.
for hook in middleware.get('post_dispatch', []):
response = hook(self, response)
# Done!
return response
def dispatch(self, _method, *args, **kwargs):
"""Deprecated method: a wrapper for :meth:`__call__`."""
return self(_method, *args, **kwargs)
def abort(self, code, *args, **kwargs):
"""Raises an :class:`HTTPException`. This stops code execution,
leaving the HTTP exception to be handled by an exception handler.
:param code:
HTTP status error code (e.g., 404).
:param args:
Positional arguments to be passed to the exception class.
:param kwargs:
Keyword arguments to be passed to the exception class.
"""
abort(code, *args, **kwargs)
def get_config(self, module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module.
.. seealso:: :meth:`Config.get_or_load`.
"""
return self.app.config.get_or_load(module, key=key, default=default)
def handle_exception(self, exception=None, debug=False):
"""Handles an exception. The default behavior is to re-raise the
exception (no exception handling is implemented).
:param exception:
The exception that was thrown.
:param debug:
True if the exception should be handled in debug mode.
"""
raise
def redirect(self, location, code=302):
"""Issues an HTTP redirect to the given URL. This won't stop
code execution. You must return when calling this method::
return self.redirect('/some-path')
:param location:
An absolute URI.
:param code:
The HTTP status code for the redirect.
:return:
A :class:`Response` object with headers set for redirection.
"""
return redirect(location, code)
def redirect_to(self, _name, _code=302, **kwargs):
"""Convenience method mixing :meth:`redirect` and :meth:`url_for`:
redirects the client to a URL built using a named :class:`Rule`.
:param _name:
The rule name.
:param _code:
The HTTP status code for the redirect.
:param kwargs:
Keyword arguments to build the URL.
:return:
A :class:`Response` object with headers set for redirection.
"""
return self.redirect(self.url_for(_name, **kwargs), code=_code)
def url_for(self, _name, **kwargs):
"""Builds a URL for a named :class:`Rule`.
.. seealso:: :meth:`Request.url_for`.
"""
return self.request.url_for(_name, **kwargs)
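# A minimal, hypothetical sketch (these classes are not part of tipfy) of the
# middleware hooks described in RequestHandler.middleware above.
class _LoggingMiddleware(object):
    def pre_dispatch(self, handler):
        logging.info('dispatching %s', handler.request.path)

    def post_dispatch(self, handler, response):
        logging.info('dispatched with status %s', response.status_code)
        return response


class _HelloHandler(RequestHandler):
    middleware = [_LoggingMiddleware]

    def get(self, **kwargs):
        return Response('Hello, world!')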
class Request(WerkzeugRequest):
"""The :class:`Request` object contains all environment variables for the
current request: GET, POST, FILES, cookies and headers. Additionally
it stores the URL adapter bound to the request and information about the
matched URL rule.
"""
#: URL adapter bound to a request.
url_adapter = None
#: Matched URL rule for a request.
rule = None
#: Keyword arguments from the matched rule.
rule_args = None
#: Exception raised when matching URL rules, if any.
routing_exception = None
def __init__(self, environ):
"""Initializes the request. This also sets a context attribute to
hold variables valid for a single request.
"""
super(Request, self).__init__(environ)
# A registry for objects in use during a request.
self.registry = {}
# A context for template variables.
self.context = {}
def url_for(self, endpoint, _full=False, _method=None, _anchor=None,
**kwargs):
"""Builds and returns a URL for a named :class:`Rule`.
For example, if you have these rules registered in the application:
.. code-block:: python
Rule('/', endpoint='home/main', handler='handlers.MyHomeHandler')
Rule('/wiki', endpoint='wiki/start', handler='handlers.WikiHandler')
Here are some examples of how to generate URLs for them:
>>> url = url_for('home/main')
>>> '/'
>>> url = url_for('home/main', _full=True)
>>> 'http://localhost:8080/'
>>> url = url_for('wiki/start')
>>> '/wiki'
>>> url = url_for('wiki/start', _full=True)
>>> 'http://localhost:8080/wiki'
>>> url = url_for('wiki/start', _full=True, _anchor='my-heading')
>>> 'http://localhost:8080/wiki#my-heading'
:param endpoint:
The rule endpoint.
:param _full:
If True, returns an absolute URL. Otherwise, returns a
relative one.
:param _method:
The rule request method, in case there are different rules
for different request methods.
:param _anchor:
An anchor to add to the end of the URL.
:param kwargs:
Keyword arguments to build the URL.
:return:
An absolute or relative URL.
"""
url = self.url_adapter.build(endpoint, force_external=_full,
method=_method, values=kwargs)
if _anchor:
url += '#' + url_quote(_anchor)
return url
class Response(WerkzeugResponse):
"""A response object with default mimetype set to ``text/html``."""
default_mimetype = 'text/html'
class Tipfy(object):
"""The WSGI application which centralizes URL dispatching, configuration
and hooks for an App Engine app.
"""
#: Default class for requests.
request_class = Request
#: Default class for responses.
response_class = Response
#: The active :class:`Tipfy` instance.
app = None
#: The active :class:`Request` instance.
request = None
def __init__(self, config=None, rules='urls.get_rules', debug=False):
"""Initializes the application.
:param config:
Dictionary with configuration for the application modules.
:param rules:
URL rules definitions for the application. It can be a list of
:class:`Rule`, a callable or a string defining a callable that
returns the rules list. The callable is called passing the WSGI
application as parameter. Default is ``urls.get_rules``: import
``get_rules()`` from *urls.py* and calls it passing the app.
"""
# Set the currently active wsgi app instance.
self.set_wsgi_app()
# Load default config and update with values for this instance.
self.config = Config(config, {'tipfy': default_config}, ['tipfy'])
# Set up a context registry for this app.
self.registry = {}
# Set a shortcut to the development flag.
self.dev = self.config.get('tipfy', 'dev', False)
# Cache for loaded handler classes.
self.handlers = {}
# Middleware factory and registry.
self.middleware_factory = MiddlewareFactory()
# Store the app middleware dict.
self.middleware = self.get_middleware(self, self.config.get('tipfy',
'middleware'))
# Initialize the URL map.
self.url_map = self.get_url_map(rules)
def __call__(self, environ, start_response):
"""Shortcut for :meth:`Tipfy.wsgi_app`."""
return self.wsgi_app(environ, start_response)
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
:meth:`Tipfy.__call__` so that middlewares can be applied without
losing a reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
:param environ:
A WSGI environment.
:param start_response:
A callable accepting a status code, a list of headers and an
optional exception context to start the response.
"""
cleanup = True
try:
# Set the currently active wsgi app and request instances.
request = self.request_class(environ)
self.set_wsgi_app()
self.set_request(request)
# Make sure that the requested method is allowed in App Engine.
if request.method not in ALLOWED_METHODS:
abort(501)
# Match current URL and store routing exceptions if any.
self.match_url(request)
# Run pre_dispatch_handler middleware.
rv = self.pre_dispatch(request)
if rv is None:
# Dispatch the requested handler.
rv = self.dispatch(request)
# Run post_dispatch_handler middleware.
response = self.make_response(request, rv)
response = self.post_dispatch(request, response)
except RequestRedirect, e:
# Execute redirects raised by the routing system or the
# application.
response = e
except Exception, e:
# Handle HTTP and uncaught exceptions.
cleanup = not self.dev
response = self.handle_exception(request, e)
response = self.make_response(request, response)
finally:
# Do not clean request if we are in development mode and an
# exception happened. This allows the debugger to still access
# request and other variables in the interactive shell.
if cleanup:
self.cleanup()
# Call the response object as a WSGI application.
return response(environ, start_response)
def get_url_map(self, rules=None):
"""Returns a ``werkzeug.routing.Map`` instance with initial
:class:`Rule` definitions.
:param rules:
Initial list of :class:`Rule`, a callable or a string defining
a callable that returns the list of rules.
:return:
A ``werkzeug.routing.Map`` instance.
"""
if isinstance(rules, basestring):
try:
rules = import_string(rules)
except (AttributeError, ImportError), e:
logging.warning('Missing %s. No URL rules were loaded.' %
rules)
rules = None
if callable(rules):
try:
rules = rules(self)
except TypeError, e:
# Backwards compatibility:
# Previously get_rules() didn't receive the WSGI app.
rules = rules()
return Map(rules)
def add_url_rule(self, path, endpoint, handler, **kwargs):
"""Adds a rule to the URL map.
:param path:
The URL path.
:param endpoint:
The rule endpoint: an identifier for the rule.
:param handler:
A :class:`RequestHandler` class, or a module and class
specification as a string.
"""
rule = Rule(path, endpoint=endpoint, handler=handler, **kwargs)
self.url_map.add(rule)
def match_url(self, request):
"""Matches registered :class:`Rule` definitions against the request.
This will store the URL adapter, matched rule and rule arguments in
the :class:`Request` instance.
Three exceptions can occur when matching the rules: ``NotFound``,
``MethodNotAllowed`` or ``RequestRedirect``. If they are
raised, they are stored in the request for later use.
:param request:
A :class:`Request` instance.
:return:
None.
"""
# Bind url map to the current request location.
config = self.config.get('tipfy')
request.url_adapter = self.url_map.bind_to_environ(request.environ,
server_name=config.get('server_name'),
subdomain=config.get('subdomain'))
try:
# Match the path against registered rules.
request.rule, request.rule_args = request.url_adapter.match(
return_rule=True)
except HTTPException, e:
request.routing_exception = e
def pre_dispatch(self, request):
"""Executes pre_dispatch_handler middleware. If a middleware returns
anything, the chain is stopped and that value is returned.
:param request:
A :class:`Request` instance.
:return:
The returned value from a middleware or None.
"""
for hook in self.middleware.get('pre_dispatch_handler', []):
rv = hook()
if rv is not None:
return rv
def dispatch(self, request):
"""Matches the current URL against registered rules and returns the
result from the :class:`RequestHandler`.
:param request:
A :class:`Request` instance.
:return:
The returned value from a middleware or None.
"""
if request.routing_exception is not None:
raise request.routing_exception
handler = request.rule.handler
if isinstance(handler, basestring):
if handler not in self.handlers:
# Import handler set in matched rule.
self.handlers[handler] = import_string(handler)
handler = self.handlers[handler]
# Instantiate handler and dispatch requested method.
method = request.method.lower().replace('-', '_')
return handler(self, request)(method, **request.rule_args)
def post_dispatch(self, request, response):
"""Executes post_dispatch_handler middleware. All middleware are
executed and must return a response object.
:param request:
A :class:`Request` instance.
:param response:
The :class:`Response` returned from :meth:`Tipfy.pre_dispatch`
or :meth:`Tipfy.dispatch` and converted by
:meth:`Tipfy.make_response`.
:return:
A :class:`Response` instance.
"""
for hook in self.middleware.get('post_dispatch_handler', []):
response = hook(response)
return response
def make_response(self, request, rv):
"""Converts the return value from a handler to a real response
object that is an instance of :class:`Response`.
The following types are allowed for ``rv``:
response_class
The object is returned unchanged.
str
A response object is created with the string as body.
unicode
A response object is created with the string encoded to
utf-8 as body.
tuple
The response object is created with the contents of the
tuple as arguments.
WSGI function
The function is called as WSGI application and
buffered as response object.
This method comes from `Flask <http://flask.pocoo.org/>`_.
:param request:
A :class:`Request` instance.
:param rv:
The return value from the handler.
:return:
A :class:`Response` instance.
"""
if isinstance(rv, self.response_class):
return rv
if isinstance(rv, basestring):
return self.response_class(rv)
if isinstance(rv, tuple):
return self.response_class(*rv)
if rv is None:
raise ValueError('Handler did not return a response.')
return self.response_class.force_type(rv, request.environ)
def handle_exception(self, request, e):
"""Handles HTTPException or uncaught exceptions raised by the WSGI
application, optionally applying exception middleware.
:param request:
A :class:`Request` instance.
:param e:
The caught exception.
:return:
A :class:`Response` instance, if the exception is not re-raised.
"""
# Execute handle_exception middleware.
for hook in self.middleware.get('handle_exception', []):
response = hook(e)
if response is not None:
return response
if self.dev:
raise
logging.exception(e)
if isinstance(e, HTTPException):
return e
return InternalServerError()
def get_middleware(self, obj, classes):
"""Returns a dictionary of all middleware instance methods for a given
object.
:param obj:
The object to search for related middleware (:class:`Tipfy` or
:class:`RequestHandler` instance).
:param classes:
A list of middleware classes.
:return:
A dictionary with middleware instance methods.
"""
if not classes:
return {}
return self.middleware_factory.get_middleware(obj, classes)
def get_config(self, module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module.
.. seealso:: :meth:`Config.get_or_load`.
"""
return self.config.get_or_load(module, key=key, default=default)
def set_wsgi_app(self):
"""Sets the currently active :class:`Tipfy` instance."""
Tipfy.app = self
def set_request(self, request):
"""Sets the currently active :class:`Request` instance.
:param request:
The currently active :class:`Request` instance.
"""
Tipfy.request = request
def cleanup(self):
"""Cleans :class:`Tipfy` variables at the end of a request."""
Tipfy.app = Tipfy.request = None
def get_test_client(self):
"""Creates a test client for this application.
:return:
A ``werkzeug.Client``, which is a :class:`Tipfy` wrapped
for tests.
"""
from werkzeug import Client
return Client(self, self.response_class, use_cookies=True)
def run(self):
"""Runs the app using ``CGIHandler``. This must be called inside a
``main()`` function in the file defined in *app.yaml* to run the
application::
# ...
app = Tipfy(rules=[
Rule('/', endpoint='home', handler=HelloWorldHandler),
])
def main():
app.run()
if __name__ == '__main__':
main()
"""
# Fix issue #772.
if self.dev:
fix_sys_path()
CGIHandler().run(self)
class Config(dict):
"""A simple configuration dictionary keyed by module name. This is a
dictionary of dictionaries. It requires all values to be dictionaries
and applies updates and default values to the inner dictionaries rather
than to the top-level one.
The configuration object is available as a ``config`` attribute of
:class:`Tipfy`. It is instantiated and populated when the app is built::
config = {}
config['my.module'] = {
'foo': 'bar',
}
app = Tipfy(rules=[Rule('/', endpoint='home', handler=MyHandler)], config=config)
Then to read configuration values, use :meth:`RequestHandler.get_config`::
class MyHandler(RequestHandler):
def get(self):
foo = self.get_config('my.module', 'foo')
# ...
"""
#: Loaded module configurations.
loaded = None
def __init__(self, value=None, default=None, loaded=None):
"""Initializes the configuration object.
:param value:
A dictionary of configuration dictionaries for modules.
:param default:
A dictionary of configuration dictionaries for default values.
:param loaded:
A list of modules to be marked as loaded.
"""
self.loaded = loaded or []
if value is not None:
assert isinstance(value, dict)
for module in value.keys():
self.update(module, value[module])
if default is not None:
assert isinstance(default, dict)
for module in default.keys():
self.setdefault(module, default[module])
def __setitem__(self, module, value):
"""Sets a configuration for a module, requiring it to be a dictionary.
:param module:
A module name for the configuration, e.g.: `tipfy.ext.i18n`.
:param value:
A dictionary of configurations for the module.
"""
assert isinstance(value, dict)
super(Config, self).__setitem__(module, value)
def get(self, module, key=None, default=None):
"""Returns a configuration value for given key in a given module.
>>> cfg = Config({'tipfy.ext.i18n': {'locale': 'pt_BR'}})
>>> cfg.get('tipfy.ext.i18n')
{'locale': 'pt_BR'}
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'invalid-key')
None
>>> cfg.get('tipfy.ext.i18n', 'invalid-key', 'default-value')
default-value
:param module:
The module to get a configuration from, e.g.: `tipfy.ext.i18n`.
:param key:
The key from the module configuration.
:param default:
A default value to return when the configuration for the given
key is not set. It is only returned if **key** is defined.
:returns:
The configuration value.
"""
if module not in self:
if key is None:
return None
return default
if key is None:
return self[module]
if key not in self[module]:
return default
return self[module][key]
def get_or_load(self, module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module. If it is not already
set, loads a ``default_config`` variable from the given module,
updates the app configuration with those default values and returns
the value for the given key. If the key is still not available,
returns the provided default value or raises an exception if no
default was provided.
Every module that allows some kind of configuration sets a
``default_config`` global variable that is loaded by this function,
cached and used in case the requested configuration was not defined
by the user.
:param module:
The configured module.
:param key:
The config key.
:param default:
A default value to return in case the configuration for
the module/key is not set.
:returns:
A configuration value.
"""
if module not in self.loaded:
# Load default configuration and update config.
values = import_string(module + '.default_config', silent=True)
if values:
self.setdefault(module, values)
self.loaded.append(module)
value = self.get(module, key, default)
if value is not REQUIRED_VALUE and not (key is None and value is None):
return value
if key is None and value is None:
raise KeyError('Module %s is not configured.' % module)
raise KeyError('Module %s requires the config key "%s" to be '
'set.' % (module, key))
def setdefault(self, module, value):
"""Sets a default configuration dictionary for a module.
>>> cfg = Config({'tipfy.ext.i18n': {'locale': 'pt_BR'}})
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'foo')
None
>>> cfg.setdefault('tipfy.ext.i18n', {'locale': 'en_US', 'foo': 'bar'})
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'foo')
bar
:param module:
The module to set default configuration, e.g.: `tipfy.ext.i18n`.
:param value:
A dictionary of configurations for the module.
:returns:
None.
"""
assert isinstance(value, dict)
if module not in self:
self[module] = {}
for key in value.keys():
self[module].setdefault(key, value[key])
def update(self, module, value):
"""Updates the configuration dictionary for a module.
>>> cfg = Config({'tipfy.ext.i18n': {'locale': 'pt_BR'}})
>>> cfg.get('tipfy.ext.i18n', 'locale')
pt_BR
>>> cfg.get('tipfy.ext.i18n', 'foo')
None
>>> cfg.update('tipfy.ext.i18n', {'locale': 'en_US', 'foo': 'bar'})
>>> cfg.get('tipfy.ext.i18n', 'locale')
en_US
>>> cfg.get('tipfy.ext.i18n', 'foo')
bar
:param module:
The module to update the configuration, e.g.: `tipfy.ext.i18n`.
:param value:
A dictionary of configurations for the module.
:returns:
None.
"""
assert isinstance(value, dict)
if module not in self:
self[module] = {}
self[module].update(value)
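# Illustrative sketch (not part of the original source): the difference
# between Config.update() and Config.setdefault(), using an arbitrary
# module name 'my.module'.
def _example_config_usage():
    cfg = Config({'my.module': {'foo': 'bar'}})
    cfg.setdefault('my.module', {'foo': 'default', 'baz': 'qux'})
    assert cfg.get('my.module', 'foo') == 'bar'   # existing keys are kept
    assert cfg.get('my.module', 'baz') == 'qux'   # missing keys are filled in
    cfg.update('my.module', {'foo': 'new'})
    assert cfg.get('my.module', 'foo') == 'new'   # update overwrites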
class MiddlewareFactory(object):
"""A factory and registry for middleware instances in use."""
#: All middleware methods to look for.
names = (
'post_make_app',
'pre_dispatch_handler',
'post_dispatch_handler',
'pre_dispatch',
'post_dispatch',
'handle_exception',
)
#: Methods that must run in reverse order.
reverse_names = (
'post_dispatch_handler',
'post_dispatch',
'handle_exception',
)
def __init__(self):
# Instantiated middleware.
self.instances = {}
# Methods from instantiated middleware.
self.methods = {}
# Middleware methods for a given object.
self.obj_middleware = {}
def get_middleware(self, obj, classes):
"""Returns a dictionary of all middleware instance methods for a given
object.
:param obj:
The object to search for related middleware (the :class:`Tipfy` or
:class:`RequestHandler`).
:param classes:
A list of middleware classes.
:return:
A dictionary with middleware instance methods.
"""
id = obj.__module__ + '.' + obj.__class__.__name__
if id not in self.obj_middleware:
self.obj_middleware[id] = self.load_middleware(classes)
return self.obj_middleware[id]
def load_middleware(self, specs):
"""Returns a dictionary of middleware instance methods for a list of
middleware specifications.
:param specs:
A list of middleware classes, classes as strings or instances.
:return:
A dictionary with middleware instance methods.
"""
res = {}
for spec in specs:
# Middleware can be defined in 3 forms: strings, classes and
# instances.
is_str = isinstance(spec, basestring)
is_obj = not is_str and not isinstance(spec, type)
if is_obj:
# Instance.
spec_id = id(spec)
obj = spec
elif is_str:
spec_id = spec
else:
spec_id = spec.__module__ + '.' + spec.__name__
if spec_id not in self.methods:
if is_str:
spec = import_string(spec, silent=True)
if not spec:
logging.warning('Missing %s. Middleware was not '
'loaded.' % spec_id)
continue
if not is_obj:
obj = spec()
self.instances[spec_id] = obj
self.methods[spec_id] = [getattr(obj, n, None) for n in \
self.names]
for name, method in zip(self.names, self.methods[spec_id]):
if method:
res.setdefault(name, []).append(method)
for name in self.reverse_names:
if name in res:
res[name].reverse()
return res
class Rule(WerkzeugRule):
"""Extends Werkzeug routing to support a handler definition for each Rule.
Handler is a :class:`RequestHandler` module and class specification, and
endpoint is a friendly name used to build URL's. For example:
.. code-block:: python
Rule('/users', endpoint='user-list', handler='my_app:UsersHandler')
Access to the URL ``/users`` loads ``UsersHandler`` class from
``my_app`` module. To generate a URL to that page, use :func:`url_for`::
url = url_for('user-list')
"""
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop('handler', kwargs.get('endpoint', None))
super(Rule, self).__init__(*args, **kwargs)
def empty(self):
"""Returns an unbound copy of this rule. This can be useful if you
want to reuse an already bound URL for another map.
"""
defaults = None
if self.defaults is not None:
defaults = dict(self.defaults)
return Rule(self.rule, defaults, self.subdomain, self.methods,
self.build_only, self.endpoint, self.strict_slashes,
self.redirect_to, handler=self.handler)
class HandlerPrefix(RuleFactory):
"""Prefixes all handler values (which must be strings for this factory) of
nested rules with another string. For example, take these rules::
rules = [
HandlerPrefix('my_app.handlers.', [
Rule('/', endpoint='index', handler='IndexHandler'),
Rule('/entry/<entry_slug>', endpoint='show', handler='ShowHandler'),
]),
]
These are the same as::
rules = [
Rule('/', endpoint='index', handler='my_app.handlers.IndexHandler'),
Rule('/entry/<entry_slug>', endpoint='show', handler='my_app.handlers.ShowHandler'),
]
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.handler = self.prefix + rule.handler
yield rule
def get_config(module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module.
.. seealso:: :meth:`Config.get_or_load`.
"""
return Tipfy.app.config.get_or_load(module, key=key, default=default)
def get_valid_methods(handler):
"""Returns a list of HTTP methods supported by a handler.
:param handler:
A :class:`RequestHandler` instance.
:returns:
A list of HTTP methods supported by the handler.
"""
return [method for method in ALLOWED_METHODS if
getattr(handler, method.lower().replace('-', '_'), None)]
def url_for(endpoint, _full=False, _method=None, _anchor=None, **kwargs):
"""Builds and returns a URL for a named :class:`Rule`.
This is a shortcut to :meth:`Request.url_for`.
"""
# For backwards compatibility, check old keywords.
full = kwargs.pop('full', _full)
method = kwargs.pop('method', _method)
return Tipfy.request.url_for(endpoint, _full=full, _method=method,
_anchor=_anchor, **kwargs)
def redirect_to(endpoint, _method=None, _anchor=None, _code=302, **kwargs):
"""Convenience function mixing ``werkzeug.redirect`` and
:meth:`Request.url_for`: redirects the client to a URL built using a named
:class:`Rule`.
:param endpoint:
The rule endpoint.
:param _method:
The rule request method, in case there are different rules
for different request methods.
:param _anchor:
An anchor to add to the end of the URL.
:param _code:
The redirect status code.
:param kwargs:
Keyword arguments to build the URL.
:return:
A :class:`Response` object with headers set for redirection.
"""
# For backwards compatibility, check old keywords.
method = kwargs.pop('method', _method)
code = kwargs.pop('code', _code)
url = Tipfy.request.url_for(endpoint, _full=True, _method=method,
_anchor=_anchor, **kwargs)
return redirect(url, code=code)
def render_json_response(*args, **kwargs):
"""Renders a JSON response.
:param args:
Arguments to be passed to simplejson.dumps().
:param kwargs:
Keyword arguments to be passed to simplejson.dumps().
:return:
A :class:`Response` object with a JSON string in the body and
mimetype set to ``application/json``.
"""
from django.utils import simplejson
return Response(simplejson.dumps(*args, **kwargs),
mimetype='application/json')
def make_wsgi_app(config=None, **kwargs):
"""Returns a instance of :class:`Tipfy`.
:param config:
A dictionary of configuration values.
:param kwargs:
Additional keyword arguments to instantiate :class:`Tipfy`.
:return:
A :class:`Tipfy` instance.
"""
app = Tipfy(config=config, **kwargs)
if app.dev:
logging.getLogger().setLevel(logging.DEBUG)
# Execute post_make_app middleware.
for hook in app.middleware.get('post_make_app', []):
app = hook(app)
return app
def run_wsgi_app(app):
"""Executes the application, optionally wrapping it by middleware.
.. warning::
This is deprecated. Use app.run() instead.
:param app:
A :class:`Tipfy` instance.
:return:
None.
"""
app.run()
_ULTIMATE_SYS_PATH = None
def fix_sys_path():
"""A fix for issue 772. We must keep this here until it is fixed in the dev
server.
See: http://code.google.com/p/googleappengine/issues/detail?id=772
"""
global _ULTIMATE_SYS_PATH
import sys
if _ULTIMATE_SYS_PATH is None:
_ULTIMATE_SYS_PATH = list(sys.path)
elif sys.path != _ULTIMATE_SYS_PATH:
sys.path[:] = _ULTIMATE_SYS_PATH
__all__ = [
'Config',
'DEFAULT_VALUE',
'EndpointPrefix',
'HTTPException',
'InternalServerError',
'Map',
'REQUIRED_VALUE',
'Request',
'RequestHandler',
'RequestRedirect',
'Response',
'Rule',
'RuleTemplate',
'Subdomain',
'Submount',
'Tipfy',
'abort',
'cached_property',
'default_config',
'escape',
'get_config',
'import_string',
'make_wsgi_app',
'redirect',
'redirect_to',
'render_json_response',
'run_wsgi_app',
'url_for',
'url_quote',
]
|
|
import numpy as np
import ipdb
import os
def convert_camel_to_underscore(name):
import re
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def get_config_name_abbr(config_name):
abbr = ''
uncamel_key = convert_camel_to_underscore(config_name)
for word in uncamel_key.split('_'):
abbr += word[0]
return abbr
def get_model_config_id(model_config):
model_id = ''
for config_key in model_config:
model_id += '%s_(%s)_'%(get_config_name_abbr(config_key), model_config[config_key])
return model_id
def parse_model_config_id(model_id):
items = model_id.strip('_').split('_')
model_config = {}
for idx in range(0, len(items), 2):
model_config[items[idx]] = items[idx+1][1:-1]
return model_config
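# Illustrative sketch (not part of the original source): round trip between
# a model config dict and its string id, using hypothetical config keys.
def _example_model_config_id_round_trip():
    model_config = {'hiddenStateAmount': 5, 'gaussianEmissionDim': 3}
    model_id = get_model_config_id(model_config)
    # e.g. 'hsa_(5)_ged_(3)_': keys are abbreviated, values wrapped in '()'
    parsed = parse_model_config_id(model_id)
    assert parsed == {'hsa': '5', 'ged': '3'}  # values come back as strings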
def iter_from_X_lengths(X, lengths):
if lengths is None:
yield 0, len(X)
else:
n_samples = X.shape[0]
end = np.cumsum(lengths).astype(np.int32)
start = end - lengths
if end[-1] > n_samples:
raise ValueError("more than {0:d} samples in lengths array {1!s}"
.format(n_samples, lengths))
for i in range(len(lengths)):
yield start[i], end[i]
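# Illustrative sketch (not part of the original source): iterating over the
# (start, end) index pairs of sequences stacked into a single array.
def _example_iter_from_X_lengths():
    X = np.zeros((10, 4))          # 10 samples, 4 features
    lengths = np.array([3, 3, 4])  # three stacked sequences
    segments = list(iter_from_X_lengths(X, lengths))
    assert segments == [(0, 3), (3, 6), (6, 10)]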
def fast_log_curve_calculation(X, model):
import hmmlearn.hmm
import hongminhmmpkg.hmm
import bnpy
if issubclass(type(model), hmmlearn.hmm._BaseHMM):
from sklearn.utils import check_array, check_random_state
from scipy.misc import logsumexp
X = check_array(X)
framelogprob = model._compute_log_likelihood(X[:])
logprobij, _fwdlattice = model._do_forward_pass(framelogprob)
log_curve = [logsumexp(_fwdlattice[i]) for i in range(len(_fwdlattice))]
return log_curve
elif issubclass(type(model.model), bnpy.HModel):
return model.calc_log(X)
else:
raise Exception('model of type %s is not supported by fast_log_curve_calculation.'%(type(model),))
def get_hidden_state_log_prob_matrix(X, model):
import hmmlearn.hmm
import hongminhmmpkg.hmm
import bnpy
if issubclass(type(model), hmmlearn.hmm._BaseHMM):
from sklearn.utils import check_array, check_random_state
from scipy.misc import logsumexp
X = check_array(X)
framelogprob = model._compute_log_likelihood(X[:])
logprobij, _fwdlattice = model._do_forward_pass(framelogprob)
return _fwdlattice
elif issubclass(type(model.model), bnpy.HModel):
raise Exception('hongmin BNPY not supported for now.')
else:
raise Exception('model of type %s is not supported by get_hidden_state_log_prob_matrix.'%(type(model),))
def get_emission_log_prob_matrix(X, model):
import hmmlearn.hmm
import hongminhmmpkg.hmm
import bnpy
if issubclass(type(model), hmmlearn.hmm._BaseHMM):
from sklearn.utils import check_array, check_random_state
from scipy.misc import logsumexp
X = check_array(X)
framelogprob = model._compute_log_likelihood(X[:])
return framelogprob
elif issubclass(type(model.model), bnpy.HModel):
raise Exception('hongmin BNPY not supported for now.')
else:
raise Exception('model of type %s is not supported by get_emission_log_prob_matrix.'%(type(model),))
def log_mask_zero(a):
"""Computes the log of input probabilities masking divide by zero in log.
Notes
-----
During the M-step of EM-algorithm, very small intermediate start
or transition probabilities could be normalized to zero, causing a
*RuntimeWarning: divide by zero encountered in log*.
This function masks this harmless warning.
"""
a = np.asarray(a)
with np.errstate(divide="ignore"):
a_log = np.log(a)
a_log[a <= 0] = 0.0
return a_log
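# Illustrative sketch (not part of the original source): zero probabilities
# are mapped to 0.0 instead of -inf, and no divide-by-zero warning is raised.
def _example_log_mask_zero():
    probs = np.array([0.5, 0.0, 0.5])
    log_probs = log_mask_zero(probs)
    assert np.allclose(log_probs, [np.log(0.5), 0.0, np.log(0.5)])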
def get_log_transmat(model):
import hmmlearn.hmm
import hongminhmmpkg.hmm
import bnpy
if issubclass(type(model), hmmlearn.hmm._BaseHMM):
from sklearn.utils import check_array, check_random_state
from scipy.misc import logsumexp
log_transmat = log_mask_zero(model.transmat_)
return log_transmat
elif issubclass(type(model.model), bnpy.HModel):
raise Exception('hongmin BNPY not supported for now.')
else:
raise Exception('model of type %s is not supported by get_log_transmat.'%(type(model),))
def make_trials_of_each_state_the_same_length(_trials_group_by_folder_name):
import copy
# may implement DTW in the future...
# for now we just align trials with the shortest trial of each state
trials_group_by_folder_name = copy.deepcopy(_trials_group_by_folder_name)
one_trial_data_group_by_state = trials_group_by_folder_name.itervalues().next()
state_amount = len(one_trial_data_group_by_state)
for state_no in range(1, state_amount+1):
min_length = None
for trial_name in trials_group_by_folder_name:
# remember that the actual data is a numpy matrix
# so we use *.shape[0] to get the length
now_length = trials_group_by_folder_name[trial_name][state_no].shape[0]
if min_length is None or now_length < min_length:
min_length = now_length
# align all trials in this state to min_length
for trial_name in trials_group_by_folder_name:
trials_group_by_folder_name[trial_name][state_no] = trials_group_by_folder_name[trial_name][state_no][:min_length, :]
return trials_group_by_folder_name
def get_trials_group_by_folder_name(training_config, data_class='success'):
import copy
if data_class == 'success':
data_path = training_config.success_path
elif data_class == 'anomaly':
data_path = training_config.anomaly_data_path
elif data_class == 'test_success':
data_path = training_config.test_success_data_path
else:
raise Exception("unknown data class %s"%data_class)
import load_csv_data
trials_group_by_folder_name, state_order_group_by_folder_name = load_csv_data.run(
data_path = data_path,
interested_data_fields = training_config.interested_data_fields,
preprocessing_normalize = training_config.preprocessing_normalize,
preprocessing_scaling = training_config.preprocessing_scaling
)
return trials_group_by_folder_name, state_order_group_by_folder_name
def inform_config(training_config):
import json
config_to_print = [
'training_config.config_by_user',
'training_config.interested_data_fields',
'training_config.model_config',
'training_config.model_id',
]
for s in config_to_print:
print '-'*20
print s, ':'
print json.dumps(
eval(s),
indent=4,
)
print '#'*20
print "press any key to continue."
raw_input()
def bring_model_id_back_to_model_config(model_id, template):
import copy
config_to_return = copy.deepcopy(template)
str_model_config = parse_model_config_id(model_id)
for config_key in config_to_return:
type_of_value = type(config_to_return[config_key])
config_to_return[config_key] = type_of_value(str_model_config[get_config_name_abbr(config_key)])
return config_to_return
def log_mask_zero(a):
"""Computes the log of input probabilities masking divide by zero in log.
Notes
-----
During the M-step of EM-algorithm, very small intermediate start
or transition probabilities could be normalized to zero, causing a
*RuntimeWarning: divide by zero encountered in log*.
This function masks this harmless warning.
"""
a = np.asarray(a)
with np.errstate(divide="ignore"):
a_log = np.log(a)
a_log[a <= 0] = 0.0
return a_log
def fast_viterbi_lock_t_cal(X, model):
import hmmlearn.hmm
import hongminhmmpkg.hmm
import bnpy
if issubclass(type(model), hmmlearn.hmm._BaseHMM):
from sklearn.utils import check_array, check_random_state
from scipy.misc import logsumexp
X = check_array(X)
framelogprob = model._compute_log_likelihood(X[:])
n_samples, n_components = framelogprob.shape
log_startprob = log_mask_zero(model.startprob_)
log_transmat = log_mask_zero(model.transmat_)
work_buffer = np.empty(n_components)
list_of_lock_t = []
viterbi_lattice = np.zeros((n_samples, n_components))
viterbi_trace = np.zeros((n_samples, n_components))
for i in range(n_components):
viterbi_lattice[0, i] = log_startprob[i] + framelogprob[0, i]
viterbi_trace[0, i] = 0
list_of_lock_t.append(None)
# Induction
for t in range(1, n_samples):
for i in range(n_components):
for j in range(n_components):
work_buffer[j] = (log_transmat[j, i]
+ viterbi_lattice[t - 1, j])
prev_state = np.argmax(work_buffer)
viterbi_lattice[t, i] = work_buffer[prev_state] + framelogprob[t, i]
viterbi_trace[t, i] = prev_state
# backtrack
lock_t = None
for k in range(t, 0, -1):
if np.all(viterbi_trace[k, :] == viterbi_trace[k, 0]):
lock_t = k-1
break
list_of_lock_t.append(lock_t)
return list_of_lock_t, n_samples, n_components
else:
raise Exception('model of type %s is not supported by fast_viterbi_lock_t_cal.'%(type(model),))
def fast_growing_viterbi_paths_cal(X, model):
import hmmlearn.hmm
import hongminhmmpkg.hmm
import bnpy
if issubclass(type(model), hmmlearn.hmm._BaseHMM):
from sklearn.utils import check_array, check_random_state
from scipy.misc import logsumexp
X = check_array(X)
framelogprob = model._compute_log_likelihood(X[:])
n_samples, n_components = framelogprob.shape
log_startprob = log_mask_zero(model.startprob_)
log_transmat = log_mask_zero(model.transmat_)
work_buffer = np.empty(n_components)
list_of_growing_viterbi_paths = []
viterbi_lattice = np.zeros((n_samples, n_components))
viterbi_trace = np.zeros((n_samples, n_components))
for i in range(n_components):
viterbi_lattice[0, i] = log_startprob[i] + framelogprob[0, i]
viterbi_trace[0, i] = 0
list_of_growing_viterbi_paths.append([np.argmax(viterbi_lattice[0])])
# Induction
for t in range(1, n_samples):
for i in range(n_components):
for j in range(n_components):
work_buffer[j] = (log_transmat[j, i]
+ viterbi_lattice[t - 1, j])
prev_state = np.argmax(work_buffer)
viterbi_lattice[t, i] = work_buffer[prev_state] + framelogprob[t, i]
viterbi_trace[t, i] = prev_state
best_state_at_t = np.argmax(viterbi_lattice[t, :])
viterbi_path = [0 for k in range(t+1)]
viterbi_path[t] = best_state_at_t
# backtrack
for k in range(t, 0, -1):
forward_z = viterbi_path[k]
viterbi_path[k-1] = int(viterbi_trace[k, forward_z])
list_of_growing_viterbi_paths.append(viterbi_path)
return list_of_growing_viterbi_paths, n_samples, n_components
else:
raise Exception('model of type %s is not supported by fast_growing_viterbi_paths_cal.'%(type(model),))
def rgba_to_rgb_using_white_bg(rgb_array, alpha):
return [i*alpha+(1-alpha) for i in rgb_array]
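# Illustrative sketch (not part of the original source): compositing an RGBA
# colour over a white background computes c*alpha + 1*(1-alpha) per channel,
# with channels in the 0..1 range.
def _example_rgba_to_rgb_using_white_bg():
    pure_red = [1.0, 0.0, 0.0]
    assert rgba_to_rgb_using_white_bg(pure_red, 0.5) == [1.0, 0.5, 0.5]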
def gray_a_pixel(pixel):
import numpy as np
p = np.array(pixel)
p += 50
p /= 2
return (p[0], p[1], p[2])
def output_growing_viterbi_path_img(
list_of_growing_viterbi_paths,
hidden_state_amount,
output_file_path,
list_of_lock_t=None,
):
from matplotlib.pyplot import cm
import numpy as np
height = len(list_of_growing_viterbi_paths)
width = len(list_of_growing_viterbi_paths[-1])
colors = [tuple((256*i).astype(int)) for i in cm.rainbow(np.linspace(0, 1, hidden_state_amount))]
output_pixels = []
for idx, vp in enumerate(list_of_growing_viterbi_paths):
black_to_append = width-len(vp)
row = [colors[i] for i in vp]+[(0,0,0) for i in range(black_to_append)]
if list_of_lock_t is not None:
lock_t = list_of_lock_t[idx]
if lock_t is not None:
for i in range(lock_t+1):
row[i] = gray_a_pixel(row[i])
output_pixels += row
from PIL import Image
output_dir = os.path.dirname(output_file_path)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
output_img = Image.new("RGB", (width, height)) # mode,(width,height)
output_img.putdata(output_pixels)
output_img.save(
output_file_path,
)
def _norm_loglik(a):
from scipy.misc import logsumexp
s = logsumexp(a)
a = np.exp(a-s)
return a
def visualize_viterbi_alog(X, model, path):
import hmmlearn.hmm
import hongminhmmpkg.hmm
import bnpy
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
fig = plt.figure()
ax = fig.add_subplot(111)
nodes_x = []
nodes_y = []
nodes_color = []
edges = []
if issubclass(type(model), hmmlearn.hmm._BaseHMM):
from sklearn.utils import check_array, check_random_state
from scipy.misc import logsumexp
X = check_array(X)
framelogprob = model._compute_log_likelihood(X[:])
n_samples, n_components = framelogprob.shape
log_startprob = log_mask_zero(model.startprob_)
log_transmat = log_mask_zero(model.transmat_)
work_buffer = np.empty(n_components)
connection = np.empty((n_components, n_components))
list_of_growing_viterbi_paths = []
viterbi_lattice = np.zeros((n_samples, n_components))
viterbi_trace = np.zeros((n_samples, n_components))
for i in range(n_components):
viterbi_lattice[0, i] = log_startprob[i] + framelogprob[0, i]
viterbi_trace[0, i] = 0
tmp = _norm_loglik(viterbi_lattice[0])
for i in range(n_components):
nodes_x.append(0)
nodes_y.append(i)
nodes_color.append('#%02x%02x%02x%02x'%(0, 0, 0, int(255*tmp[i])))
# Induction
for t in range(1, n_samples):
for i in range(n_components):
for j in range(n_components):
work_buffer[j] = (log_transmat[j, i]
+ viterbi_lattice[t - 1, j])
connection[j, i] = log_transmat[j, i]+framelogprob[t, i]
prev_state = np.argmax(work_buffer)
viterbi_lattice[t, i] = work_buffer[prev_state] + framelogprob[t, i]
viterbi_trace[t, i] = prev_state
tmp = _norm_loglik(connection.flatten()).reshape((n_components, n_components))
for i in range(n_components):
for j in range(n_components):
edges.append((t-1,t))
edges.append((j,i))
edges.append('#%02x%02x%02x%02x'%(0, 0, 0, int(255*tmp[j][i])))
edges.append((t-1,t))
edges.append((viterbi_trace[t, i]+0.1,i+0.1))
edges.append('k:')
tmp = _norm_loglik(viterbi_lattice[t])
for i in range(n_components):
nodes_x.append(t)
nodes_y.append(i)
nodes_color.append('#%02x%02x%02x%02x'%(0, 0, 0, int(255*tmp[i])))
else:
raise Exception('model of type %s is not supported by visualize_viterbi_alog.'%(type(model),))
ax.plot(*edges)
ax.scatter(x=nodes_x, y=nodes_y, c=nodes_color)
fig.set_size_inches(0.5*n_samples, 0.25*n_components)
fig.savefig(path)
print 'done one viterbi alog graph'
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods to upload source to GCS and call Cloud Build service."""
import gzip
import os
import StringIO
import tarfile
from apitools.base.py import encoding
from docker import docker
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.storage import storage_api
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
# Paths that shouldn't be ignored client-side.
# Behavioral parity with github.com/docker/docker-py.
BLACKLISTED_DOCKERIGNORE_PATHS = ['Dockerfile', '.dockerignore']
class UploadFailedError(exceptions.Error):
"""Raised when the source fails to upload to GCS."""
def _CreateTar(source_dir, gen_files, paths, gz):
"""Create tarfile for upload to GCS.
The third-party code closes the tarfile after creating it, so we cannot
write generated files after calling docker.utils.tar, since gzipped
tarfiles can't be reopened in append mode.
Args:
source_dir: the directory to be archived
gen_files: Generated files to write to the tar
paths: allowed paths in the tarfile
gz: gzipped tarfile object
"""
root = os.path.abspath(source_dir)
t = tarfile.open(mode='w', fileobj=gz)
for path in sorted(paths):
full_path = os.path.join(root, path)
t.add(full_path, arcname=path, recursive=False)
for name, contents in gen_files.iteritems():
genfileobj = StringIO.StringIO(contents)
tar_info = tarfile.TarInfo(name=name)
tar_info.size = len(genfileobj.buf)
t.addfile(tar_info, fileobj=genfileobj)
genfileobj.close()
t.close()
def _GetDockerignoreExclusions(source_dir, gen_files):
"""Helper function to read the .dockerignore on disk or in generated files.
Args:
source_dir: the path to the root directory.
gen_files: dict of filename to contents of generated files.
Returns:
Set of exclusion expressions from the dockerignore file.
"""
dockerignore = os.path.join(source_dir, '.dockerignore')
exclude = set()
ignore_contents = None
if os.path.exists(dockerignore):
with open(dockerignore) as f:
ignore_contents = f.read()
else:
ignore_contents = gen_files.get('.dockerignore')
if ignore_contents:
# Read the exclusions from the dockerignore, filtering out blank lines.
exclude = set(filter(bool, ignore_contents.splitlines()))
# Remove paths that shouldn't be excluded on the client.
exclude -= set(BLACKLISTED_DOCKERIGNORE_PATHS)
return exclude
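# Illustrative sketch (not part of the original source): when no
# .dockerignore exists on disk the exclusions come from the generated files,
# and the blacklisted entries are always dropped.
def _ExampleDockerignoreExclusions():
  gen_files = {'.dockerignore': 'node_modules\n\nDockerfile\n'}
  exclude = _GetDockerignoreExclusions('/nonexistent-dir', gen_files)
  assert exclude == set(['node_modules'])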
def _GetIncludedPaths(source_dir, exclude, skip_files=None):
"""Helper function to filter paths in root using dockerignore and skip_files.
We iterate separately to filter on skip_files in order to preserve expected
behavior (standard deployment skips directories if they contain only files
ignored by skip_files).
Args:
source_dir: the path to the root directory.
exclude: the .dockerignore file exclusions.
skip_files: the regex for files to skip. If None, only dockerignore is used
to filter.
Returns:
Set of paths (relative to source_dir) to include.
"""
# This code replicates how docker.utils.tar() finds the root
# and excluded paths.
root = os.path.abspath(source_dir)
# Get set of all paths other than exclusions from dockerignore.
paths = docker.utils.exclude_paths(root, exclude)
# Also filter on the ignore regex from the app.yaml.
if skip_files:
included_paths = set(util.FileIterator(source_dir, skip_files))
# FileIterator replaces all path separators with '/', so reformat
# the results to compare with the first set.
included_paths = {
p.replace('/', os.path.sep) for p in included_paths}
paths.intersection_update(included_paths)
return paths
def UploadSource(source_dir, object_ref, gen_files=None, skip_files=None):
"""Upload a gzipped tarball of the source directory to GCS.
Note: To provide parity with docker's behavior, we must respect .dockerignore.
Args:
source_dir: the directory to be archived.
object_ref: storage_util.ObjectReference, the Cloud Storage location to
upload the source tarball to.
gen_files: dict of filename to (str) contents of generated config and
source context files.
skip_files: optional, a parsed regex for paths and files to skip, from
the service yaml.
Raises:
UploadFailedError: when the source fails to upload to GCS.
"""
gen_files = gen_files or {}
dockerignore_contents = _GetDockerignoreExclusions(source_dir, gen_files)
included_paths = _GetIncludedPaths(source_dir,
dockerignore_contents,
skip_files)
# We can't use tempfile.NamedTemporaryFile here because ... Windows.
# See https://bugs.python.org/issue14243. There are small cleanup races
# during process termination that will leave artifacts on the filesystem.
# eg, CTRL-C on windows leaves both the directory and the file. Unavoidable.
# On Posix, `kill -9` has similar behavior, but CTRL-C allows cleanup.
with files.TemporaryDirectory() as temp_dir:
f = open(os.path.join(temp_dir, 'src.tgz'), 'w+b')
with gzip.GzipFile(mode='wb', fileobj=f) as gz:
_CreateTar(source_dir, gen_files, included_paths, gz)
f.close()
storage_client = storage_api.StorageClient()
storage_client.CopyFileToGCS(object_ref.bucket_ref, f.name, object_ref.name)
def GetServiceTimeoutString(timeout_property_str):
if timeout_property_str is not None:
try:
# A bare number is interpreted as seconds.
build_timeout_secs = int(timeout_property_str)
except ValueError:
build_timeout_duration = times.ParseDuration(timeout_property_str)
build_timeout_secs = int(build_timeout_duration.total_seconds)
return str(build_timeout_secs) + 's'
return None
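# Illustrative sketch (not part of the original source): a bare number is
# interpreted as seconds and None is passed through unchanged; other strings
# go through times.ParseDuration.
def _ExampleGetServiceTimeoutString():
  assert GetServiceTimeoutString('120') == '120s'
  assert GetServiceTimeoutString(None) is None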
class InvalidBuildError(ValueError):
"""Error indicating that ExecuteCloudBuild was given a bad Build message."""
def __init__(self, field):
super(InvalidBuildError, self).__init__(
'Field [{}] was provided, but should not have been. '
'You may be using an improper Cloud Build pipeline.'.format(field))
def _ValidateBuildFields(build, fields):
"""Validates that a Build message doesn't have fields that we populate."""
for field in fields:
if getattr(build, field, None) is not None:
raise InvalidBuildError(field)
def GetDefaultBuild(output_image):
"""Get the default build for this runtime.
This build just uses the latest docker builder image (location pulled from the
app/container_builder_image property) to run a `docker build` with the given
tag.
Args:
output_image: GCR location for the output docker image (e.g.
`gcr.io/test-gae/hardcoded-output-tag`)
Returns:
Build, a CloudBuild Build message with the given steps (ready to be given to
FixUpBuild).
"""
messages = cloudbuild_util.GetMessagesModule()
builder = properties.VALUES.app.container_builder_image.Get()
log.debug('Using builder image: [{0}]'.format(builder))
return messages.Build(
steps=[messages.BuildStep(name=builder,
args=['build', '-t', output_image, '.'])],
images=[output_image])
def FixUpBuild(build, object_ref):
"""Return a modified Build object with run-time values populated.
Specifically:
- `source` is pulled from the given object_ref
- `timeout` comes from the app/cloud_build_timeout property
- `logsBucket` uses the bucket from object_ref
Args:
build: cloudbuild Build message. The Build to modify. Fields 'timeout',
'source', and 'logsBucket' will be added and may not be given.
object_ref: storage_util.ObjectReference, the Cloud Storage location of the
source tarball.
Returns:
Build, (copy) of the given Build message with the specified fields
populated.
Raises:
InvalidBuildError: if the Build message had one of the fields this function
sets pre-populated
"""
messages = cloudbuild_util.GetMessagesModule()
# Make a copy, so we don't modify the original
build = encoding.CopyProtoMessage(build)
# Check that nothing we're expecting to fill in has been set already
_ValidateBuildFields(build, ('source', 'timeout', 'logsBucket'))
build.timeout = GetServiceTimeoutString(
properties.VALUES.app.cloud_build_timeout.Get())
build.logsBucket = object_ref.bucket
build.source = messages.Source(
storageSource=messages.StorageSource(
bucket=object_ref.bucket,
object=object_ref.name,
),
)
return build
|
|
################################################################################
# Copyright (C) 2013-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `dot` module.
"""
import unittest
import numpy as np
import scipy
from numpy import testing
from ..dot import Dot, SumMultiply
from ..gaussian import Gaussian, GaussianARD
from bayespy.nodes import GaussianGamma
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestSumMultiply(TestCase):
def test_parent_validity(self):
"""
Test that the parent nodes are validated properly in SumMultiply
"""
V = GaussianARD(1, 1)
X = Gaussian(np.ones(1), np.identity(1))
Y = Gaussian(np.ones(3), np.identity(3))
Z = Gaussian(np.ones(5), np.identity(5))
A = SumMultiply(X, ['i'])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply('i', X)
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(X, ['i'], ['i'])
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply('i->i', X)
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply(X, ['i'], Y, ['j'], ['i','j'])
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply('i,j->ij', X, Y)
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply(V, [], X, ['i'], Y, ['i'], [])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(',i,i->', V, X, Y)
self.assertEqual(A.dims, ((), ()))
# Gaussian-gamma parents
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], ['i'])
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
A = SumMultiply('i,i->i', Y, C)
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], [])
self.assertEqual(A.dims, ((), (), (), ()))
A = SumMultiply('i,i->', Y, C)
self.assertEqual(A.dims, ((), (), (), ()))
# Error: not enough inputs
self.assertRaises(ValueError,
SumMultiply)
self.assertRaises(ValueError,
SumMultiply,
X)
# Error: too many keys
self.assertRaises(ValueError,
SumMultiply,
Y,
['i', 'j'])
self.assertRaises(ValueError,
SumMultiply,
'ij',
Y)
# Error: not broadcastable
self.assertRaises(ValueError,
SumMultiply,
Y,
['i'],
Z,
['i'])
self.assertRaises(ValueError,
SumMultiply,
'i,i',
Y,
Z)
# Error: output key not in inputs
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['j'])
self.assertRaises(ValueError,
SumMultiply,
'i->j',
X)
# Error: non-unique input keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'ii',
X)
# Error: non-unique output keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'i->ii',
X)
# String has too many '->'
self.assertRaises(ValueError,
SumMultiply,
'i->i->i',
X)
# String has too many input nodes
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X)
# Same parent several times
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
X)
# Same parent several times via deterministic node
Xh = SumMultiply('i->i', X)
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
Xh)
def test_message_to_child(self):
"""
Test the message from SumMultiply to its children.
"""
def compare_moments(u0, u1, *args):
Y = SumMultiply(*args)
u_Y = Y.get_moments()
self.assertAllClose(u_Y[0], u0)
self.assertAllClose(u_Y[1], u1)
# Test constant parent
y = np.random.randn(2,3,4)
compare_moments(y,
linalg.outer(y, y, ndim=2),
'ij->ij',
y)
# Do nothing for 2-D array
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
compare_moments(y[0],
y[1],
'ij->ij',
Y)
compare_moments(y[0],
y[1],
Y,
[0,1],
[0,1])
# Sum over the rows of a matrix
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
mu = np.einsum('...ij->...j', y[0])
cov = np.einsum('...ijkl->...jl', y[1])
compare_moments(mu,
cov,
'ij->j',
Y)
compare_moments(mu,
cov,
Y,
[0,1],
[1])
# Inner product of three vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
X3 = GaussianARD(np.random.randn(7,6,5,2),
np.random.rand(7,6,5,2),
plates=(7,6,5),
shape=(2,))
x3 = X3.get_moments()
mu = np.einsum('...i,...i,...i->...', x1[0], x2[0], x3[0])
cov = np.einsum('...ij,...ij,...ij->...', x1[1], x2[1], x3[1])
compare_moments(mu,
cov,
'i,i,i',
X1,
X2,
X3)
compare_moments(mu,
cov,
'i,i,i->',
X1,
X2,
X3)
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9])
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9],
[])
# Outer product of two vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(5,),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
mu = np.einsum('...i,...j->...ij', x1[0], x2[0])
cov = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
compare_moments(mu,
cov,
'i,j->ij',
X1,
X2)
compare_moments(mu,
cov,
X1,
[9],
X2,
[7],
[9,7])
# Matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ik,...kj->...ij', y1[0], y2[0])
cov = np.einsum('...ikjl,...kmln->...imjn', y1[1], y2[1])
compare_moments(mu,
cov,
'ik,kj->ij',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','k'],
Y2,
['k','j'],
['i','j'])
# Trace of a matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ij,...ji->...', y1[0], y2[0])
cov = np.einsum('...ikjl,...kilj->...', y1[1], y2[1])
compare_moments(mu,
cov,
'ij,ji',
Y1,
Y2)
compare_moments(mu,
cov,
'ij,ji->',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'])
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'],
[])
# Vector-matrix-vector product
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
plates=(),
shape=(3,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
Y = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y = Y.get_moments()
mu = np.einsum('...i,...ij,...j->...', x1[0], y[0], x2[0])
cov = np.einsum('...ia,...ijab,...jb->...', x1[1], y[1], x2[1])
compare_moments(mu,
cov,
'i,ij,j',
X1,
Y,
X2)
compare_moments(mu,
cov,
X1,
[1],
Y,
[1,2],
X2,
[2])
# Complex sum-product of 0-D, 1-D, 2-D and 3-D arrays
V = GaussianARD(np.random.randn(7,6,5),
np.random.rand(7,6,5),
plates=(7,6,5),
shape=())
v = V.get_moments()
X = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x = X.get_moments()
Y = GaussianARD(np.random.randn(3,4),
np.random.rand(3,4),
plates=(5,),
shape=(3,4))
y = Y.get_moments()
Z = GaussianARD(np.random.randn(4,2,3),
np.random.rand(4,2,3),
plates=(6,5),
shape=(4,2,3))
z = Z.get_moments()
mu = np.einsum('...,...i,...kj,...jik->...k', v[0], x[0], y[0], z[0])
cov = np.einsum('...,...ia,...kjcb,...jikbac->...kc', v[1], x[1], y[1], z[1])
compare_moments(mu,
cov,
',i,kj,jik->k',
V,
X,
Y,
Z)
compare_moments(mu,
cov,
V,
[],
X,
['i'],
Y,
['k','j'],
Z,
['j','i','k'],
['k'])
#
# Gaussian-gamma parents
#
# Outer product of vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianGamma(
np.random.randn(6,1,2),
random.covariance(2),
np.random.rand(6,1),
np.random.rand(6,1),
plates=(6,1)
)
x2 = X2.get_moments()
Y = SumMultiply('i,j->ij', X1, X2)
u = Y._message_to_child()
y = np.einsum('...i,...j->...ij', x1[0], x2[0])
yy = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
self.assertAllClose(u[0], y)
self.assertAllClose(u[1], yy)
self.assertAllClose(u[2], x2[2])
self.assertAllClose(u[3], x2[3])
pass
def test_message_to_parent(self):
"""
Test the message from SumMultiply node to its parents.
"""
data = 2
tau = 3
def check_message(true_m0, true_m1, parent, *args, F=None):
if F is None:
A = SumMultiply(*args)
B = GaussianARD(A, tau)
B.observe(data*np.ones(A.plates + A.dims[0]))
else:
A = F
(A_m0, A_m1) = A._message_to_parent(parent)
self.assertAllClose(true_m0, A_m0)
self.assertAllClose(true_m1, A_m1)
pass
# Check: different message to each of multiple parents
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * x2[0]
m1 = -0.5 * tau * x2[1] * np.identity(2)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[9],
X2,
[9],
[9])
m0 = tau * data * x1[0]
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
'i,i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[9],
X2,
[9],
[9])
# Check: key not in output
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
m0 = tau * data * np.ones(2)
m1 = -0.5 * tau * np.ones((2,2))
check_message(m0, m1, 0,
'i',
X1)
check_message(m0, m1, 0,
'i->',
X1)
check_message(m0, m1, 0,
X1,
[9])
check_message(m0, m1, 0,
X1,
[9],
[])
# Check: key not in some input
X1 = GaussianARD(np.random.randn(),
np.random.rand())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * np.sum(x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * np.identity(2),
axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
[9],
[9])
m0 = tau * data * x1[0] * np.ones(2)
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
[9],
[9])
# Check: keys in different order
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(2,3),
np.random.rand(2,3),
ndim=2)
y2 = Y2.get_moments()
m0 = tau * data * y2[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y2[1] * misc.identity(2,3))
check_message(m0, m1, 0,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 0,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
m0 = tau * data * y1[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y1[1] * misc.identity(3,2))
check_message(m0, m1, 1,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 1,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
# Check: plates when different dimensionality
X1 = GaussianARD(np.random.randn(5),
np.random.rand(5),
shape=(),
plates=(5,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(5,3),
np.random.rand(5,3),
shape=(3,),
plates=(5,))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,3)) * x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * misc.identity(3), axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
['i'],
['i'])
m0 = tau * data * x1[0][:,np.newaxis] * np.ones((5,3))
m1 = -0.5 * tau * x1[1][:,np.newaxis,np.newaxis] * misc.identity(3)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node has the
# same plates
X1 = GaussianARD(np.random.randn(5,4,3),
np.random.rand(5,4,3),
shape=(3,),
plates=(5,4))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.ones((5,4,3)) * x2[0]
m1 = -0.5 * tau * x2[1] * misc.identity(3)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node does
# not have that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1))
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1))
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when the node
# only broadcasts that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(1,1))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1), keepdims=True)
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1),
keepdims=True)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: broadcasted dimensions
X1 = GaussianARD(np.random.randn(1,1),
np.random.rand(1,1),
ndim=2)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((3,2)) * x2[0],
keepdims=True)
m1 = -0.5 * tau * np.sum(misc.identity(3,2) * x2[1],
keepdims=True)
check_message(m0, m1, 0,
'ij,ij->ij',
X1,
X2)
check_message(m0, m1, 0,
X1,
[0,1],
X2,
[0,1],
[0,1])
m0 = tau * data * np.ones((3,2)) * x1[0]
m1 = -0.5 * tau * misc.identity(3,2) * x1[1]
check_message(m0, m1, 1,
'ij,ij->ij',
X1,
X2)
check_message(m0, m1, 1,
X1,
[0,1],
X2,
[0,1],
[0,1])
# Check: non-ARD observations
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
Lambda = np.array([[2, 1.5], [1.5, 2]])
F = SumMultiply('i->i', X1)
Y = Gaussian(F, Lambda)
y = np.random.randn(2)
Y.observe(y)
m0 = np.dot(Lambda, y)
m1 = -0.5 * Lambda
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Check: mask with same shape
X1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
x1 = X1.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i->i', X1)
Y = GaussianARD(F, tau, ndim=1)
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * mask[:,np.newaxis] * np.ones(2)
m1 = -0.5 * tau * mask[:,np.newaxis,np.newaxis] * np.identity(2)
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Check: mask larger
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
x2 = X2.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i,i->i', X1, X2)
Y = GaussianARD(F, tau,
plates=(3,),
ndim=1)
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0], axis=0)
m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis]
* x2[1]
* np.identity(2),
axis=0)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'],
F=F)
# Check: mask for broadcasted plate
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1,
plates=(1,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1,
plates=(3,))
x2 = X2.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i,i->i', X1, X2)
Y = GaussianARD(F, tau,
plates=(3,),
ndim=1)
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0],
axis=0,
keepdims=True)
m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis]
* x2[1]
* np.identity(2),
axis=0,
keepdims=True)
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Check: Gaussian-gamma parents
X1 = GaussianGamma(
np.random.randn(2),
random.covariance(2),
np.random.rand(),
np.random.rand()
)
x1 = X1.get_moments()
X2 = GaussianGamma(
np.random.randn(2),
random.covariance(2),
np.random.rand(),
np.random.rand()
)
x2 = X2.get_moments()
F = SumMultiply('i,i->i', X1, X2)
V = random.covariance(2)
y = np.random.randn(2)
Y = Gaussian(F, V)
Y.observe(y)
m0 = np.dot(V, y) * x2[0]
m1 = -0.5 * V * x2[1]
m2 = -0.5 * np.einsum('i,ij,j', y, V, y) * x2[2]#linalg.inner(V, x2[2], ndim=2)
m3 = 0.5 * 2 #linalg.chol_logdet(linalg.chol(V)) + 2*x2[3]
m = F._message_to_parent(0)
self.assertAllClose(m[0], m0)
self.assertAllClose(m[1], m1)
self.assertAllClose(m[2], m2)
self.assertAllClose(m[3], m3)
pass
def check_performance(scale=1e2):
"""
Tests that the implementation of SumMultiply is efficient.
This is not a unit test (not run automatically), but rather a
performance test, which you may run to test the performance of the
node. A naive implementation of SumMultiply would run out of memory in
some cases; this method checks that the actual implementation handles
those cases efficiently.
"""
# Check: Broadcasted plates are computed efficiently
# (bad implementation will take a long time to run)
s = scale
X1 = GaussianARD(np.random.randn(s,s),
np.random.rand(s,s),
shape=(s,),
plates=(s,))
X2 = GaussianARD(np.random.randn(s,1,s),
np.random.rand(s,1,s),
shape=(s,),
plates=(s,1))
F = SumMultiply('i,i', X1, X2)
Y = GaussianARD(F, 1)
Y.observe(np.ones((s,s)))
try:
F._message_to_parent(1)
except Exception as e:
print(e)
print('SOMETHING BAD HAPPENED')
# Check: Broadcasted dimensions are computed efficiently
# (bad implementation will run out of memory)
pass
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for curses-based CLI widgets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.debug.cli import curses_widgets
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
RTL = debugger_cli_common.RichTextLines
CNH = curses_widgets.CursesNavigationHistory
class CNHTest(test_util.TensorFlowTestCase):
def testConstructorWorks(self):
CNH(10)
def testConstructorWithInvalidCapacityErrors(self):
with self.assertRaises(ValueError):
CNH(0)
with self.assertRaises(ValueError):
CNH(-1)
def testInitialStateIsCorrect(self):
nav_history = CNH(10)
self.assertEqual(0, nav_history.size())
self.assertFalse(nav_history.can_go_forward())
self.assertFalse(nav_history.can_go_back())
with self.assertRaisesRegex(ValueError, "Empty navigation history"):
nav_history.go_back()
with self.assertRaisesRegex(ValueError, "Empty navigation history"):
nav_history.go_forward()
with self.assertRaisesRegex(ValueError, "Empty navigation history"):
nav_history.update_scroll_position(3)
def testAddOneItemWorks(self):
nav_history = CNH(10)
nav_history.add_item("foo", RTL(["bar"]), 0)
self.assertEqual(1, nav_history.size())
self.assertEqual(0, nav_history.pointer())
self.assertFalse(nav_history.can_go_forward())
self.assertFalse(nav_history.can_go_back())
output = nav_history.go_back()
self.assertEqual("foo", output.command)
self.assertEqual(["bar"], output.screen_output.lines)
self.assertEqual(0, output.scroll_position)
def testAddItemsBeyondCapacityWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_output"]), 0)
nav_history.add_item("bar", RTL(["bar_output"]), 0)
self.assertEqual(2, nav_history.size())
self.assertEqual(1, nav_history.pointer())
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
nav_history.add_item("baz", RTL(["baz_output"]), 0)
self.assertEqual(2, nav_history.size())
self.assertEqual(1, nav_history.pointer())
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
item = nav_history.go_back()
self.assertEqual("bar", item.command)
self.assertFalse(nav_history.can_go_back())
self.assertTrue(nav_history.can_go_forward())
item = nav_history.go_forward()
self.assertEqual("baz", item.command)
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
def testAddItemFromNonLatestPointerPositionWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_output"]), 0)
nav_history.add_item("bar", RTL(["bar_output"]), 0)
nav_history.go_back()
nav_history.add_item("baz", RTL(["baz_output"]), 0)
self.assertEqual(2, nav_history.size())
self.assertEqual(1, nav_history.pointer())
self.assertTrue(nav_history.can_go_back())
self.assertFalse(nav_history.can_go_forward())
item = nav_history.go_back()
self.assertEqual("foo", item.command)
item = nav_history.go_forward()
self.assertEqual("baz", item.command)
def testUpdateScrollPositionOnLatestItemWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
nav_history.update_scroll_position(1)
nav_history.go_back()
item = nav_history.go_forward()
self.assertEqual("bar", item.command)
self.assertEqual(1, item.scroll_position)
def testUpdateScrollPositionOnOldItemWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
item = nav_history.go_back()
self.assertEqual("foo", item.command)
self.assertEqual(0, item.scroll_position)
nav_history.update_scroll_position(1)
nav_history.go_forward()
item = nav_history.go_back()
self.assertEqual("foo", item.command)
self.assertEqual(1, item.scroll_position)
item = nav_history.go_forward()
self.assertEqual("bar", item.command)
self.assertEqual(0, item.scroll_position)
def testRenderWithEmptyHistoryWorks(self):
nav_history = CNH(2)
output = nav_history.render(40, "prev", "next")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT,
output.lines[0])
self.assertEqual({}, output.font_attr_segs)
def testRenderLatestWithSufficientLengthWorks(self):
nav_history = CNH(2)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
output = nav_history.render(
40,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | bar",
output.lines[0])
self.assertEqual(2, output.font_attr_segs[0][0][0])
self.assertEqual(5, output.font_attr_segs[0][0][1])
self.assertEqual("prev", output.font_attr_segs[0][0][2].content)
self.assertEqual(12, output.font_attr_segs[0][1][0])
self.assertEqual(15, output.font_attr_segs[0][1][1])
self.assertEqual("green", output.font_attr_segs[0][1][2])
def testRenderOldButNotOldestWithSufficientLengthWorks(self):
nav_history = CNH(3)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
nav_history.add_item("baz", RTL(["baz_out", "more_baz_out"]), 0)
nav_history.go_back()
output = nav_history.render(
40,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | (-1) bar",
output.lines[0])
self.assertEqual(2, output.font_attr_segs[0][0][0])
self.assertEqual(5, output.font_attr_segs[0][0][1])
self.assertEqual("prev", output.font_attr_segs[0][0][2].content)
self.assertEqual(6, output.font_attr_segs[0][1][0])
self.assertEqual(9, output.font_attr_segs[0][1][1])
self.assertEqual("next", output.font_attr_segs[0][1][2].content)
self.assertEqual(12, output.font_attr_segs[0][2][0])
self.assertEqual(17, output.font_attr_segs[0][2][1])
self.assertEqual("yellow", output.font_attr_segs[0][2][2])
self.assertEqual(17, output.font_attr_segs[0][3][0])
self.assertEqual(20, output.font_attr_segs[0][3][1])
self.assertEqual("yellow", output.font_attr_segs[0][3][2])
def testRenderOldestWithSufficientLengthWorks(self):
nav_history = CNH(3)
nav_history.add_item("foo", RTL(["foo_out", "more_foo_out"]), 0)
nav_history.add_item("bar", RTL(["bar_out", "more_bar_out"]), 0)
nav_history.add_item("baz", RTL(["baz_out", "more_baz_out"]), 0)
nav_history.go_back()
nav_history.go_back()
output = nav_history.render(
40,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | (-2) foo",
output.lines[0])
self.assertEqual(6, output.font_attr_segs[0][0][0])
self.assertEqual(9, output.font_attr_segs[0][0][1])
self.assertEqual("next", output.font_attr_segs[0][0][2].content)
self.assertEqual(12, output.font_attr_segs[0][1][0])
self.assertEqual(17, output.font_attr_segs[0][1][1])
self.assertEqual("yellow", output.font_attr_segs[0][1][2])
self.assertEqual(17, output.font_attr_segs[0][2][0])
self.assertEqual(20, output.font_attr_segs[0][2][1])
self.assertEqual("yellow", output.font_attr_segs[0][2][2])
def testRenderWithInsufficientLengthWorks(self):
nav_history = CNH(2)
nav_history.add_item("long_command", RTL(["output"]), 0)
output = nav_history.render(
15,
"prev",
"next",
latest_command_attribute="green",
old_command_attribute="yellow")
self.assertEqual(1, len(output.lines))
self.assertEqual(
"| " + CNH.BACK_ARROW_TEXT + " " + CNH.FORWARD_ARROW_TEXT +
" | lon",
output.lines[0])
self.assertEqual(12, output.font_attr_segs[0][0][0])
self.assertEqual(15, output.font_attr_segs[0][0][1])
self.assertEqual("green", output.font_attr_segs[0][0][2])
if __name__ == "__main__":
googletest.main()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import logging
import os
import random
import re
import sys
import time
import Queue
import threading
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
from pgoapi.utilities import f2i, get_cell_ids
import cell_workers
from base_task import BaseTask
from plugin_loader import PluginLoader
from api_wrapper import ApiWrapper
from cell_workers.utils import distance
from event_manager import EventManager
from human_behaviour import sleep
from item_list import Item
from metrics import Metrics
from sleep_schedule import SleepSchedule
from pokemongo_bot.event_handlers import LoggingHandler, SocketIoHandler, ColoredLoggingHandler, SocialHandler
from pokemongo_bot.socketio_server.runner import SocketIoRunner
from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.datastore import _init_database, Datastore
from worker_result import WorkerResult
from tree_config_builder import ConfigException, MismatchTaskApiVersion, TreeConfigBuilder
import inventory
from inventory import init_inventory
from sys import platform as _platform
import struct
class PokemonGoBot(Datastore):
@property
def position(self):
return self.api.actual_lat, self.api.actual_lng, self.api.actual_alt
@property
def noised_position(self):
return self.api.noised_lat, self.api.noised_lng, self.api.noised_alt
#@position.setter # these should be called through api now that gps replication is there...
#def position(self, position_tuple):
# self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple
@property
def player_data(self):
"""
Returns the player data as received from the API.
:return: The player data.
:rtype: dict
"""
return self._player
def __init__(self, config):
# Database connection MUST be setup before migrations will work
self.database = _init_database('/data/{}.db'.format(config.username))
self.config = config
super(PokemonGoBot, self).__init__()
self.fort_timeouts = dict()
self.pokemon_list = json.load(
open(os.path.join(_base_dir, 'data', 'pokemon.json'))
)
self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
self.metrics = Metrics(self)
self.latest_inventory = None
self.cell = None
self.recent_forts = [None] * config.forts_max_circle_size
self.tick_count = 0
self.softban = False
self.start_position = None
self.last_map_object = None
self.last_time_map_object = 0
self.logger = logging.getLogger(type(self).__name__)
self.alt = self.config.gps_default_altitude
# Make our own copy of the workers for this instance
self.workers = []
# Theading setup for file writing
self.web_update_queue = Queue.Queue(maxsize=1)
self.web_update_thread = threading.Thread(target=self.update_web_location_worker)
self.web_update_thread.start()
# Heartbeat limiting
self.heartbeat_threshold = self.config.heartbeat_threshold
self.heartbeat_counter = 0
self.last_heartbeat = time.time()
def start(self):
self._setup_event_system()
self._setup_logging()
self.sleep_schedule = SleepSchedule(self, self.config.sleep_schedule) if self.config.sleep_schedule else None
if self.sleep_schedule: self.sleep_schedule.work()
self._setup_api()
self._load_recent_forts()
init_inventory(self)
self.display_player_info()
self._print_character_info()
if self.config.pokemon_bag_show_at_start and self.config.pokemon_bag_pokemon_info:
self._print_list_pokemon()
random.seed()
def _setup_event_system(self):
handlers = []
if self.config.logging_color:
handlers.append(ColoredLoggingHandler())
else:
handlers.append(LoggingHandler())
if self.config.enable_social:
handlers.append(SocialHandler(self))
if self.config.websocket_server_url:
if self.config.websocket_start_embedded_server:
self.sio_runner = SocketIoRunner(self.config.websocket_server_url)
self.sio_runner.start_listening_async()
websocket_handler = SocketIoHandler(
self,
self.config.websocket_server_url
)
handlers.append(websocket_handler)
if self.config.websocket_remote_control:
remote_control = WebsocketRemoteControl(self).start()
self.event_manager = EventManager(*handlers)
self._register_events()
if self.config.show_events:
self.event_manager.event_report()
sys.exit(1)
    # Registering an event:
    # self.event_manager.register_event("location", parameters=['lat', 'lng'])
    #
    # Emitting the event is enough to add logging and send a websocket
    # message:
    # self.event_manager.emit('location', level='info', data={'lat': 1, 'lng': 1})
def _register_events(self):
self.event_manager.register_event(
'location_found',
parameters=('position', 'location')
)
self.event_manager.register_event('api_error')
self.event_manager.register_event('config_error')
self.event_manager.register_event('login_started')
self.event_manager.register_event('login_failed')
self.event_manager.register_event('login_successful')
self.event_manager.register_event('set_start_location')
self.event_manager.register_event('load_cached_location')
self.event_manager.register_event('location_cache_ignored')
# ignore candy above threshold
self.event_manager.register_event(
'ignore_candy_above_thresold',
parameters=(
'name',
'amount',
'threshold'
)
)
self.event_manager.register_event(
'position_update',
parameters=(
'current_position',
'last_position',
'distance', # optional
'distance_unit' # optional
)
)
self.event_manager.register_event(
'path_lap_update',
parameters=(
'number_lap',
'number_lap_max'
)
)
self.event_manager.register_event(
'path_lap_end',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('location_cache_error')
self.event_manager.register_event('bot_start')
self.event_manager.register_event('bot_exit')
self.event_manager.register_event('bot_interrupted')
# sleep stuff
self.event_manager.register_event(
'next_sleep',
parameters=('time',)
)
self.event_manager.register_event(
'bot_sleep',
parameters=(
'time_hms',
'wake'
)
)
# random pause
self.event_manager.register_event(
'next_random_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_pause',
parameters=(
'time_hms',
'resume'
)
)
# recycle stuff
self.event_manager.register_event(
'next_force_recycle',
            parameters=('time',)
)
self.event_manager.register_event('force_recycle')
# random alive pause
self.event_manager.register_event(
'next_random_alive_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_alive_pause',
parameters=(
'time_hms',
'resume'
)
)
# fort stuff
self.event_manager.register_event(
'spun_fort',
parameters=(
'fort_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'lured_pokemon_found',
parameters=(
'fort_id',
'fort_name',
'encounter_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'moving_to_fort',
parameters=(
'fort_name',
'distance'
)
)
self.event_manager.register_event(
'moving_to_lured_fort',
parameters=(
'fort_name',
'distance',
'lure_distance'
)
)
self.event_manager.register_event(
'spun_pokestop',
parameters=(
'pokestop', 'exp', 'items'
)
)
self.event_manager.register_event(
'pokestop_empty',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_out_of_range',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_on_cooldown',
parameters=('pokestop', 'minutes_left')
)
self.event_manager.register_event(
'unknown_spin_result',
parameters=('status_code',)
)
self.event_manager.register_event('pokestop_searching_too_often')
self.event_manager.register_event('arrived_at_fort')
# pokemon stuff
self.event_manager.register_event(
'catchable_pokemon',
parameters=(
'pokemon_id',
'spawn_point_id',
'encounter_id',
'latitude',
'longitude',
'expiration_timestamp_ms',
'pokemon_name'
)
)
self.event_manager.register_event(
'pokemon_appeared',
parameters=(
'pokemon',
'ncp',
'cp',
'iv',
'iv_display',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event('enough_ultraballs')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
'catch_rate',
'ball_name',
'berry_name',
'berry_count'
)
)
self.event_manager.register_event(
'threw_berry',
parameters=(
'berry_name',
'ball_name',
'new_catch_rate'
)
)
self.event_manager.register_event(
'threw_pokeball',
parameters=(
'throw_type',
'spin_label',
'ball_name',
'success_percentage',
'count_left'
)
)
self.event_manager.register_event(
'pokemon_capture_failed',
parameters=('pokemon',)
)
self.event_manager.register_event(
'pokemon_vanished',
parameters=(
'pokemon',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('pokemon_not_in_range')
self.event_manager.register_event('pokemon_inventory_full')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event(
'pokemon_evolved',
parameters=('pokemon', 'iv', 'cp', 'xp', 'candy')
)
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
self.event_manager.register_event('gained_candy', parameters=('quantity', 'type'))
self.event_manager.register_event('catch_limit')
# level up stuff
self.event_manager.register_event(
'level_up',
parameters=(
'previous_level',
'current_level'
)
)
self.event_manager.register_event(
'level_up_reward',
parameters=('items',)
)
# lucky egg
self.event_manager.register_event(
'used_lucky_egg',
parameters=('amount_left',)
)
self.event_manager.register_event('lucky_egg_error')
# softban
self.event_manager.register_event('softban')
self.event_manager.register_event('softban_fix')
self.event_manager.register_event('softban_fix_done')
# egg incubating
self.event_manager.register_event(
'incubate_try',
parameters=(
'incubator_id',
'egg_id'
)
)
self.event_manager.register_event(
'incubate',
parameters=('distance_in_km',)
)
self.event_manager.register_event(
'next_egg_incubates',
parameters=('eggs_left', 'eggs_inc', 'eggs')
)
self.event_manager.register_event('incubator_already_used')
self.event_manager.register_event('egg_already_incubating')
self.event_manager.register_event(
'egg_hatched',
parameters=(
'pokemon',
'cp', 'iv', 'exp', 'stardust', 'candy'
)
)
# discard item
self.event_manager.register_event(
'item_discarded',
parameters=(
'amount', 'item', 'maximum'
)
)
self.event_manager.register_event(
'item_discard_skipped',
parameters=('space',)
)
self.event_manager.register_event(
'item_discard_fail',
parameters=('item',)
)
# inventory
self.event_manager.register_event('inventory_full')
# release
self.event_manager.register_event(
'keep_best_release',
parameters=(
'amount', 'pokemon', 'criteria'
)
)
self.event_manager.register_event(
'future_pokemon_release',
parameters=(
'pokemon', 'cp', 'iv', 'below_iv', 'below_cp', 'cp_iv_logic'
)
)
self.event_manager.register_event(
'pokemon_release',
parameters=('pokemon', 'iv', 'cp', 'candy')
)
# polyline walker
self.event_manager.register_event(
'polyline_request',
parameters=('url',)
)
# cluster
self.event_manager.register_event(
'found_cluster',
parameters=(
'num_points', 'forts', 'radius', 'distance'
)
)
self.event_manager.register_event(
'arrived_at_cluster',
parameters=(
'num_points', 'forts', 'radius'
)
)
# rename
self.event_manager.register_event(
'rename_pokemon',
parameters=('old_name', 'current_name',)
)
self.event_manager.register_event(
'pokemon_nickname_invalid',
parameters=('nickname',)
)
self.event_manager.register_event(
'unset_pokemon_nickname',
parameters=('old_name',)
)
# Move To map pokemon
self.event_manager.register_event(
'move_to_map_pokemon_fail',
parameters=('message',)
)
self.event_manager.register_event(
'move_to_map_pokemon_updated_map',
parameters=('lat', 'lon')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_to',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_encounter',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_move_towards',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_back',
parameters=('last_lat', 'last_lon')
)
self.event_manager.register_event(
'moving_to_pokemon_throught_fort',
parameters=('fort_name', 'distance','poke_name','poke_dist')
)
# cached recent_forts
self.event_manager.register_event('loaded_cached_forts')
self.event_manager.register_event('cached_fort')
self.event_manager.register_event(
'no_cached_forts',
parameters=('path', )
)
self.event_manager.register_event(
'error_caching_forts',
parameters=('path', )
)
# database shit
self.event_manager.register_event('catch_log')
self.event_manager.register_event('evolve_log')
self.event_manager.register_event('login_log')
self.event_manager.register_event('transfer_log')
self.event_manager.register_event('pokestop_log')
self.event_manager.register_event('softban_log')
def tick(self):
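        # One tick of the bot: ping the health record, refresh the surrounding
        # map cell, remember fort cooldowns, renew the session if it is about
        # to expire, then run the workers, stopping at the first one that
        # reports it is still RUNNING.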
self.health_record.heartbeat()
self.cell = self.get_meta_cell()
if self.sleep_schedule: self.sleep_schedule.work()
now = time.time() * 1000
for fort in self.cell["forts"]:
timeout = fort.get("cooldown_complete_timestamp_ms", 0)
if timeout >= now:
self.fort_timeouts[fort["id"]] = timeout
self.tick_count += 1
# Check if session token has expired
self.check_session(self.position)
for worker in self.workers:
if worker.work() == WorkerResult.RUNNING:
return
def get_meta_cell(self):
location = self.position[0:2]
cells = self.find_close_cells(*location)
# Combine all cells into a single dict of the items we care about.
forts = []
wild_pokemons = []
catchable_pokemons = []
for cell in cells:
if "forts" in cell and len(cell["forts"]):
forts += cell["forts"]
if "wild_pokemons" in cell and len(cell["wild_pokemons"]):
wild_pokemons += cell["wild_pokemons"]
if "catchable_pokemons" in cell and len(cell["catchable_pokemons"]):
catchable_pokemons += cell["catchable_pokemons"]
# If there are forts present in the cells sent from the server or we don't yet have any cell data, return all data retrieved
if len(forts) > 1 or not self.cell:
return {
"forts": forts,
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons
}
# If there are no forts present in the data from the server, keep our existing fort data and only update the pokemon cells.
else:
return {
"forts": self.cell["forts"],
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons
}
def update_web_location(self, cells=[], lat=None, lng=None, alt=None):
# we can call the function with no arguments and still get the position
# and map_cells
if lat is None:
lat = self.api._position_lat
if lng is None:
lng = self.api._position_lng
if alt is None:
alt = self.api._position_alt
# dont cache when teleport_to
if self.api.teleporting:
return
if cells == []:
location = self.position[0:2]
cells = self.find_close_cells(*location)
user_data_cells = os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username)
try:
with open(user_data_cells, 'w') as outfile:
json.dump(cells, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_web_location = os.path.join(
_base_dir, 'web', 'location-%s.json' % self.config.username
)
# alt is unused atm but makes using *location easier
try:
with open(user_web_location, 'w') as outfile:
json.dump({
'lat': lat,
'lng': lng,
'alt': alt,
'cells': cells
}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_data_lastlocation = os.path.join(
_base_dir, 'data', 'last-location-%s.json' % self.config.username
)
try:
with open(user_data_lastlocation, 'w') as outfile:
json.dump({'lat': lat, 'lng': lng, 'alt': alt, 'start_position': self.start_position}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
def find_close_cells(self, lat, lng):
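        # Fetch map objects around (lat, lng); on success, sort the returned
        # cells by distance from our position to each cell's first fort, so
        # cells with nearby forts come first (cells without forts sort last).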
cellid = get_cell_ids(lat, lng)
timestamp = [0, ] * len(cellid)
response_dict = self.get_map_objects(lat, lng, timestamp, cellid)
map_objects = response_dict.get(
'responses', {}
).get('GET_MAP_OBJECTS', {})
status = map_objects.get('status', None)
map_cells = []
if status and status == 1:
map_cells = map_objects['map_cells']
position = (lat, lng, 0)
map_cells.sort(
key=lambda x: distance(
lat,
lng,
x['forts'][0]['latitude'],
x['forts'][0]['longitude']) if x.get('forts', []) else 1e6
)
return map_cells
def _setup_logging(self):
# log settings
# log format
if self.config.debug:
log_level = logging.DEBUG
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.getLogger("websocket").setLevel(logging.DEBUG)
logging.getLogger("socketio").setLevel(logging.DEBUG)
logging.getLogger("engineio").setLevel(logging.DEBUG)
logging.getLogger("socketIO-client").setLevel(logging.DEBUG)
logging.getLogger("pgoapi").setLevel(logging.DEBUG)
logging.getLogger("rpc_api").setLevel(logging.DEBUG)
else:
log_level = logging.ERROR
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("websocket").setLevel(logging.ERROR)
logging.getLogger("socketio").setLevel(logging.ERROR)
logging.getLogger("engineio").setLevel(logging.ERROR)
logging.getLogger("socketIO-client").setLevel(logging.ERROR)
logging.getLogger("pgoapi").setLevel(logging.ERROR)
logging.getLogger("rpc_api").setLevel(logging.ERROR)
logging.basicConfig(
level=log_level,
format='%(asctime)s [%(name)10s] [%(levelname)s] %(message)s'
)
def check_session(self, position):
# Check session expiry
if self.api._auth_provider and self.api._auth_provider._ticket_expire:
# prevent crash if return not numeric value
if not self.is_numeric(self.api._auth_provider._ticket_expire):
self.logger.info("Ticket expired value is not numeric", 'yellow')
return
remaining_time = \
self.api._auth_provider._ticket_expire / 1000 - time.time()
if remaining_time < 60:
self.event_manager.emit(
'api_error',
sender=self,
level='info',
formatted='Session stale, re-logging in.'
)
self.api = ApiWrapper(config=self.config)
self.api.set_position(*position)
self.login()
self.api.activate_signature(self.get_encryption_lib())
@staticmethod
def is_numeric(s):
try:
float(s)
return True
except ValueError:
return False
def login(self):
self.event_manager.emit(
'login_started',
sender=self,
level='info',
formatted="Login procedure started."
)
lat, lng = self.position[0:2]
        self.api.set_position(lat, lng, self.alt)  # or should the alt be kept at zero?
while not self.api.login(
self.config.auth_service,
str(self.config.username),
str(self.config.password)):
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Login error, server busy. Waiting 10 seconds to try again."
)
time.sleep(10)
with self.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='login'")
result = c.fetchone()
            if result[0] == 1:
                conn.execute('''INSERT INTO login (timestamp, message) VALUES (?, ?)''', (time.time(), 'LOGIN_SUCCESS'))
            else:
                self.event_manager.emit(
                    'login_failed',
                    sender=self,
                    level='info',
                    formatted="Login table not found, skipping log"
                )
self.event_manager.emit(
'login_successful',
sender=self,
level='info',
formatted="Login successful."
)
def get_encryption_lib(self):
if _platform == "Windows" or _platform == "win32":
# Check if we are on 32 or 64 bit
if sys.maxsize > 2**32:
file_name = 'encrypt_64.dll'
else:
file_name = 'encrypt.dll'
else:
file_name = 'encrypt.so'
if self.config.encrypt_location == '':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
else:
path = self.config.encrypt_location
full_path = path + '/'+ file_name
if not os.path.isfile(full_path):
self.logger.error(file_name + ' is not found! Please place it in the bots root directory or set encrypt_location in config.')
self.logger.info('Platform: '+ _platform + ' ' + file_name + ' directory: '+ path)
sys.exit(1)
else:
self.logger.info('Found '+ file_name +'! Platform: ' + _platform + ' ' + file_name + ' directory: ' + path)
return full_path
def _setup_api(self):
# instantiate pgoapi
self.api = ApiWrapper(config=self.config)
# provide player position on the earth
self._set_starting_position()
self.login()
# chain subrequests (methods) into one RPC call
self.api.activate_signature(self.get_encryption_lib())
self.logger.info('')
# send empty map_cells and then our position
self.update_web_location()
def _print_character_info(self):
# get player profile call
# ----------------------
response_dict = self.api.get_player()
# print('Response dictionary: \n\r{}'.format(json.dumps(response_dict, indent=2)))
currency_1 = "0"
currency_2 = "0"
if response_dict:
self._player = response_dict['responses']['GET_PLAYER']['player_data']
player = self._player
else:
            self.logger.info(
                "The API didn't return player info, servers are unstable - "
                "retrying."
            )
            sleep(5)
            self._print_character_info()
            return
# @@@ TODO: Convert this to d/m/Y H:M:S
creation_date = datetime.datetime.fromtimestamp(
player['creation_timestamp_ms'] / 1e3)
creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S")
pokecoins = '0'
stardust = '0'
items_inventory = inventory.items()
if 'amount' in player['currencies'][0]:
pokecoins = player['currencies'][0]['amount']
if 'amount' in player['currencies'][1]:
stardust = player['currencies'][1]['amount']
self.logger.info('')
self.logger.info('--- {username} ---'.format(**player))
self.logger.info(
'Pokemon Bag: {}/{}'.format(
inventory.Pokemons.get_space_used(),
inventory.get_pokemon_inventory_size()
)
)
self.logger.info(
'Items: {}/{}'.format(
inventory.Items.get_space_used(),
inventory.get_item_inventory_size()
)
)
self.logger.info(
'Stardust: {}'.format(stardust) +
' | Pokecoins: {}'.format(pokecoins)
)
# Items Output
self.logger.info(
'PokeBalls: ' + str(items_inventory.get(1).count) +
' | GreatBalls: ' + str(items_inventory.get(2).count) +
' | UltraBalls: ' + str(items_inventory.get(3).count) +
' | MasterBalls: ' + str(items_inventory.get(4).count))
self.logger.info(
'RazzBerries: ' + str(items_inventory.get(701).count) +
' | BlukBerries: ' + str(items_inventory.get(702).count) +
' | NanabBerries: ' + str(items_inventory.get(703).count))
self.logger.info(
'LuckyEgg: ' + str(items_inventory.get(301).count) +
' | Incubator: ' + str(items_inventory.get(902).count) +
' | TroyDisk: ' + str(items_inventory.get(501).count))
self.logger.info(
'Potion: ' + str(items_inventory.get(101).count) +
' | SuperPotion: ' + str(items_inventory.get(102).count) +
' | HyperPotion: ' + str(items_inventory.get(103).count) +
' | MaxPotion: ' + str(items_inventory.get(104).count))
self.logger.info(
'Incense: ' + str(items_inventory.get(401).count) +
' | IncenseSpicy: ' + str(items_inventory.get(402).count) +
' | IncenseCool: ' + str(items_inventory.get(403).count))
self.logger.info(
'Revive: ' + str(items_inventory.get(201).count) +
' | MaxRevive: ' + str(items_inventory.get(202).count))
self.logger.info('')
def _print_list_pokemon(self):
# get pokemon list
bag = inventory.pokemons().all()
        id_list = list(set(map(lambda x: x.pokemon_id, bag)))
id_list.sort()
pokemon_list = [filter(lambda x: x.pokemon_id == y, bag) for y in id_list]
show_count = self.config.pokemon_bag_show_count
show_candies = self.config.pokemon_bag_show_candies
poke_info_displayed = self.config.pokemon_bag_pokemon_info
def get_poke_info(info, pokemon):
poke_info = {
'cp': 'CP {}'.format(pokemon.cp),
'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
'iv_pct': 'IV {}'.format(pokemon.iv),
'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)),
'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)),
'level': "Level {}".format(pokemon.level),
'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
'moveset': 'Moves: {}'.format(pokemon.moveset),
'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
}
if info not in poke_info:
raise ConfigException("info '{}' isn't available for displaying".format(info))
return poke_info[info]
self.logger.info('Pokemon:')
for pokes in pokemon_list:
line_p = '#{} {}'.format(pokes[0].pokemon_id, pokes[0].name)
if show_count:
line_p += '[{}]'.format(len(pokes))
if show_candies:
line_p += '[{} candies]'.format(pokes[0].candy_quantity)
line_p += ': '
poke_info = ['({})'.format(', '.join([get_poke_info(x, p) for x in poke_info_displayed])) for p in pokes]
self.logger.info(line_p + ' | '.join(poke_info))
self.logger.info('')
def use_lucky_egg(self):
return self.api.use_item_xp_boost(item_id=301)
def _set_starting_position(self):
self.event_manager.emit(
'set_start_location',
sender=self,
level='info',
formatted='Setting start location.'
)
has_position = False
if self.config.test:
# TODO: Add unit tests
return
if self.config.location:
location_str = self.config.location
location = self.get_pos_by_name(location_str.replace(" ", ""))
msg = "Location found: {location} {position}"
self.event_manager.emit(
'location_found',
sender=self,
level='info',
formatted=msg,
data={
'location': location_str,
'position': location
}
)
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='info',
formatted="Now at {current_position}",
data={
'current_position': self.position,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
self.start_position = self.position
has_position = True
if self.config.location_cache:
try:
# save location flag used to pull the last known location from
# the location.json
self.event_manager.emit(
'load_cached_location',
sender=self,
level='debug',
formatted='Loading cached location...'
)
with open(os.path.join(_base_dir, 'data', 'last-location-%s.json' %
self.config.username)) as f:
location_json = json.load(f)
location = (
location_json['lat'],
location_json['lng'],
location_json['alt'],
)
# If location has been set in config, only use cache if starting position has not differed
if has_position and 'start_position' in location_json:
last_start_position = tuple(location_json.get('start_position', []))
# Start position has to have been set on a previous run to do this check
if last_start_position and last_start_position != self.start_position:
msg = 'Going to a new place, ignoring cached location.'
self.event_manager.emit(
'location_cache_ignored',
sender=self,
level='debug',
formatted=msg
)
return
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='debug',
formatted='Loaded location {current_position} from cache',
data={
'current_position': location,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
has_position = True
except Exception:
if has_position is False:
sys.exit(
"No cached Location. Please specify initial location."
)
self.event_manager.emit(
'location_cache_error',
sender=self,
level='debug',
formatted='Parsing cached location failed.'
)
def get_pos_by_name(self, location_name):
# Check if given location name, belongs to favorite_locations
favorite_location_coords = self._get_pos_by_fav_location(location_name)
if favorite_location_coords is not None:
return favorite_location_coords
# Check if the given location is already a coordinate.
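        # e.g. "40.7128,-74.0060" or "40.7128,-74.0060,12.5" (lat,lng[,alt])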
if ',' in location_name:
possible_coordinates = re.findall(
"[-]?\d{1,3}[.]\d{3,7}", location_name
)
if len(possible_coordinates) >= 2:
# 2 matches, this must be a coordinate. We'll bypass the Google
# geocode so we keep the exact location.
self.logger.info(
'[x] Coordinates found in passed in location, '
'not geocoding.'
)
return float(possible_coordinates[0]), float(possible_coordinates[1]), (float(possible_coordinates[2]) if len(possible_coordinates) == 3 else self.alt)
geolocator = GoogleV3(api_key=self.config.gmapkey)
loc = geolocator.geocode(location_name, timeout=10)
return float(loc.latitude), float(loc.longitude), float(loc.altitude)
def _get_pos_by_fav_location(self, location_name):
location_name = location_name.lower()
coords = None
for location in self.config.favorite_locations:
if location.get('name').lower() == location_name:
coords = re.findall(
"[-]?\d{1,3}[.]\d{3,7}", location.get('coords').strip()
)
if len(coords) >= 2:
self.logger.info('Favorite location found: {} ({})'.format(location_name, coords))
break
#TODO: This is real bad
if coords is None:
return coords
else:
return float(coords[0]), float(coords[1]), (float(coords[2]) if len(coords) == 3 else self.alt)
def heartbeat(self):
# Remove forts that we can now spin again.
now = time.time()
self.fort_timeouts = {id: timeout for id, timeout
in self.fort_timeouts.iteritems()
if timeout >= now * 1000}
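        # Throttle the player/badge refresh to at most one request every
        # heartbeat_threshold seconds; the web location update is queued on
        # every tick and handled by update_web_location_worker.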
if now - self.last_heartbeat >= self.heartbeat_threshold:
self.last_heartbeat = now
request = self.api.create_request()
request.get_player()
request.check_awarded_badges()
request.call()
try:
self.web_update_queue.put_nowait(True) # do this outside of thread every tick
except Queue.Full:
pass
def update_web_location_worker(self):
while True:
self.web_update_queue.get()
self.update_web_location()
def display_player_info(self):
inventory_items = self.api.get_inventory()
inventory_items = inventory_items['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
player_stats = next((x["inventory_item_data"]["player_stats"]
for x in inventory_items
if x.get("inventory_item_data", {}).get("player_stats", {})),
None)
if player_stats:
nextlvlxp = (int(player_stats.get('next_level_xp', 0)) - int(player_stats.get('experience', 0)))
if 'level' in player_stats and 'experience' in player_stats:
self.logger.info(
'Level: {level}'.format(
**player_stats) +
' (Next Level: {} XP)'.format(
nextlvlxp) +
' (Total: {experience} XP)'
''.format(**player_stats))
if 'pokemons_captured' in player_stats and 'poke_stop_visits' in player_stats:
self.logger.info(
'Pokemon Captured: '
'{pokemons_captured}'.format(
**player_stats) +
' | Pokestops Visited: '
'{poke_stop_visits}'.format(
**player_stats))
def get_forts(self, order_by_distance=False):
forts = [fort
for fort in self.cell['forts']
if 'latitude' in fort and 'type' in fort]
if order_by_distance:
forts.sort(key=lambda x: distance(
self.position[0],
self.position[1],
x['latitude'],
x['longitude']
))
return forts
def get_map_objects(self, lat, lng, timestamp, cellid):
if time.time() - self.last_time_map_object < self.config.map_object_cache_time:
return self.last_map_object
self.last_map_object = self.api.get_map_objects(
latitude=f2i(lat),
longitude=f2i(lng),
since_timestamp_ms=timestamp,
cell_id=cellid
)
self.last_time_map_object = time.time()
return self.last_map_object
def _load_recent_forts(self):
if not self.config.forts_cache_recent_forts:
return
cached_forts_path = os.path.join(_base_dir, 'data', 'recent-forts-%s.json' % self.config.username)
try:
# load the cached recent forts
with open(cached_forts_path) as f:
cached_recent_forts = json.load(f)
num_cached_recent_forts = len(cached_recent_forts)
num_recent_forts = len(self.recent_forts)
# Handles changes in max_circle_size
if not num_recent_forts:
self.recent_forts = []
elif num_recent_forts > num_cached_recent_forts:
self.recent_forts[-num_cached_recent_forts:] = cached_recent_forts
elif num_recent_forts < num_cached_recent_forts:
self.recent_forts = cached_recent_forts[-num_recent_forts:]
else:
self.recent_forts = cached_recent_forts
self.event_manager.emit(
'loaded_cached_forts',
sender=self,
level='debug',
formatted='Loaded cached forts...'
)
except IOError:
self.event_manager.emit(
'no_cached_forts',
sender=self,
level='debug',
formatted='Starting new cached forts for {path}',
data={'path': cached_forts_path}
)
from __future__ import print_function, division
from sympy import S, C, pi, I, Rational, Symbol, Wild, cacheit, sympify
from sympy.core.function import Function, ArgumentIndexError
from sympy.functions.elementary.trigonometric import sin, cos, csc, cot
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.miscellaneous import sqrt, root
from sympy.functions.elementary.complexes import re, im
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.special.hyper import hyper
from sympy.core.compatibility import xrange
# TODO
# o Scorer functions G1 and G2
# o Asymptotic expansions
# These are possible, e.g. for fixed order, but since the bessel type
# functions are oscillatory they are not actually tractable at
# infinity, so this is not particularly useful right now.
# o Series Expansions for functions of the second kind about zero
# o Nicer series expansions.
# o More rewriting.
# o Add solvers to ode.py (or rather add solvers for the hypergeometric equation).
class BesselBase(Function):
"""
Abstract base class for bessel-type functions.
This class is meant to reduce code duplication.
All Bessel type functions can 1) be differentiated, and the derivatives
expressed in terms of similar functions and 2) be rewritten in terms
of other bessel-type functions.
Here "bessel-type functions" are assumed to have one complex parameter.
To use this base class, define class attributes ``_a`` and ``_b`` such that
    ``2*F_n' = -_a*F_{n+1} + _b*F_{n-1}``.
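    For example, ``besselj`` uses ``_a = _b = S.One``, which gives the familiar
    recurrence ``2*J_n' = J_{n-1} - J_{n+1}``.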
"""
@property
def order(self):
""" The order of the bessel-type function. """
return self.args[0]
@property
def argument(self):
""" The argument of the bessel-type function. """
return self.args[1]
def fdiff(self, argindex=2):
if argindex != 2:
raise ArgumentIndexError(self, argindex)
return (self._b/2 * self.__class__(self.order - 1, self.argument) -
self._a/2 * self.__class__(self.order + 1, self.argument))
def _eval_conjugate(self):
z = self.argument
if (z.is_real and z.is_negative) is False:
return self.__class__(self.order.conjugate(), z.conjugate())
def _eval_expand_func(self, **hints):
nu, z, f = self.order, self.argument, self.__class__
if nu.is_real:
if (nu - 1).is_positive:
return (-self._a*self._b*f(nu - 2, z)._eval_expand_func() +
2*self._a*(nu - 1)*f(nu - 1, z)._eval_expand_func()/z)
elif (nu + 1).is_negative:
return (2*self._b*(nu + 1)*f(nu + 1, z)._eval_expand_func()/z -
self._a*self._b*f(nu + 2, z)._eval_expand_func())
return self
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import besselsimp
return besselsimp(self)
class besselj(BesselBase):
r"""
Bessel function of the first kind.
The Bessel `J` function of order `\nu` is defined to be the function
satisfying Bessel's differential equation
.. math ::
z^2 \frac{\mathrm{d}^2 w}{\mathrm{d}z^2}
+ z \frac{\mathrm{d}w}{\mathrm{d}z} + (z^2 - \nu^2) w = 0,
with Laurent expansion
.. math ::
J_\nu(z) = z^\nu \left(\frac{1}{\Gamma(\nu + 1) 2^\nu} + O(z^2) \right),
if :math:`\nu` is not a negative integer. If :math:`\nu=-n \in \mathbb{Z}_{<0}`
*is* a negative integer, then the definition is
.. math ::
J_{-n}(z) = (-1)^n J_n(z).
Examples
========
Create a Bessel function object:
>>> from sympy import besselj, jn
>>> from sympy.abc import z, n
>>> b = besselj(n, z)
Differentiate it:
>>> b.diff(z)
besselj(n - 1, z)/2 - besselj(n + 1, z)/2
Rewrite in terms of spherical Bessel functions:
>>> b.rewrite(jn)
sqrt(2)*sqrt(z)*jn(n - 1/2, z)/sqrt(pi)
Access the parameter and argument:
>>> b.order
n
>>> b.argument
z
See Also
========
bessely, besseli, besselk
References
==========
.. [1] Abramowitz, Milton; Stegun, Irene A., eds. (1965), "Chapter 9",
Handbook of Mathematical Functions with Formulas, Graphs, and
Mathematical Tables
.. [2] Luke, Y. L. (1969), The Special Functions and Their
Approximations, Volume 1
.. [3] http://en.wikipedia.org/wiki/Bessel_function
.. [4] http://functions.wolfram.com/Bessel-TypeFunctions/BesselJ/
"""
_a = S.One
_b = S.One
@classmethod
def eval(cls, nu, z):
if z.is_zero:
if nu.is_zero:
return S.One
elif (nu.is_integer and nu.is_zero is False) or re(nu).is_positive:
return S.Zero
elif re(nu).is_negative and not (nu.is_integer is True):
return S.ComplexInfinity
elif nu.is_imaginary:
return S.NaN
if z is S.Infinity or (z is S.NegativeInfinity):
return S.Zero
if z.could_extract_minus_sign():
return (z)**nu*(-z)**(-nu)*besselj(nu, -z)
if nu.is_integer:
if nu.could_extract_minus_sign():
return S(-1)**(-nu)*besselj(-nu, z)
newz = z.extract_multiplicatively(I)
if newz: # NOTE we don't want to change the function if z==0
return I**(nu)*besseli(nu, newz)
# branch handling:
from sympy import unpolarify, exp
if nu.is_integer:
newz = unpolarify(z)
if newz != z:
return besselj(nu, newz)
else:
newz, n = z.extract_branch_factor()
if n != 0:
return exp(2*n*pi*nu*I)*besselj(nu, newz)
nnu = unpolarify(nu)
if nu != nnu:
return besselj(nnu, z)
def _eval_rewrite_as_besseli(self, nu, z):
from sympy import polar_lift, exp
return exp(I*pi*nu/2)*besseli(nu, polar_lift(-I)*z)
def _eval_rewrite_as_bessely(self, nu, z):
if nu.is_integer is False:
return csc(pi*nu)*bessely(-nu, z) - cot(pi*nu)*bessely(nu, z)
def _eval_rewrite_as_jn(self, nu, z):
return sqrt(2*z/pi)*jn(nu - S.Half, self.argument)
def _eval_is_real(self):
nu, z = self.args
if nu.is_integer and z.is_real:
return True
class bessely(BesselBase):
r"""
Bessel function of the second kind.
The Bessel `Y` function of order `\nu` is defined as
.. math ::
Y_\nu(z) = \lim_{\mu \to \nu} \frac{J_\mu(z) \cos(\pi \mu)
- J_{-\mu}(z)}{\sin(\pi \mu)},
where :math:`J_\mu(z)` is the Bessel function of the first kind.
It is a solution to Bessel's equation, and linearly independent from
:math:`J_\nu`.
Examples
========
>>> from sympy import bessely, yn
>>> from sympy.abc import z, n
>>> b = bessely(n, z)
>>> b.diff(z)
bessely(n - 1, z)/2 - bessely(n + 1, z)/2
>>> b.rewrite(yn)
sqrt(2)*sqrt(z)*yn(n - 1/2, z)/sqrt(pi)
See Also
========
besselj, besseli, besselk
References
==========
.. [1] http://functions.wolfram.com/Bessel-TypeFunctions/BesselY/
"""
_a = S.One
_b = S.One
@classmethod
def eval(cls, nu, z):
if z.is_zero:
if nu.is_zero:
return S.NegativeInfinity
elif re(nu).is_zero is False:
return S.ComplexInfinity
elif re(nu).is_zero:
return S.NaN
if z is S.Infinity or z is S.NegativeInfinity:
return S.Zero
if nu.is_integer:
if nu.could_extract_minus_sign():
return S(-1)**(-nu)*bessely(-nu, z)
def _eval_rewrite_as_besselj(self, nu, z):
if nu.is_integer is False:
return csc(pi*nu)*(cos(pi*nu)*besselj(nu, z) - besselj(-nu, z))
def _eval_rewrite_as_besseli(self, nu, z):
aj = self._eval_rewrite_as_besselj(*self.args)
if aj:
return aj.rewrite(besseli)
def _eval_rewrite_as_yn(self, nu, z):
return sqrt(2*z/pi) * yn(nu - S.Half, self.argument)
def _eval_is_real(self):
nu, z = self.args
if nu.is_integer and z.is_positive:
return True
class besseli(BesselBase):
r"""
Modified Bessel function of the first kind.
The Bessel I function is a solution to the modified Bessel equation
.. math ::
z^2 \frac{\mathrm{d}^2 w}{\mathrm{d}z^2}
        + z \frac{\mathrm{d}w}{\mathrm{d}z} - (z^2 + \nu^2) w = 0.
It can be defined as
.. math ::
I_\nu(z) = i^{-\nu} J_\nu(iz),
where :math:`J_\nu(z)` is the Bessel function of the first kind.
Examples
========
>>> from sympy import besseli
>>> from sympy.abc import z, n
>>> besseli(n, z).diff(z)
besseli(n - 1, z)/2 + besseli(n + 1, z)/2
See Also
========
besselj, bessely, besselk
References
==========
.. [1] http://functions.wolfram.com/Bessel-TypeFunctions/BesselI/
"""
_a = -S.One
_b = S.One
@classmethod
def eval(cls, nu, z):
if z.is_zero:
if nu.is_zero:
return S.One
elif (nu.is_integer and nu.is_zero is False) or re(nu).is_positive:
return S.Zero
elif re(nu).is_negative and not (nu.is_integer is True):
return S.ComplexInfinity
elif nu.is_imaginary:
return S.NaN
if z.is_imaginary:
if im(z) is S.Infinity or im(z) is S.NegativeInfinity:
return S.Zero
if z.could_extract_minus_sign():
return (z)**nu*(-z)**(-nu)*besseli(nu, -z)
if nu.is_integer:
if nu.could_extract_minus_sign():
return besseli(-nu, z)
newz = z.extract_multiplicatively(I)
if newz: # NOTE we don't want to change the function if z==0
return I**(-nu)*besselj(nu, -newz)
# branch handling:
from sympy import unpolarify, exp
if nu.is_integer:
newz = unpolarify(z)
if newz != z:
return besseli(nu, newz)
else:
newz, n = z.extract_branch_factor()
if n != 0:
return exp(2*n*pi*nu*I)*besseli(nu, newz)
nnu = unpolarify(nu)
if nu != nnu:
return besseli(nnu, z)
def _eval_rewrite_as_besselj(self, nu, z):
from sympy import polar_lift, exp
return exp(-I*pi*nu/2)*besselj(nu, polar_lift(I)*z)
def _eval_rewrite_as_bessely(self, nu, z):
aj = self._eval_rewrite_as_besselj(*self.args)
if aj:
return aj.rewrite(bessely)
def _eval_rewrite_as_jn(self, nu, z):
return self._eval_rewrite_as_besselj(*self.args).rewrite(jn)
def _eval_is_real(self):
nu, z = self.args
if nu.is_integer and z.is_real:
return True
class besselk(BesselBase):
r"""
Modified Bessel function of the second kind.
The Bessel K function of order :math:`\nu` is defined as
.. math ::
K_\nu(z) = \lim_{\mu \to \nu} \frac{\pi}{2}
\frac{I_{-\mu}(z) -I_\mu(z)}{\sin(\pi \mu)},
where :math:`I_\mu(z)` is the modified Bessel function of the first kind.
It is a solution of the modified Bessel equation, and linearly independent
    from :math:`I_\nu`.
Examples
========
>>> from sympy import besselk
>>> from sympy.abc import z, n
>>> besselk(n, z).diff(z)
-besselk(n - 1, z)/2 - besselk(n + 1, z)/2
See Also
========
besselj, besseli, bessely
References
==========
.. [1] http://functions.wolfram.com/Bessel-TypeFunctions/BesselK/
"""
_a = S.One
_b = -S.One
@classmethod
def eval(cls, nu, z):
if z.is_zero:
if nu.is_zero:
return S.Infinity
elif re(nu).is_zero is False:
return S.ComplexInfinity
elif re(nu).is_zero:
return S.NaN
if z.is_imaginary:
if im(z) is S.Infinity or im(z) is S.NegativeInfinity:
return S.Zero
if nu.is_integer:
if nu.could_extract_minus_sign():
return besselk(-nu, z)
def _eval_rewrite_as_besseli(self, nu, z):
if nu.is_integer is False:
return pi*csc(pi*nu)*(besseli(-nu, z) - besseli(nu, z))/2
def _eval_rewrite_as_besselj(self, nu, z):
ai = self._eval_rewrite_as_besseli(*self.args)
if ai:
return ai.rewrite(besselj)
def _eval_rewrite_as_bessely(self, nu, z):
aj = self._eval_rewrite_as_besselj(*self.args)
if aj:
return aj.rewrite(bessely)
def _eval_rewrite_as_yn(self, nu, z):
ay = self._eval_rewrite_as_bessely(*self.args)
if ay:
return ay.rewrite(yn)
def _eval_is_real(self):
nu, z = self.args
if nu.is_integer and z.is_positive:
return True
class hankel1(BesselBase):
r"""
Hankel function of the first kind.
This function is defined as
.. math ::
H_\nu^{(1)} = J_\nu(z) + iY_\nu(z),
where :math:`J_\nu(z)` is the Bessel function of the first kind, and
:math:`Y_\nu(z)` is the Bessel function of the second kind.
It is a solution to Bessel's equation.
Examples
========
>>> from sympy import hankel1
>>> from sympy.abc import z, n
>>> hankel1(n, z).diff(z)
hankel1(n - 1, z)/2 - hankel1(n + 1, z)/2
See Also
========
hankel2, besselj, bessely
References
==========
.. [1] http://functions.wolfram.com/Bessel-TypeFunctions/HankelH1/
"""
_a = S.One
_b = S.One
def _eval_conjugate(self):
z = self.argument
if (z.is_real and z.is_negative) is False:
return hankel2(self.order.conjugate(), z.conjugate())
class hankel2(BesselBase):
r"""
Hankel function of the second kind.
This function is defined as
.. math ::
H_\nu^{(2)} = J_\nu(z) - iY_\nu(z),
where :math:`J_\nu(z)` is the Bessel function of the first kind, and
:math:`Y_\nu(z)` is the Bessel function of the second kind.
It is a solution to Bessel's equation, and linearly independent from
:math:`H_\nu^{(1)}`.
Examples
========
>>> from sympy import hankel2
>>> from sympy.abc import z, n
>>> hankel2(n, z).diff(z)
hankel2(n - 1, z)/2 - hankel2(n + 1, z)/2
See Also
========
hankel1, besselj, bessely
References
==========
.. [1] http://functions.wolfram.com/Bessel-TypeFunctions/HankelH2/
"""
_a = S.One
_b = S.One
def _eval_conjugate(self):
z = self.argument
if (z.is_real and z.is_negative) is False:
return hankel1(self.order.conjugate(), z.conjugate())
from sympy.polys.orthopolys import spherical_bessel_fn as fn
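# spherical_bessel_fn(n, z) gives the rational coefficient f_n(z) used by the
# closed forms j_n(z) = f_n(z)*sin(z) + (-1)**(n + 1)*f_{-n-1}(z)*cos(z) below.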
class SphericalBesselBase(BesselBase):
"""
Base class for spherical Bessel functions.
These are thin wrappers around ordinary Bessel functions,
since spherical Bessel functions differ from the ordinary
ones just by a slight change in order.
To use this class, define the ``_rewrite`` and ``_expand`` methods.
"""
def _expand(self, **hints):
""" Expand self into a polynomial. Nu is guaranteed to be Integer. """
raise NotImplementedError('expansion')
def _rewrite(self):
""" Rewrite self in terms of ordinary Bessel functions. """
raise NotImplementedError('rewriting')
def _eval_expand_func(self, **hints):
if self.order.is_Integer:
return self._expand(**hints)
else:
return self
def _eval_evalf(self, prec):
return self._rewrite()._eval_evalf(prec)
def fdiff(self, argindex=2):
if argindex != 2:
raise ArgumentIndexError(self, argindex)
return self.__class__(self.order - 1, self.argument) - \
self * (self.order + 1)/self.argument
class jn(SphericalBesselBase):
r"""
Spherical Bessel function of the first kind.
This function is a solution to the spherical Bessel equation
.. math ::
z^2 \frac{\mathrm{d}^2 w}{\mathrm{d}z^2}
+ 2z \frac{\mathrm{d}w}{\mathrm{d}z} + (z^2 - \nu(\nu + 1)) w = 0.
It can be defined as
.. math ::
j_\nu(z) = \sqrt{\frac{\pi}{2z}} J_{\nu + \frac{1}{2}}(z),
where :math:`J_\nu(z)` is the Bessel function of the first kind.
Examples
========
>>> from sympy import Symbol, jn, sin, cos, expand_func
>>> z = Symbol("z")
>>> print(jn(0, z).expand(func=True))
sin(z)/z
>>> jn(1, z).expand(func=True) == sin(z)/z**2 - cos(z)/z
True
>>> expand_func(jn(3, z))
(-6/z**2 + 15/z**4)*sin(z) + (1/z - 15/z**3)*cos(z)
The spherical Bessel functions of integral order
are calculated using the formula:
.. math:: j_n(z) = f_n(z) \sin{z} + (-1)^{n+1} f_{-n-1}(z) \cos{z},
where the coefficients :math:`f_n(z)` are available as
:func:`polys.orthopolys.spherical_bessel_fn`.
See Also
========
besselj, bessely, besselk, yn
"""
def _rewrite(self):
return self._eval_rewrite_as_besselj(self.order, self.argument)
def _eval_rewrite_as_besselj(self, nu, z):
return sqrt(pi/(2*z)) * besselj(nu + S('1/2'), z)
def _expand(self, **hints):
n = self.order
z = self.argument
return fn(n, z) * sin(z) + (-1)**(n + 1) * fn(-n - 1, z) * cos(z)
class yn(SphericalBesselBase):
r"""
Spherical Bessel function of the second kind.
This function is another solution to the spherical Bessel equation, and
linearly independent from :math:`j_n`. It can be defined as
.. math ::
        y_\nu(z) = \sqrt{\frac{\pi}{2z}} Y_{\nu + \frac{1}{2}}(z),
where :math:`Y_\nu(z)` is the Bessel function of the second kind.
Examples
========
>>> from sympy import Symbol, yn, sin, cos, expand_func
>>> z = Symbol("z")
>>> print(expand_func(yn(0, z)))
-cos(z)/z
>>> expand_func(yn(1, z)) == -cos(z)/z**2-sin(z)/z
True
For integral orders :math:`n`, :math:`y_n` is calculated using the formula:
.. math:: y_n(z) = (-1)^{n+1} j_{-n-1}(z)
See Also
========
besselj, bessely, besselk, jn
"""
def _rewrite(self):
return self._eval_rewrite_as_bessely(self.order, self.argument)
def _eval_rewrite_as_bessely(self, nu, z):
return sqrt(pi/(2*z)) * bessely(nu + S('1/2'), z)
def _expand(self, **hints):
n = self.order
z = self.argument
return (-1)**(n + 1) * \
(fn(-n - 1, z) * sin(z) + (-1)**(-n) * fn(n, z) * cos(z))
def jn_zeros(n, k, method="sympy", dps=15):
"""
Zeros of the spherical Bessel function of the first kind.
This returns an array of zeros of jn up to the k-th zero.
* method = "sympy": uses :func:`mpmath.besseljzero`
* method = "scipy": uses the
`SciPy's sph_jn <http://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jn.html>`_
and
`newton <http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.newton.html>`_
to find all
roots, which is faster than computing the zeros using a general
numerical solver, but it requires SciPy and only works with low
precision floating point numbers. [The function used with
method="sympy" is a recent addition to mpmath, before that a general
solver was used.]
Examples
========
>>> from sympy import jn_zeros
>>> jn_zeros(2, 4, dps=5)
[5.7635, 9.095, 12.323, 15.515]
See Also
========
jn, yn, besselj, besselk, bessely
"""
from math import pi
if method == "sympy":
from sympy.mpmath import besseljzero
from sympy.mpmath.libmp.libmpf import dps_to_prec
from sympy import Expr
prec = dps_to_prec(dps)
return [Expr._from_mpmath(besseljzero(S(n + 0.5)._to_mpmath(prec),
int(l)), prec)
for l in xrange(1, k + 1)]
elif method == "scipy":
from scipy.special import sph_jn
from scipy.optimize import newton
f = lambda x: sph_jn(n, x)[0][-1]
else:
raise NotImplementedError("Unknown method.")
def solver(f, x):
if method == "scipy":
root = newton(f, x)
else:
raise NotImplementedError("Unknown method.")
return root
# we need to approximate the position of the first root:
root = n + pi
# determine the first root exactly:
root = solver(f, root)
roots = [root]
for i in range(k - 1):
# estimate the position of the next root using the last root + pi:
root = solver(f, root + pi)
roots.append(root)
return roots
class AiryBase(Function):
"""
Abstract base class for Airy functions.
This class is meant to reduce code duplication.
"""
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
return self.args[0].is_real
def _as_real_imag(self, deep=True, **hints):
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (re, im)
def as_real_imag(self, deep=True, **hints):
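        # Split f(x + I*y) via the mirror symmetry f(conjugate(z)) ==
        # conjugate(f(z)): sqrt(-y**2/x**2) below plays the role of I*y/x
        # (up to the branch of sqrt), so re and im are assembled from
        # f(z) and its conjugate.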
x, y = self._as_real_imag(deep=deep, **hints)
sq = -y**2/x**2
re = S.Half*(self.func(x+x*sqrt(sq))+self.func(x-x*sqrt(sq)))
im = x/(2*y) * sqrt(sq) * (self.func(x-x*sqrt(sq)) - self.func(x+x*sqrt(sq)))
return (re, im)
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
class airyai(AiryBase):
r"""
The Airy function `\operatorname{Ai}` of the first kind.
The Airy function `\operatorname{Ai}(z)` is defined to be the function
satisfying Airy's differential equation
.. math::
\frac{\mathrm{d}^2 w(z)}{\mathrm{d}z^2} - z w(z) = 0.
Equivalently, for real `z`
.. math::
\operatorname{Ai}(z) := \frac{1}{\pi}
\int_0^\infty \cos\left(\frac{t^3}{3} + z t\right) \mathrm{d}t.
Examples
========
Create an Airy function object:
>>> from sympy import airyai
>>> from sympy.abc import z
>>> airyai(z)
airyai(z)
Several special values are known:
>>> airyai(0)
3**(1/3)/(3*gamma(2/3))
>>> from sympy import oo
>>> airyai(oo)
0
>>> airyai(-oo)
0
The Airy function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(airyai(z))
airyai(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(airyai(z), z)
airyaiprime(z)
>>> diff(airyai(z), z, 2)
z*airyai(z)
Series expansion is also supported:
>>> from sympy import series
>>> series(airyai(z), z, 0, 3)
3**(5/6)*gamma(1/3)/(6*pi) - 3**(1/6)*z*gamma(2/3)/(2*pi) + O(z**3)
We can numerically evaluate the Airy function to arbitrary precision
on the whole complex plane:
>>> airyai(-2).evalf(50)
0.22740742820168557599192443603787379946077222541710
Rewrite Ai(z) in terms of hypergeometric functions:
>>> from sympy import hyper
>>> airyai(z).rewrite(hyper)
-3**(2/3)*z*hyper((), (4/3,), z**3/9)/(3*gamma(1/3)) + 3**(1/3)*hyper((), (2/3,), z**3/9)/(3*gamma(2/3))
See Also
========
airybi: Airy function of the second kind.
airyaiprime: Derivative of the Airy function of the first kind.
airybiprime: Derivative of the Airy function of the second kind.
References
==========
.. [1] http://en.wikipedia.org/wiki/Airy_function
.. [2] http://dlmf.nist.gov/9
.. [3] http://www.encyclopediaofmath.org/index.php/Airy_functions
.. [4] http://mathworld.wolfram.com/AiryFunctions.html
"""
nargs = 1
unbranched = True
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.NegativeInfinity:
return S.Zero
elif arg is S.Zero:
return S.One / (3**Rational(2, 3) * gamma(Rational(2, 3)))
def fdiff(self, argindex=1):
if argindex == 1:
return airyaiprime(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 1:
p = previous_terms[-1]
return ((3**(S(1)/3)*x)**(-n)*(3**(S(1)/3)*x)**(n + 1)*sin(pi*(2*n/3 + S(4)/3))*C.factorial(n) *
gamma(n/3 + S(2)/3)/(sin(pi*(2*n/3 + S(2)/3))*C.factorial(n + 1)*gamma(n/3 + S(1)/3)) * p)
else:
return (S.One/(3**(S(2)/3)*pi) * gamma((n+S.One)/S(3)) * sin(2*pi*(n+S.One)/S(3)) /
C.factorial(n) * (root(3, 3)*x)**n)
def _eval_rewrite_as_besselj(self, z):
ot = Rational(1, 3)
tt = Rational(2, 3)
a = C.Pow(-z, Rational(3, 2))
if re(z).is_negative:
return ot*sqrt(-z) * (besselj(-ot, tt*a) + besselj(ot, tt*a))
def _eval_rewrite_as_besseli(self, z):
ot = Rational(1, 3)
tt = Rational(2, 3)
a = C.Pow(z, Rational(3, 2))
if re(z).is_positive:
return ot*sqrt(z) * (besseli(-ot, tt*a) - besseli(ot, tt*a))
else:
return ot*(C.Pow(a, ot)*besseli(-ot, tt*a) - z*C.Pow(a, -ot)*besseli(ot, tt*a))
def _eval_rewrite_as_hyper(self, z):
pf1 = S.One / (3**(S(2)/3)*gamma(S(2)/3))
pf2 = z / (root(3, 3)*gamma(S(1)/3))
return pf1 * hyper([], [S(2)/3], z**3/9) - pf2 * hyper([], [S(4)/3], z**3/9)
def _eval_expand_func(self, **hints):
arg = self.args[0]
symbs = arg.atoms(Symbol)
if len(symbs) == 1:
z = symbs.pop()
c = Wild("c", exclude=[z])
d = Wild("d", exclude=[z])
m = Wild("m", exclude=[z])
n = Wild("n", exclude=[z])
M = arg.match(c*(d*z**n)**m)
if M is not None:
m = M[m]
# The transformation is given by 03.05.16.0001.01
# http://functions.wolfram.com/Bessel-TypeFunctions/AiryAi/16/01/01/0001/
if (3*m).is_integer:
c = M[c]
d = M[d]
n = M[n]
pf = (d * z**n)**m / (d**m * z**(m*n))
newarg = c * d**m * z**(m*n)
return S.Half * ((pf + S.One)*airyai(newarg) - (pf - S.One)/sqrt(3)*airybi(newarg))
class airybi(AiryBase):
r"""
The Airy function `\operatorname{Bi}` of the second kind.
The Airy function `\operatorname{Bi}(z)` is defined to be the function
satisfying Airy's differential equation
.. math::
\frac{\mathrm{d}^2 w(z)}{\mathrm{d}z^2} - z w(z) = 0.
Equivalently, for real `z`
.. math::
\operatorname{Bi}(z) := \frac{1}{\pi}
\int_0^\infty
\exp\left(-\frac{t^3}{3} + z t\right)
+ \sin\left(\frac{t^3}{3} + z t\right) \mathrm{d}t.
Examples
========
Create an Airy function object:
>>> from sympy import airybi
>>> from sympy.abc import z
>>> airybi(z)
airybi(z)
Several special values are known:
>>> airybi(0)
3**(5/6)/(3*gamma(2/3))
>>> from sympy import oo
>>> airybi(oo)
oo
>>> airybi(-oo)
0
The Airy function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(airybi(z))
airybi(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(airybi(z), z)
airybiprime(z)
>>> diff(airybi(z), z, 2)
z*airybi(z)
Series expansion is also supported:
>>> from sympy import series
>>> series(airybi(z), z, 0, 3)
3**(1/3)*gamma(1/3)/(2*pi) + 3**(2/3)*z*gamma(2/3)/(2*pi) + O(z**3)
We can numerically evaluate the Airy function to arbitrary precision
on the whole complex plane:
>>> airybi(-2).evalf(50)
-0.41230258795639848808323405461146104203453483447240
Rewrite Bi(z) in terms of hypergeometric functions:
>>> from sympy import hyper
>>> airybi(z).rewrite(hyper)
3**(1/6)*z*hyper((), (4/3,), z**3/9)/gamma(1/3) + 3**(5/6)*hyper((), (2/3,), z**3/9)/(3*gamma(2/3))
See Also
========
airyai: Airy function of the first kind.
airyaiprime: Derivative of the Airy function of the first kind.
airybiprime: Derivative of the Airy function of the second kind.
References
==========
.. [1] http://en.wikipedia.org/wiki/Airy_function
.. [2] http://dlmf.nist.gov/9
.. [3] http://www.encyclopediaofmath.org/index.php/Airy_functions
.. [4] http://mathworld.wolfram.com/AiryFunctions.html
"""
nargs = 1
unbranched = True
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Zero
elif arg is S.Zero:
return S.One / (3**Rational(1, 6) * gamma(Rational(2, 3)))
def fdiff(self, argindex=1):
if argindex == 1:
return airybiprime(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 1:
p = previous_terms[-1]
return (3**(S(1)/3)*x * Abs(sin(2*pi*(n + S.One)/S(3))) * C.factorial((n - S.One)/S(3)) /
((n + S.One) * Abs(cos(2*pi*(n + S.Half)/S(3))) * C.factorial((n - 2)/S(3))) * p)
else:
return (S.One/(root(3, 6)*pi) * gamma((n + S.One)/S(3)) * Abs(sin(2*pi*(n + S.One)/S(3))) /
C.factorial(n) * (root(3, 3)*x)**n)
def _eval_rewrite_as_besselj(self, z):
ot = Rational(1, 3)
tt = Rational(2, 3)
a = C.Pow(-z, Rational(3, 2))
if re(z).is_negative:
return sqrt(-z/3) * (besselj(-ot, tt*a) - besselj(ot, tt*a))
def _eval_rewrite_as_besseli(self, z):
ot = Rational(1, 3)
tt = Rational(2, 3)
a = C.Pow(z, Rational(3, 2))
if re(z).is_positive:
return sqrt(z)/sqrt(3) * (besseli(-ot, tt*a) + besseli(ot, tt*a))
else:
b = C.Pow(a, ot)
c = C.Pow(a, -ot)
return sqrt(ot)*(b*besseli(-ot, tt*a) + z*c*besseli(ot, tt*a))
def _eval_rewrite_as_hyper(self, z):
pf1 = S.One / (root(3, 6)*gamma(S(2)/3))
pf2 = z*root(3, 6) / gamma(S(1)/3)
return pf1 * hyper([], [S(2)/3], z**3/9) + pf2 * hyper([], [S(4)/3], z**3/9)
def _eval_expand_func(self, **hints):
arg = self.args[0]
symbs = arg.atoms(Symbol)
if len(symbs) == 1:
z = symbs.pop()
c = Wild("c", exclude=[z])
d = Wild("d", exclude=[z])
m = Wild("m", exclude=[z])
n = Wild("n", exclude=[z])
M = arg.match(c*(d*z**n)**m)
if M is not None:
m = M[m]
# The transformation is given by 03.06.16.0001.01
# http://functions.wolfram.com/Bessel-TypeFunctions/AiryBi/16/01/01/0001/
if (3*m).is_integer:
c = M[c]
d = M[d]
n = M[n]
pf = (d * z**n)**m / (d**m * z**(m*n))
newarg = c * d**m * z**(m*n)
return S.Half * (sqrt(3)*(S.One - pf)*airyai(newarg) + (S.One + pf)*airybi(newarg))
class airyaiprime(AiryBase):
r"""
The derivative `\operatorname{Ai}^\prime` of the Airy function of the first kind.
The Airy function `\operatorname{Ai}^\prime(z)` is defined to be the function
.. math::
\operatorname{Ai}^\prime(z) := \frac{\mathrm{d} \operatorname{Ai}(z)}{\mathrm{d} z}.
Examples
========
Create an Airy function object:
>>> from sympy import airyaiprime
>>> from sympy.abc import z
>>> airyaiprime(z)
airyaiprime(z)
Several special values are known:
>>> airyaiprime(0)
-3**(2/3)/(3*gamma(1/3))
>>> from sympy import oo
>>> airyaiprime(oo)
0
The Airy function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(airyaiprime(z))
airyaiprime(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(airyaiprime(z), z)
z*airyai(z)
>>> diff(airyaiprime(z), z, 2)
z*airyaiprime(z) + airyai(z)
Series expansion is also supported:
>>> from sympy import series
>>> series(airyaiprime(z), z, 0, 3)
-3**(2/3)/(3*gamma(1/3)) + 3**(1/3)*z**2/(6*gamma(2/3)) + O(z**3)
We can numerically evaluate the Airy function to arbitrary precision
on the whole complex plane:
>>> airyaiprime(-2).evalf(50)
0.61825902074169104140626429133247528291577794512415
Rewrite Ai'(z) in terms of hypergeometric functions:
>>> from sympy import hyper
>>> airyaiprime(z).rewrite(hyper)
3**(1/3)*z**2*hyper((), (5/3,), z**3/9)/(6*gamma(2/3)) - 3**(2/3)*hyper((), (1/3,), z**3/9)/(3*gamma(1/3))
See Also
========
airyai: Airy function of the first kind.
airybi: Airy function of the second kind.
airybiprime: Derivative of the Airy function of the second kind.
References
==========
.. [1] http://en.wikipedia.org/wiki/Airy_function
.. [2] http://dlmf.nist.gov/9
.. [3] http://www.encyclopediaofmath.org/index.php/Airy_functions
.. [4] http://mathworld.wolfram.com/AiryFunctions.html
"""
nargs = 1
unbranched = True
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.Zero:
return -S.One / (3**Rational(1, 3) * gamma(Rational(1, 3)))
def fdiff(self, argindex=1):
if argindex == 1:
return self.args[0]*airyai(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
from sympy.mpmath import mp
from sympy import Expr
z = self.args[0]._to_mpmath(prec)
oprec = mp.prec
mp.prec = prec
res = mp.airyai(z, derivative=1)
mp.prec = oprec
return Expr._from_mpmath(res, prec)
def _eval_rewrite_as_besselj(self, z):
tt = Rational(2, 3)
a = C.Pow(-z, Rational(3, 2))
if re(z).is_negative:
return z/3 * (besselj(-tt, tt*a) - besselj(tt, tt*a))
def _eval_rewrite_as_besseli(self, z):
ot = Rational(1, 3)
tt = Rational(2, 3)
a = tt * C.Pow(z, Rational(3, 2))
if re(z).is_positive:
return z/3 * (besseli(tt, a) - besseli(-tt, a))
else:
a = C.Pow(z, Rational(3, 2))
b = C.Pow(a, tt)
c = C.Pow(a, -tt)
return ot * (z**2*c*besseli(tt, tt*a) - b*besseli(-ot, tt*a))
def _eval_rewrite_as_hyper(self, z):
pf1 = z**2 / (2*3**(S(2)/3)*gamma(S(2)/3))
pf2 = 1 / (root(3, 3)*gamma(S(1)/3))
return pf1 * hyper([], [S(5)/3], z**3/9) - pf2 * hyper([], [S(1)/3], z**3/9)
def _eval_expand_func(self, **hints):
arg = self.args[0]
symbs = arg.atoms(Symbol)
if len(symbs) == 1:
z = symbs.pop()
c = Wild("c", exclude=[z])
d = Wild("d", exclude=[z])
m = Wild("m", exclude=[z])
n = Wild("n", exclude=[z])
M = arg.match(c*(d*z**n)**m)
if M is not None:
m = M[m]
# The transformation is in principle
# given by 03.07.16.0001.01 but note
# that there is an error in this formula.
# http://functions.wolfram.com/Bessel-TypeFunctions/AiryAiPrime/16/01/01/0001/
if (3*m).is_integer:
c = M[c]
d = M[d]
n = M[n]
pf = (d**m * z**(n*m)) / (d * z**n)**m
newarg = c * d**m * z**(n*m)
return S.Half * ((pf + S.One)*airyaiprime(newarg) + (pf - S.One)/sqrt(3)*airybiprime(newarg))
class airybiprime(AiryBase):
r"""
The derivative `\operatorname{Bi}^\prime` of the Airy function of the second kind.
The Airy function `\operatorname{Bi}^\prime(z)` is defined to be the function
.. math::
\operatorname{Bi}^\prime(z) := \frac{\mathrm{d} \operatorname{Bi}(z)}{\mathrm{d} z}.
Examples
========
Create an Airy function object:
>>> from sympy import airybiprime
>>> from sympy.abc import z
>>> airybiprime(z)
airybiprime(z)
Several special values are known:
>>> airybiprime(0)
3**(1/6)/gamma(1/3)
>>> from sympy import oo
>>> airybiprime(oo)
oo
>>> airybiprime(-oo)
0
The Airy function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(airybiprime(z))
airybiprime(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(airybiprime(z), z)
z*airybi(z)
>>> diff(airybiprime(z), z, 2)
z*airybiprime(z) + airybi(z)
Series expansion is also supported:
>>> from sympy import series
>>> series(airybiprime(z), z, 0, 3)
3**(1/6)/gamma(1/3) + 3**(5/6)*z**2/(6*gamma(2/3)) + O(z**3)
We can numerically evaluate the Airy function to arbitrary precision
on the whole complex plane:
>>> airybiprime(-2).evalf(50)
0.27879516692116952268509756941098324140300059345163
Rewrite Bi'(z) in terms of hypergeometric functions:
>>> from sympy import hyper
>>> airybiprime(z).rewrite(hyper)
3**(5/6)*z**2*hyper((), (5/3,), z**3/9)/(6*gamma(2/3)) + 3**(1/6)*hyper((), (1/3,), z**3/9)/gamma(1/3)
See Also
========
airyai: Airy function of the first kind.
airybi: Airy function of the second kind.
airyaiprime: Derivative of the Airy function of the first kind.
References
==========
.. [1] http://en.wikipedia.org/wiki/Airy_function
.. [2] http://dlmf.nist.gov/9
.. [3] http://www.encyclopediaofmath.org/index.php/Airy_functions
.. [4] http://mathworld.wolfram.com/AiryFunctions.html
"""
nargs = 1
unbranched = True
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Zero
elif arg is S.Zero:
return 3**Rational(1, 6) / gamma(Rational(1, 3))
def fdiff(self, argindex=1):
if argindex == 1:
return self.args[0]*airybi(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
from sympy.mpmath import mp
from sympy import Expr
z = self.args[0]._to_mpmath(prec)
oprec = mp.prec
mp.prec = prec
res = mp.airybi(z, derivative=1)
mp.prec = oprec
return Expr._from_mpmath(res, prec)
def _eval_rewrite_as_besselj(self, z):
tt = Rational(2, 3)
a = tt * C.Pow(-z, Rational(3, 2))
if re(z).is_negative:
return -z/sqrt(3) * (besselj(-tt, a) + besselj(tt, a))
def _eval_rewrite_as_besseli(self, z):
ot = Rational(1, 3)
tt = Rational(2, 3)
a = tt * C.Pow(z, Rational(3, 2))
if re(z).is_positive:
return z/sqrt(3) * (besseli(-tt, a) + besseli(tt, a))
else:
a = C.Pow(z, Rational(3, 2))
b = C.Pow(a, tt)
c = C.Pow(a, -tt)
return sqrt(ot) * (b*besseli(-tt, tt*a) + z**2*c*besseli(tt, tt*a))
def _eval_rewrite_as_hyper(self, z):
pf1 = z**2 / (2*root(3, 6)*gamma(S(2)/3))
pf2 = root(3, 6) / gamma(S(1)/3)
return pf1 * hyper([], [S(5)/3], z**3/9) + pf2 * hyper([], [S(1)/3], z**3/9)
def _eval_expand_func(self, **hints):
arg = self.args[0]
symbs = arg.atoms(Symbol)
if len(symbs) == 1:
z = symbs.pop()
c = Wild("c", exclude=[z])
d = Wild("d", exclude=[z])
m = Wild("m", exclude=[z])
n = Wild("n", exclude=[z])
M = arg.match(c*(d*z**n)**m)
if M is not None:
m = M[m]
# The transformation is in principle
# given by 03.08.16.0001.01 but note
# that there is an error in this formula.
# http://functions.wolfram.com/Bessel-TypeFunctions/AiryBiPrime/16/01/01/0001/
if (3*m).is_integer:
c = M[c]
d = M[d]
n = M[n]
pf = (d**m * z**(n*m)) / (d * z**n)**m
newarg = c * d**m * z**(n*m)
return S.Half * (sqrt(3)*(pf - S.One)*airyaiprime(newarg) + (pf + S.One)*airybiprime(newarg))
|
|
#-*- coding: UTF-8 -*-
"""
PyAssimp
This is the main-module of PyAssimp.
"""
import sys
if sys.version_info < (2,6):
raise ImportError('pyassimp: need python 2.6 or newer')
import ctypes
import os
import numpy
import logging; logger = logging.getLogger("pyassimp")
# Attach a default null handler to the logger so that
# applications can easily get log messages from pyassimp
# by calling, for instance,
# >>> logging.basicConfig(level=logging.DEBUG)
# before importing pyassimp
class NullHandler(logging.Handler):
def emit(self, record):
pass
h = NullHandler()
logger.addHandler(h)
from . import structs
from .errors import AssimpError
from . import helper
assimp_structs_as_tuple = (
structs.Matrix4x4,
structs.Matrix3x3,
structs.Vector2D,
structs.Vector3D,
structs.Color3D,
structs.Color4D,
structs.Quaternion,
structs.Plane,
structs.Texel)
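# make_tuple() converts one of the fixed-size assimp math structures listed
# above into a numpy array (reshaped to 4x4 or 3x3 for the matrix types).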
def make_tuple(ai_obj, type = None):
res = None
if isinstance(ai_obj, structs.Matrix4x4):
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4))
#import pdb;pdb.set_trace()
elif isinstance(ai_obj, structs.Matrix3x3):
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3))
else:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_])
return res
# It is faster and more correct to have an init function for each assimp class
def _init_face(aiFace):
aiFace.indices = [aiFace.mIndices[i] for i in range(aiFace.mNumIndices)]
assimp_struct_inits = \
{ structs.Face : _init_face }
def call_init(obj, caller = None):
if helper.hasattr_silent(obj,'contents'): #pointer
_init(obj.contents, obj, caller)
else:
_init(obj,parent=caller)
def _is_init_type(obj):
if helper.hasattr_silent(obj,'contents'): #pointer
return _is_init_type(obj[0])
# null-pointer case that arises when we reach a mesh attribute
# like mBitangents, which uses mNumVertices rather than mNumBitangents,
# so it breaks the 'is iterable' check.
# Basically:
# FIXME!
elif not bool(obj):
return False
tname = obj.__class__.__name__
return not (tname[:2] == 'c_' or tname == 'Structure' \
or tname == 'POINTER') and not isinstance(obj,int)
def _init(self, target = None, parent = None):
"""
Custom initialize() for C structs; adds safely accessible member functionality.
:param target: set the object which receives the added methods. Useful when manipulating
pointers, to skip the intermediate 'contents' dereferencing.
"""
if not target:
target = self
dirself = dir(self)
for m in dirself:
if m.startswith("_"):
continue
if m.startswith('mNum'):
if 'm' + m[4:] in dirself:
continue # will be processed later on
else:
name = m[1:].lower()
obj = getattr(self, m)
setattr(target, name, obj)
continue
if m == 'mName':
obj = self.mName
target.name = str(obj.data.decode("utf-8"))
target.__class__.__repr__ = lambda x: str(x.__class__) + "(" + x.name + ")"
target.__class__.__str__ = lambda x: x.name
continue
name = m[1:].lower()
obj = getattr(self, m)
# Create tuples
if isinstance(obj, assimp_structs_as_tuple):
setattr(target, name, make_tuple(obj))
logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower())
continue
if m.startswith('m'):
if name == "parent":
setattr(target, name, parent)
logger.debug("Added a parent as self." + name)
continue
if helper.hasattr_silent(self, 'mNum' + m[1:]):
length = getattr(self, 'mNum' + m[1:])
# -> special case: properties are
# stored as a dict.
if m == 'mProperties':
setattr(target, name, _get_properties(obj, length))
continue
if not length: # empty!
setattr(target, name, [])
logger.debug(str(self) + ": " + name + " is an empty list.")
continue
try:
if obj._type_ in assimp_structs_as_tuple:
setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32))
logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name)
else:
setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array?
logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
# initialize array elements
try:
init = assimp_struct_inits[type(obj[0])]
except KeyError:
if _is_init_type(obj[0]):
for e in getattr(target, name):
call_init(e, target)
else:
for e in getattr(target, name):
init(e)
except IndexError:
logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.")
sys.exit(1)
except ValueError as e:
logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.")
if "setting an array element with a sequence" in str(e):
logger.error("Note that pyassimp does not currently "
"support meshes with mixed triangles "
"and quads. Try to load your mesh with"
" a post-processing to triangulate your"
" faces.")
sys.exit(1)
else: # starts with 'm' but not iterable
setattr(target, name, obj)
logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
if _is_init_type(obj):
call_init(obj, target)
if isinstance(self, structs.Mesh):
_finalize_mesh(self, target)
if isinstance(self, structs.Texture):
_finalize_texture(self, target)
return self
class AssimpLib(object):
"""
Assimp-Singleton
"""
load, release, dll = helper.search_library()
#the loader as singleton
_assimp_lib = AssimpLib()
def pythonize_assimp(type, obj, scene):
""" This method modify the Assimp data structures
to make them easier to work with in Python.
Supported operations:
- MESH: replace a list of mesh IDs by reference to these meshes
- ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node.
:param type: the type of modification to operate (cf above)
:param obj: the input object to modify
:param scene: a reference to the whole scene
"""
if type == "MESH":
meshes = []
for i in obj:
meshes.append(scene.meshes[i])
return meshes
if type == "ADDTRANSFORMATION":
def getnode(node, name):
if node.name == name: return node
for child in node.children:
n = getnode(child, name)
if n: return n
node = getnode(scene.rootnode, obj.name)
if not node:
raise AssimpError("Object " + str(obj) + " has no associated node!")
setattr(obj, "transformation", node.transformation)
def recur_pythonize(node, scene):
""" Recursively call pythonize_assimp on
nodes tree to apply several post-processing to
pythonize the assimp datastructures.
"""
node.meshes = pythonize_assimp("MESH", node.meshes, scene)
for mesh in node.meshes:
mesh.material = scene.materials[mesh.materialindex]
for cam in scene.cameras:
pythonize_assimp("ADDTRANSFORMATION", cam, scene)
#for light in scene.lights:
# pythonize_assimp("ADDTRANSFORMATION", light, scene)
for c in node.children:
recur_pythonize(c, scene)
def load(filename, processing=0):
"""
Loads the model with some specific processing parameters.
filename - file to load model from
processing - processing parameters
result Scene-object with model-data
throws AssimpError - could not open file
"""
#read pure data
#from ctypes import c_char_p, c_uint
#model = _assimp_lib.load(c_char_p(filename), c_uint(processing))
model = _assimp_lib.load(filename.encode("ascii"), processing)
if not model:
#Uhhh, something went wrong!
raise AssimpError("could not import file: %s" % filename)
scene = _init(model.contents)
recur_pythonize(scene.rootnode, scene)
return scene
def release(scene):
from ctypes import pointer
_assimp_lib.release(pointer(scene))
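# A minimal, hedged usage sketch of the public API defined in this module
# (the file name is purely illustrative); load() returns a pythonized Scene
# whose native resources should be freed with release():
# >>> from pyassimp import load, release
# >>> scene = load('model.obj')
# >>> print(len(scene.meshes), 'meshes,', len(scene.materials), 'materials')
# >>> release(scene)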
def _finalize_texture(tex, target):
setattr(target, "achformathint", tex.achFormatHint)
data = numpy.array([make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)])
setattr(target, "data", data)
def _finalize_mesh(mesh, target):
""" Building of meshes is a bit specific.
We override here the various datasets that cannot
be processed as regular fields.
For instance, the length of the normals array is
mNumVertices (no mNumNormals is available)
"""
nb_vertices = getattr(mesh, "mNumVertices")
def fill(name):
mAttr = getattr(mesh, name)
if mAttr:
data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32)
setattr(target, name[1:].lower(), data)
else:
setattr(target, name[1:].lower(), numpy.array([], dtype="float32"))
def fillarray(name):
mAttr = getattr(mesh, name)
data = []
for index, mSubAttr in enumerate(mAttr):
if mSubAttr:
data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)])
setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32))
fill("mNormals")
fill("mTangents")
fill("mBitangents")
fillarray("mColors")
fillarray("mTextureCoords")
# prepare faces
faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32)
setattr(target, 'faces', faces)
class PropertyGetter(dict):
def __getitem__(self, key):
semantic = 0
if isinstance(key, tuple):
key, semantic = key
return dict.__getitem__(self, (key, semantic))
def keys(self):
for k in dict.keys(self):
yield k[0]
def __iter__(self):
return self.keys()
def items(self):
for k, v in dict.items(self):
yield k[0], v
def _get_properties(properties, length):
"""
Convenience function to get the material properties as a dict,
with values converted to a Python-friendly format.
"""
result = {}
#read all properties
for p in [properties[i] for i in range(length)]:
#the name
p = p.contents
key = (str(p.mKey.data.decode("utf-8")).split('.')[1], p.mSemantic)
#the data
from ctypes import POINTER, cast, c_int, c_float, sizeof
if p.mType == 1:
arr = cast(p.mData, POINTER(c_float * int(p.mDataLength/sizeof(c_float)) )).contents
value = [x for x in arr]
elif p.mType == 3: #string can't be an array
value = cast(p.mData, POINTER(structs.MaterialPropertyString)).contents.data.decode("utf-8")
elif p.mType == 4:
arr = cast(p.mData, POINTER(c_int * int(p.mDataLength/sizeof(c_int)) )).contents
value = [x for x in arr]
else:
value = p.mData[:p.mDataLength]
if len(value) == 1:
[value] = value
result[key] = value
return PropertyGetter(result)
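# Material properties end up in a PropertyGetter, which can be indexed either
# by key alone (semantic defaults to 0) or by a (key, semantic) tuple. A
# hedged sketch -- the available keys depend on the loaded file:
# >>> mat = scene.materials[0]
# >>> mat.properties['diffuse']
# >>> mat.properties[('file', 1)]   # e.g. a texture path stored under semantic 1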
def decompose_matrix(matrix):
if not isinstance(matrix, structs.Matrix4x4):
raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!")
scaling = structs.Vector3D()
rotation = structs.Quaternion()
position = structs.Vector3D()
from ctypes import byref, pointer
_assimp_lib.dll.aiDecomposeMatrix(pointer(matrix), byref(scaling), byref(rotation), byref(position))
return scaling._init(), rotation._init(), position._init()
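# Note: decompose_matrix() expects a raw structs.Matrix4x4 as produced by the
# C API, not the numpy array that _init() substitutes on pythonized objects.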
|
|
# coding: utf-8
import os
import signal
import shutil
import subprocess
from subprocess import CalledProcessError
import simplejson as json
from sqlalchemy.exc import SQLAlchemyError, SAWarning
from flask import Blueprint, request, url_for, jsonify, make_response
from flask import current_app
from flask.views import MethodView
thresholds = Blueprint('thresholds', __name__)
from daemon.models import Threshold
from daemon.database import db
from daemon.utils import generate_threshold_config
class ThresholdAPI(MethodView):
def post(self):
"""
Adds a threshold to the database. Supports only POST method.
Request body should be a JSON dictionary {"threshold": data}
`data` should be a dictionary with these keys: `host`, `plugin`,
`plugin_instance`, `type`, `type_instance`, `datasource`,
`warning_min`, `warning_max`, `failure_min`, `failure_max`,
`percentage`, `invert`, `hits`, `hysteresis`
Key `type` is mandatory.
"""
try:
data = json.loads(request.form["threshold"])
data["type"]
threshold = Threshold(**data)
db.session.add(threshold)
except (json.JSONDecodeError, KeyError, SQLAlchemyError):
db.session.rollback()
return "Malformed request", 400
else:
db.session.commit()
response = make_response("Threshold added.", 201)
# XXX: url_for() does not make a good URL with provided kwarg
# threshold=cursor.lastrowid
response.headers["Location"] = url_for("thresholds.threshold",
_external=False) + "/%s" % threshold.id
return response
def get(self, threshold_id):
"""
Obtain threshold selected by `id`.
"""
result = Threshold.query.get_or_404(threshold_id)
return jsonify(threshold=result)
def put(self, threshold_id):
"""
Updates the threshold's record in the database. `id` specifies the record.
Request body should be a JSON dictionary {"threshold": data}
`data` should be a dictionary with these keys: `host`, `plugin`,
`plugin_instance`, `type`, `type_instance`, `datasource`,
`warning_min`, `warning_max`, `failure_min`, `failure_max`,
`percentage`, `invert`, `hits`, `hysteresis`
"""
try:
data = json.loads(request.form["threshold"])
threshold = Threshold.query.get_or_404(threshold_id)
new = Threshold(**data)
new.id = threshold_id
db.session.merge(new)
except (SQLAlchemyError, SAWarning, json.JSONDecodeError):
db.session.rollback()
return ("Malformed request", 400)
else:
db.session.commit()
return ("Threshold updated.", 200)
def delete(self, threshold_id):
"""
Removes the threshold specified by `id`.
"""
try:
threshold = Threshold.query.get_or_404(threshold_id)
db.session.delete(threshold)
except (SQLAlchemyError, SAWarning):
db.session.rollback()
return ("Error occured.", 500)
else:
db.session.commit()
return ("Threshold removed.", 200)
thresholds_view = ThresholdAPI.as_view("threshold")
thresholds.add_url_rule(
"/threshold",
methods=["POST"],
view_func=thresholds_view
)
thresholds.add_url_rule(
"/threshold/<int:threshold_id>",
methods=["GET", "PUT", "DELETE"],
view_func=thresholds_view
)
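# A hedged client-side sketch of the routes registered above; host, port and
# field values are illustrative only:
# >>> import requests, json
# >>> payload = {"type": "load", "warning_max": 5.0}
# >>> requests.post("http://localhost:5000/threshold",
# ...               data={"threshold": json.dumps(payload)})
# >>> requests.get("http://localhost:5000/threshold/1").json()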
@thresholds.route("/thresholds/")
def list_thresholds():
result = Threshold.query.order_by(Threshold.id)
if result.count():
return jsonify(thresholds=list(result))
else:
return "Not Found", 404
@thresholds.route("/lookup_threshold/<host>/<plugin>/<plugin_instance>/<type>/<type_instance>")
def lookup_threshold(host, plugin, plugin_instance, type, type_instance):
"""
Looks up a threshold in the database with similar parameters to the given
one.
Only thresholds with the same `type` will be looked up!
Sorting is based on the number of fields matching given parameters.
"""
def match(row):
value = 0
value += 4 if row.host == host else 0
value += 2 if row.plugin == plugin else 0
value += 1 if row.plugin_instance == plugin_instance else 0
value += 8 if row.type_instance == type_instance else 0
return value
host = None if host == "-" else host
plugin = None if plugin == "-" else plugin
plugin_instance = None if plugin_instance == "-" else plugin_instance
type_instance = None if type_instance == "-" else type_instance
result = Threshold.query.filter(Threshold.type == type)
result = list(result)
result.sort(key=match, reverse=True)
return jsonify(thresholds=result)
@thresholds.route("/generate_threshold")
def config_thresholds(pid=None):
"""
Saves data from the database into the threshold configuration file (set up in
current_app.config["collectd_threshold_file"]).
After a successful save, restarts the collectd daemon.
"""
# backup current config
filename = current_app.config.get("collectd_threshold_file",
"thresholds.conf")
filename = os.path.join(os.path.dirname(__file__), filename)
backup = filename + ".bak"
try:
shutil.copyfile(filename, backup)
except IOError:
return "Configuration file not spotted.", 404
result_set = Threshold.query.order_by(Threshold.host). \
order_by(Threshold.plugin).order_by(Threshold.type). \
order_by(Threshold.id)
try:
F = open(filename, "w")
F.write(generate_threshold_config(result_set))
F.close()
except IOError:
shutil.move(backup, filename)
return "Cannot save file.", 404
try:
# test if the new config works
result = subprocess.check_output(["collectd", "-t"])
if result:
# unfortunately there might be errors even though the process' return
# code is 0, but possible errors appear in the output, so we check
# whether any output exists
raise CalledProcessError(1, "collectd -t", output=result)
except (CalledProcessError, OSError):
# restore backup in case of failure
shutil.move(backup, filename)
return "Something in config is wrong, reverting.", 500
else:
os.remove(backup)
# restart the server in case of success
try:
pid = pid or subprocess.check_output(["pidof",
"collectdmon"]).strip().split()[0]
os.kill(int(pid), signal.SIGHUP)
except (subprocess.CalledProcessError, OSError):
return "Cannot restart collectd daemon. You should restart it " + \
"manually on your own.", 503
else:
return "Configuration updated, server restarted.", 200
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import print_function
import itertools
import os
import shutil
import time
from collections import namedtuple
from contextlib import contextmanager
import pkg_resources
from pkg_resources import safe_name
from .common import safe_mkdir
from .fetcher import Fetcher
from .interpreter import PythonInterpreter
from .iterator import Iterator, IteratorInterface
from .orderedset import OrderedSet
from .package import Package, distribution_compatible
from .platforms import Platform
from .resolvable import ResolvableRequirement, resolvables_from_iterable
from .resolver_options import ResolverOptionsBuilder
from .tracer import TRACER
from .util import DistributionHelper
@contextmanager
def patched_packing_env(env):
"""Monkey patch packaging.markers.default_environment"""
old_env = pkg_resources.packaging.markers.default_environment
new_env = lambda: env
pkg_resources._vendor.packaging.markers.default_environment = new_env
try:
yield
finally:
pkg_resources._vendor.packaging.markers.default_environment = old_env
class Untranslateable(Exception):
pass
class Unsatisfiable(Exception):
pass
class StaticIterator(IteratorInterface):
"""An iterator that iterates over a static list of packages."""
def __init__(self, packages, allow_prereleases=None):
self._packages = packages
self._allow_prereleases = allow_prereleases
def iter(self, req):
for package in self._packages:
if package.satisfies(req, allow_prereleases=self._allow_prereleases):
yield package
class _ResolvedPackages(namedtuple('_ResolvedPackages',
'resolvable packages parent constraint_only')):
@classmethod
def empty(cls):
return cls(None, OrderedSet(), None, False)
def merge(self, other):
if other.resolvable is None:
return _ResolvedPackages(self.resolvable, self.packages, self.parent, self.constraint_only)
return _ResolvedPackages(
self.resolvable,
self.packages & other.packages,
self.parent,
self.constraint_only and other.constraint_only)
class _ResolvableSet(object):
@classmethod
def normalize(cls, name):
return safe_name(name).lower()
def __init__(self, tuples=None):
# A list of _ResolvedPackages
self.__tuples = tuples or []
def _collapse(self):
# Collapse all resolvables by name along with the intersection of all compatible packages.
# If the set of compatible packages is the empty set, then we cannot satisfy all the
# specifications for a particular name (e.g. "setuptools==2.2 setuptools>4".)
#
# We need to return the resolvable since it carries its own network context and configuration
# regarding package precedence. This is arbitrary -- we could just as easily say "last
# resolvable wins" but it seems highly unlikely this will materially affect anybody
# adversely but could be the source of subtle resolution quirks.
resolvables = {}
for resolved_packages in self.__tuples:
key = self.normalize(resolved_packages.resolvable.name)
previous = resolvables.get(key, _ResolvedPackages.empty())
if previous.resolvable is None:
resolvables[key] = resolved_packages
else:
resolvables[key] = previous.merge(resolved_packages)
return resolvables
def _synthesize_parents(self, name):
def render_resolvable(resolved_packages):
return '%s%s' % (
str(resolved_packages.resolvable),
'(from: %s)' % resolved_packages.parent if resolved_packages.parent else '')
return ', '.join(
render_resolvable(resolved_packages) for resolved_packages in self.__tuples
if self.normalize(resolved_packages.resolvable.name) == self.normalize(name))
def _check(self):
# Check whether or not the resolvables in this set are satisfiable, raise an exception if not.
for name, resolved_packages in self._collapse().items():
if not resolved_packages.packages:
raise Unsatisfiable(
'Could not satisfy all requirements for %s:\n %s' % (
resolved_packages.resolvable,
self._synthesize_parents(name)
)
)
def merge(self, resolvable, packages, parent=None):
"""Add a resolvable and its resolved packages."""
self.__tuples.append(_ResolvedPackages(resolvable, OrderedSet(packages),
parent, resolvable.is_constraint))
self._check()
def get(self, name):
"""Get the set of compatible packages given a resolvable name."""
resolvable, packages, parent, constraint_only = self._collapse().get(
self.normalize(name), _ResolvedPackages.empty())
return packages
def packages(self):
"""Return a snapshot of resolvable => compatible packages set from the resolvable set."""
return list(self._collapse().values())
def extras(self, name):
return set.union(
*[set(tup.resolvable.extras()) for tup in self.__tuples
if self.normalize(tup.resolvable.name) == self.normalize(name)])
def replace_built(self, built_packages):
"""Return a copy of this resolvable set but with built packages.
:param dict built_packages: A mapping from a resolved package to its locally built package.
:returns: A new resolvable set with built package replacements made.
"""
def map_packages(resolved_packages):
packages = OrderedSet(built_packages.get(p, p) for p in resolved_packages.packages)
return _ResolvedPackages(resolved_packages.resolvable, packages,
resolved_packages.parent, resolved_packages.constraint_only)
return _ResolvableSet([map_packages(rp) for rp in self.__tuples])
class Resolver(object):
"""Interface for resolving resolvable entities into python packages."""
class Error(Exception): pass
def __init__(self, allow_prereleases=None, interpreter=None, platform=None,
pkg_blacklist=None, use_manylinux=None):
self._interpreter = interpreter or PythonInterpreter.get()
self._platform = Platform.create(platform) if platform else Platform.current()
self._allow_prereleases = allow_prereleases
self._blacklist = pkg_blacklist.copy() if pkg_blacklist else {}
self._supported_tags = self._platform.supported_tags(
self._interpreter,
use_manylinux
)
TRACER.log(
'R: tags for %r x %r -> %s' % (self._platform, self._interpreter, self._supported_tags),
V=9
)
def filter_packages_by_supported_tags(self, packages, supported_tags=None):
return [
package for package in packages
if package.compatible(supported_tags or self._supported_tags)
]
def package_iterator(self, resolvable, existing=None):
if existing:
existing = resolvable.compatible(
StaticIterator(existing, allow_prereleases=self._allow_prereleases))
else:
existing = resolvable.packages()
return self.filter_packages_by_supported_tags(existing)
def build(self, package, options):
context = options.get_context()
translator = options.get_translator(self._interpreter, self._supported_tags)
with TRACER.timed('Fetching %s' % package.url, V=2):
local_package = Package.from_href(context.fetch(package))
if local_package is None:
raise Untranslateable('Could not fetch package %s' % package)
with TRACER.timed('Translating %s into distribution' % local_package.local_path, V=2):
dist = translator.translate(local_package)
if dist is None:
raise Untranslateable('Package %s is not translateable by %s' % (package, translator))
if not distribution_compatible(dist, self._supported_tags):
raise Untranslateable(
'Could not get distribution for %s on platform %s.' % (package, self._platform))
return dist
def _resolvable_is_blacklisted(self, resolvable_name):
return (
resolvable_name in self._blacklist and
self._interpreter.identity.matches(self._blacklist[resolvable_name])
)
def resolve(self, resolvables, resolvable_set=None):
resolvables = [(resolvable, None) for resolvable in resolvables]
resolvable_set = resolvable_set or _ResolvableSet()
processed_resolvables = set()
processed_packages = {}
distributions = {}
while resolvables:
while resolvables:
resolvable, parent = resolvables.pop(0)
if resolvable in processed_resolvables:
continue
packages = self.package_iterator(resolvable, existing=resolvable_set.get(resolvable.name))
# TODO: Remove blacklist strategy in favor of smart requirement handling
# https://github.com/pantsbuild/pex/issues/456
if not self._resolvable_is_blacklisted(resolvable.name):
resolvable_set.merge(resolvable, packages, parent)
processed_resolvables.add(resolvable)
built_packages = {}
for resolvable, packages, parent, constraint_only in resolvable_set.packages():
if constraint_only:
continue
assert len(packages) > 0, 'ResolvableSet.packages(%s) should not be empty' % resolvable
package = next(iter(packages))
if resolvable.name in processed_packages:
if package == processed_packages[resolvable.name]:
continue
if package not in distributions:
dist = self.build(package, resolvable.options)
built_package = Package.from_href(dist.location)
built_packages[package] = built_package
distributions[built_package] = dist
package = built_package
distribution = distributions[package]
processed_packages[resolvable.name] = package
new_parent = '%s->%s' % (parent, resolvable) if parent else str(resolvable)
# We patch packaging.markers.default_environment here so we find optional reqs for the
# platform we're building the PEX for, rather than the one we're on.
with patched_packing_env(self._interpreter.identity.pkg_resources_env(self._platform)):
resolvables.extend(
(ResolvableRequirement(req, resolvable.options), new_parent) for req in
distribution.requires(extras=resolvable_set.extras(resolvable.name))
)
resolvable_set = resolvable_set.replace_built(built_packages)
# We may have built multiple distributions depending upon if we found transitive dependencies
# for the same package. But ultimately, resolvable_set.packages() contains the correct version
# for all packages. So loop through it and only return the package version in
# resolvable_set.packages() that is found in distributions.
dists = []
# No point in proceeding if distributions is empty
if not distributions:
return dists
for resolvable, packages, parent, constraint_only in resolvable_set.packages():
if constraint_only:
continue
assert len(packages) > 0, 'ResolvableSet.packages(%s) should not be empty' % resolvable
package = next(iter(packages))
dists.append(distributions[package])
return dists
class CachingResolver(Resolver):
"""A package resolver implementing a package cache."""
@classmethod
def filter_packages_by_ttl(cls, packages, ttl, now=None):
now = now if now is not None else time.time()
return [package for package in packages
if package.remote or package.local and (now - os.path.getmtime(package.local_path)) < ttl]
def __init__(self, cache, cache_ttl, *args, **kw):
self.__cache = cache
self.__cache_ttl = cache_ttl
safe_mkdir(self.__cache)
super(CachingResolver, self).__init__(*args, **kw)
# Short-circuiting package iterator.
def package_iterator(self, resolvable, existing=None):
iterator = Iterator(fetchers=[Fetcher([self.__cache])],
allow_prereleases=self._allow_prereleases)
packages = self.filter_packages_by_supported_tags(resolvable.compatible(iterator))
if packages and self.__cache_ttl:
packages = self.filter_packages_by_ttl(packages, self.__cache_ttl)
return itertools.chain(
packages,
super(CachingResolver, self).package_iterator(resolvable, existing=existing)
)
# Caching sandwich.
def build(self, package, options):
# cache package locally
if package.remote:
package = Package.from_href(options.get_context().fetch(package, into=self.__cache))
os.utime(package.local_path, None)
# build into distribution
dist = super(CachingResolver, self).build(package, options)
# if distribution is not in cache, copy
target = os.path.join(self.__cache, os.path.basename(dist.location))
if not os.path.exists(target):
shutil.copyfile(dist.location, target + '~')
os.rename(target + '~', target)
os.utime(target, None)
return DistributionHelper.distribution_from_path(target)
def platform_to_tags(platform, interpreter):
"""Splits a "platform" like linux_x86_64-36-cp-cp36m into its components.
If a simple platform without hyphens is specified, we will fall back to using
the current interpreter's tags.
"""
if platform.count('-') >= 3:
tags = platform.rsplit('-', 3)
else:
tags = [platform, interpreter.identity.impl_ver,
interpreter.identity.abbr_impl, interpreter.identity.abi_tag]
tags[0] = tags[0].replace('.', '_').replace('-', '_')
return tags
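# For illustration: an extended platform string splits into four tags, while a
# bare platform borrows the remaining tags from the interpreter (``interp``
# below stands for any PythonInterpreter instance):
# >>> platform_to_tags('linux_x86_64-36-cp-cp36m', interp)
# ['linux_x86_64', '36', 'cp', 'cp36m']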
def resolve(requirements,
fetchers=None,
interpreter=None,
platform=None,
context=None,
precedence=None,
cache=None,
cache_ttl=None,
allow_prereleases=None,
pkg_blacklist=None,
use_manylinux=None):
"""Produce all distributions needed to (recursively) meet `requirements`
:param requirements: An iterator of Requirement-like things, either
:class:`pkg_resources.Requirement` objects or requirement strings.
:keyword fetchers: (optional) A list of :class:`Fetcher` objects for locating packages. If
unspecified, the default is to look for packages on PyPI.
:keyword interpreter: (optional) A :class:`PythonInterpreter` object to use for building
distributions and for testing distribution compatibility.
:keyword versions: (optional) a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
:keyword platform: (optional) specify the exact platform you want valid
tags for, or None. If None, use the local system platform.
:keyword impl: (optional) specify the exact implementation you want valid
tags for, or None. If None, use the local interpreter impl.
:keyword abi: (optional) specify the exact abi you want valid
tags for, or None. If None, use the local interpreter abi.
:keyword context: (optional) A :class:`Context` object to use for network access. If
unspecified, the resolver will attempt to use the best available network context.
:keyword precedence: (optional) An ordered list of allowable :class:`Package` classes
to be used for producing distributions. For example, if precedence is supplied as
``(WheelPackage, SourcePackage)``, wheels will be preferred over building from source, and
eggs will not be used at all. If ``(WheelPackage, EggPackage)`` is supplied, both wheels and
eggs will be used, but the resolver will not resort to building anything from source.
:keyword cache: (optional) A directory to use to cache distributions locally.
:keyword cache_ttl: (optional integer in seconds) If specified, consider non-exact matches when
resolving requirements. For example, if ``setuptools==2.2`` is specified and setuptools 2.2 is
available in the cache, it will always be used. However, if a non-exact requirement such as
``setuptools>=2,<3`` is specified and there exists a setuptools distribution newer than
cache_ttl seconds that satisfies the requirement, then it will be used. If the distribution
is older than cache_ttl seconds, it will be ignored. If ``cache_ttl`` is not specified,
resolving inexact requirements will always result in making network calls through the
``context``.
:keyword allow_prereleases: (optional) Include pre-release and development versions. If
unspecified only stable versions will be resolved, unless explicitly included.
:keyword pkg_blacklist: (optional) A blacklist dict (str->str) that maps package name to
an interpreter constraint. If a package name is in the blacklist and its interpreter
constraint matches the target interpreter, skip the requirement. This is needed to ensure
that universal requirement resolves for a target interpreter version do not error out on
interpreter specific requirements such as backport libs like `functools32`.
For example, a valid blacklist is {'functools32': 'CPython>3'}.
NOTE: this keyword is a temporary fix and will be reverted in favor of a long term solution
tracked by: https://github.com/pantsbuild/pex/issues/456
:keyword use_manylinux: (optional) Whether or not to use manylinux for linux resolves.
:returns: List of :class:`pkg_resources.Distribution` instances meeting ``requirements``.
:raises Unsatisfiable: If ``requirements`` is not transitively satisfiable.
:raises Untranslateable: If no compatible distributions could be acquired for
a particular requirement.
This method improves upon the setuptools dependency resolution algorithm by maintaining sets of
all compatible distributions encountered for each requirement rather than the single best
distribution encountered for each requirement. This prevents situations where ``tornado`` and
``tornado==2.0`` could be treated as incompatible with each other because the "best
distribution" when encountering ``tornado`` was tornado 3.0. Instead, ``resolve`` maintains the
set of compatible distributions for each requirement as it is encountered, and iteratively filters
the set. If the set of distributions ever becomes empty, then ``Unsatisfiable`` is raised.
.. versionchanged:: 0.8
A number of keywords were added to make requirement resolution slightly easier to configure.
The optional ``obtainer`` keyword was replaced by ``fetchers``, ``translator``, ``context``,
``threads``, ``precedence``, ``cache`` and ``cache_ttl``, also all optional keywords.
.. versionchanged:: 1.0
The ``translator`` and ``threads`` keywords have been removed. The choice of threading
policy is now implicit. The choice of translation policy is dictated by ``precedence``
directly.
.. versionchanged:: 1.0
``resolver`` is now just a wrapper around the :class:`Resolver` and :class:`CachingResolver`
classes.
"""
builder = ResolverOptionsBuilder(fetchers=fetchers,
allow_prereleases=allow_prereleases,
use_manylinux=use_manylinux,
precedence=precedence,
context=context)
if cache:
resolver = CachingResolver(cache,
cache_ttl,
allow_prereleases=allow_prereleases,
use_manylinux=use_manylinux,
interpreter=interpreter,
platform=platform,
pkg_blacklist=pkg_blacklist)
else:
resolver = Resolver(allow_prereleases=allow_prereleases,
use_manylinux=use_manylinux,
interpreter=interpreter,
platform=platform,
pkg_blacklist=pkg_blacklist)
return resolver.resolve(resolvables_from_iterable(requirements, builder))
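# A minimal, hedged sketch of resolving a single requirement with a local
# distribution cache (requirement string and cache path are illustrative):
# >>> dists = resolve(['setuptools==2.2'], cache='/tmp/pex-cache', cache_ttl=3600)
# >>> [d.project_name for d in dists]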
def resolve_multi(requirements,
fetchers=None,
interpreters=None,
platforms=None,
context=None,
precedence=None,
cache=None,
cache_ttl=None,
allow_prereleases=None,
pkg_blacklist=None,
use_manylinux=None):
"""A generator function that produces all distributions needed to meet `requirements`
for multiple interpreters and/or platforms.
:param requirements: An iterator of Requirement-like things, either
:class:`pkg_resources.Requirement` objects or requirement strings.
:keyword fetchers: (optional) A list of :class:`Fetcher` objects for locating packages. If
unspecified, the default is to look for packages on PyPI.
:keyword interpreters: (optional) An iterable of :class:`PythonInterpreter` objects to use
for building distributions and for testing distribution compatibility.
:keyword platforms: (optional) An iterable of PEP425-compatible platform strings to use for
filtering compatible distributions. If unspecified, the current platform is used, as
determined by `Platform.current()`.
:keyword context: (optional) A :class:`Context` object to use for network access. If
unspecified, the resolver will attempt to use the best available network context.
:keyword precedence: (optional) An ordered list of allowable :class:`Package` classes
to be used for producing distributions. For example, if precedence is supplied as
``(WheelPackage, SourcePackage)``, wheels will be preferred over building from source, and
eggs will not be used at all. If ``(WheelPackage, EggPackage)`` is supplied, both wheels and
eggs will be used, but the resolver will not resort to building anything from source.
:keyword cache: (optional) A directory to use to cache distributions locally.
:keyword cache_ttl: (optional integer in seconds) If specified, consider non-exact matches when
resolving requirements. For example, if ``setuptools==2.2`` is specified and setuptools 2.2 is
available in the cache, it will always be used. However, if a non-exact requirement such as
``setuptools>=2,<3`` is specified and there exists a setuptools distribution newer than
cache_ttl seconds that satisfies the requirement, then it will be used. If the distribution
is older than cache_ttl seconds, it will be ignored. If ``cache_ttl`` is not specified,
resolving inexact requirements will always result in making network calls through the
``context``.
:keyword allow_prereleases: (optional) Include pre-release and development versions. If
unspecified only stable versions will be resolved, unless explicitly included.
:keyword pkg_blacklist: (optional) A blacklist dict (str->str) that maps package name to
an interpreter constraint. If a package name is in the blacklist and its interpreter
constraint matches the target interpreter, skip the requirement. This is needed to ensure
that universal requirement resolves for a target interpreter version do not error out on
interpreter specific requirements such as backport libs like `functools32`.
For example, a valid blacklist is {'functools32': 'CPython>3'}.
NOTE: this keyword is a temporary fix and will be reverted in favor of a long term solution
tracked by: https://github.com/pantsbuild/pex/issues/456
:yields: All :class:`pkg_resources.Distribution` instances meeting ``requirements``.
:raises Unsatisfiable: If ``requirements`` is not transitively satisfiable.
:raises Untranslateable: If no compatible distributions could be acquired for
a particular requirement.
"""
interpreters = interpreters or [PythonInterpreter.get()]
platforms = platforms or [Platform.current()]
seen = set()
for interpreter in interpreters:
for platform in platforms:
for resolvable in resolve(requirements,
fetchers,
interpreter,
platform,
context,
precedence,
cache,
cache_ttl,
allow_prereleases,
pkg_blacklist=pkg_blacklist,
use_manylinux=use_manylinux):
if resolvable not in seen:
seen.add(resolvable)
yield resolvable
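# resolve_multi() yields each matching distribution once across the
# interpreter/platform product; a hedged sketch targeting two platforms:
# >>> for dist in resolve_multi(['setuptools==2.2'],
# ...                           platforms=['linux_x86_64', 'macosx_10_12_x86_64']):
# ...     print(dist)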
|
|
import subprocess
import re
import os
import errno
import glob
from collections import OrderedDict
from os.path import expanduser
from bricklayer.doctor.metrics import Metrics
from jinja2 import Environment, PackageLoader
class TLSystem(object):
HOME = expanduser("~")
TL_HOME = os.environ.get("TL_HOME", os.path.join(HOME, "TL/TL_System"))
TL_PROJECT_HOME = os.environ.get("TL_PROJECT_HOME", os.path.join(HOME, "TL_Project"))
MLTON_BIN = "/usr/local/bin/mlton"
MLLEX_BIN = "/usr/local/bin/mllex"
SML_COMMENT_REGEX = r"\(\*(.|\s)*?\*\)"
TRANSPILE_ARGS = [
os.path.join(TL_PROJECT_HOME, "Transformation", "bin", "transform"),
"@MLton",
"load-world",
os.path.join(TL_PROJECT_HOME, "Transformation", "bin", "transform.mlton"),
"--",
"--dir={}".format(os.path.join(TL_PROJECT_HOME, "Transformation")),
"--tlp=smlToPython",
"--target-dir={}".format(os.path.join(TL_PROJECT_HOME, "Target")),
"--target-index=0",
"--target-type=single"
]
@staticmethod
def surround_with_quotes(string):
return '"{}"'.format(string)
@classmethod
def compile_sml_file_args(cls, directory, filename):
return map(cls.surround_with_quotes, [
cls.MLTON_BIN,
"-link-opt",
"-fno-PIE",
"-mlb-path-var",
"TL {}".format(cls.TL_HOME),
"-mlb-path-var",
"DOMAIN {}".format(cls.TL_PROJECT_HOME),
"-output",
os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", filename),
"-verbose",
"1",
"-const",
"Exn.keepHistory false",
"-profile",
"no",
"-profile-branch",
"false",
"-profile-val",
"false",
os.path.join(cls.TL_HOME, directory, "{}.mlb".format(filename)),
])
@classmethod
def build_tl_args(cls):
build_tokens_args = [
"cp",
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "Syntax", "bricklayer.spec")),
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", "target_tokens.spec")),
'&&',
cls.surround_with_quotes(cls.MLLEX_BIN),
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", "target_tokens.spec")),
]
build_parser_args = cls.compile_sml_file_args("Parse", "parser") + [
"&&",
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", "parser")),
cls.surround_with_quotes("-link-opt"),
cls.surround_with_quotes("-fno-PIE"),
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation")),
cls.surround_with_quotes("bricklayer.bnf"),
cls.surround_with_quotes("sml_prog")
]
build_parser_table_args = [
"cd",
cls.surround_with_quotes(os.path.join(cls.TL_HOME, "Engine")),
"&&",
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", "parser")),
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation")),
cls.surround_with_quotes("bricklayer.bnf"),
cls.surround_with_quotes("sml_prog")
]
build_transformer_args = cls.compile_sml_file_args("Transform", "transform") + [
"&&",
cls.surround_with_quotes(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", "transform")),
cls.surround_with_quotes("--dir={}".format(os.path.join(cls.TL_PROJECT_HOME, "Transformation"))),
cls.surround_with_quotes("--grammar=bricklayer.bnf"),
cls.surround_with_quotes("--start-symbol=sml_prog")
]
build_pretty_printer_args = [
"cp",
cls.surround_with_quotes(
os.path.join(cls.TL_PROJECT_HOME, "Transformation", "Syntax",
"format.sty")),
cls.surround_with_quotes(
os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin",
"format.sty.sml")),
"&&",
] + cls.compile_sml_file_args("PrettyPrint", "prettyprint")
args = reduce(lambda command1, command2: command1 + [';'] + command2,
[build_tokens_args, build_parser_args, build_parser_table_args, build_transformer_args,
build_pretty_printer_args])
return args
@classmethod
def build_parse_tlp_file_args(cls, tlp_file):
return map(cls.surround_with_quotes, [
os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", "parser"),
"@MLton",
"load-world",
os.path.join(cls.TL_PROJECT_HOME, "Transformation", "bin", "parser.mlton"),
"--",
"TLP",
os.path.join(cls.TL_PROJECT_HOME, "Transformation"),
tlp_file
])
@classmethod
def initialize_system(cls):
cls.create_transpile_directory()
args = cls.build_tl_args()
print(' '.join(args))
subprocess.call(' '.join(args), shell=True)
@classmethod
def create_transpile_directory(cls):
for target_dir in ["SML_Modules", "TL_Modules", "Syntax", "bin"]:
try:
os.makedirs(os.path.join(cls.TL_PROJECT_HOME, "Transformation", target_dir))
except OSError as e:
if e.errno != errno.EEXIST:
raise
for filename in ['format.sty', 'bricklayer.spec', 'bricklayer.bnf']:
# make the files under syntax
with open(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "Syntax", filename), 'w') as outfile:
env = Environment(loader=PackageLoader('bricklayer', 'templates'))
template = env.get_template(filename)
outfile.write(template.render())
with open(os.path.join(cls.TL_PROJECT_HOME, "Transformation", "TL_Modules", "smlToPython.tlp"), 'w') as outfile:
env = Environment(loader=PackageLoader('bricklayer', 'templates'))
template = env.get_template('smlToPython.tlp')
outfile.write(template.render())
@classmethod
def transpile_all_files(cls):
directory_map = OrderedDict([
('Level_1', ['Assignment_1', 'Assignment_2', 'Assignment_3']),
('Level_2', ['Assignment_4', 'Assignment_5', 'Assignment_6', 'Assignment_7']),
('Level_3', ['Assignment_9', 'Assignment_10', 'Assignment_11', 'Assignment_13', 'Assignment_14']),
('Level_4', ['Assignment_15', 'Assignment_16']),
])
# Clear the stats file
with open(os.path.join(cls.TL_PROJECT_HOME, "Target", "2", "stats.csv"), "w") as output_file:
pass
for parent_dir, children_dirs in directory_map.items():
for child_dir in children_dirs:
full_dir = os.path.join(cls.TL_PROJECT_HOME, "Target", "0", parent_dir, child_dir)
os.chdir(full_dir)
filelist = glob.glob("*no_comments.bl")
for f in filelist:
os.remove(f)
for filename in os.listdir(full_dir):
TLSystem.transpile(os.path.join(parent_dir, child_dir, filename))
@classmethod
def transpile(cls, filename):
new_file = cls.replace_newlines_with_spaces_in_file(filename)
subprocess.call(' '.join(cls.build_parse_tlp_file_args("smlToPython")), shell=True)
subprocess.call(' '.join(map(cls.surround_with_quotes, cls.TRANSPILE_ARGS + [new_file])), shell=True)
metrics = Metrics()
try:
grade = cls.get_grade_from_file(filename)
submission = cls.get_submission_from_file(filename)
metrics.collect_metrics(os.path.join(cls.TL_PROJECT_HOME, "Target", "1", "transpiled.py"))
cls.append_metrics_to_log_file(metrics, grade, submission, filename)
except SyntaxError:
print "Error collecting metrics of filename " + filename
@classmethod
def remove_comments(cls, string):
return re.sub(cls.SML_COMMENT_REGEX, "", string)
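    # NOTE: despite its name, replace_newlines_with_spaces below only
    # normalizes carriage returns ("\r") to newlines; the rewritten copy of
    # the input is saved with a "_no_comments" suffix by
    # replace_newlines_with_spaces_in_file.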
@classmethod
def replace_newlines_with_spaces(cls, string):
return string.replace("\r", "\n")
@classmethod
def replace_newlines_with_spaces_in_file(cls, filename):
name, ext = os.path.splitext(filename)
new_filename = os.path.join(cls.TL_PROJECT_HOME, "Target", "0", name + "_no_comments" + ext)
with open(os.path.join(cls.TL_PROJECT_HOME, "Target", "0", filename), 'rb') as input_file:
output = cls.replace_newlines_with_spaces(input_file.read())
with open(new_filename, 'wb') as output_file:
output_file.write(output)
return name + "_no_comments" + ext
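    # Each transpiled file contributes one CSV row with the columns:
    # cyclomatic_complexity, source_lines_of_code, comments,
    # user_defined_functions, level, submission, grade, filename.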
@classmethod
def append_metrics_to_log_file(cls, metrics, grade, submission, filename):
with open(os.path.join(cls.TL_PROJECT_HOME, "Target", "2", "stats.csv"), "a+") as output_file:
output_file.write("\n" + ",".join(map(str, [metrics.cyclomatic_complexity, metrics.source_lines_of_code,
metrics.comments, metrics.user_defined_functions, metrics.level,
submission, grade, filename])))
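    # Maps the "@grade <numerator> / <denominator>" annotation in a source
    # file to a letter grade on a conventional plus/minus scale
    # (>= 94% -> A, >= 90% -> A-, ..., below 60% -> F).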
@classmethod
def get_grade_from_file(cls, filename):
with open(os.path.join(cls.TL_PROJECT_HOME, "Target", "0", filename), 'rb') as input_file:
data = input_file.read()
groups = re.search('.*?@grade\s+([0-9]+) / ([0-9]+)', data)
numerator = groups.group(1)
denominator = groups.group(2)
grade = float(numerator) / float(denominator)
if grade >= .94:
return 'A'
elif grade >= .9:
return 'A-'
elif grade >= .87:
return 'B+'
elif grade >= .84:
return 'B'
elif grade >= .8:
return 'B-'
elif grade >= .76:
return 'C+'
elif grade >= .7:
return 'C'
elif grade >= .67:
return 'C-'
elif grade >= .64:
return 'D+'
elif grade >= .6:
return 'D'
else:
return 'F'
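    # The "@submission" annotation is encoded numerically: "on" (on time) -> 1,
    # "early" -> 2, anything else -> 3.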
@classmethod
def get_submission_from_file(cls, filename):
with open(os.path.join(cls.TL_PROJECT_HOME, "Target", "0", filename), 'rb') as input_file:
data = input_file.read()
groups = re.search('.*?@submission\s+([A-Za-z]+)', data)
print groups.group(1)
submission = groups.group(1)
if submission == "on":
return 1
elif submission == "early":
return 2
else:
return 3
|
|
# -*- coding: utf-8 -*-
"""
Setup script for atoman
@author: Chris Scott
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import glob
import sys
import subprocess
import shutil
import platform
import tempfile
# setuptools is required for entry point
import setuptools
import distutils.sysconfig
import versioneer
# check for openmp following
# http://stackoverflow.com/questions/16549893/programatically-testing-for-openmp-support-from-a-python-setup-script
# see http://openmp.org/wp/openmp-compilers/
omp_test = br"""
#include <omp.h>
#include <stdio.h>
int main() {
#pragma omp parallel
printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
}
"""
TEST_OMP_FLAGS = [
"-fopenmp",
"-qopenmp",
]
def check_for_openmp():
try:
cc = os.environ['CC']
except KeyError:
cc = 'gcc'
curdir = os.getcwd()
for omp_flag in TEST_OMP_FLAGS:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
try:
filename = r'test.c'
with open(filename, 'wb', 0) as file:
file.write(omp_test)
with open(os.devnull, 'wb') as fnull:
result = subprocess.call([cc, omp_flag, filename],
stdout=fnull, stderr=fnull)
print('check_for_openmp() result for {}: '.format(omp_flag), result)
if result == 0:
break
        finally:
            # clean up: change back out of tmpdir before removing it
            os.chdir(curdir)
            shutil.rmtree(tmpdir)
return result == 0, omp_flag
HAVE_OMP, OMP_FLAG = check_for_openmp()
print("Have OpenMP: ", HAVE_OMP)
if HAVE_OMP:
print("OpenMP flag: ", OMP_FLAG)
# sphinx build
try:
from sphinx.setup_command import BuildDoc
HAVE_SPHINX = True
except ImportError:
HAVE_SPHINX = False
if HAVE_SPHINX:
class AtomanBuildDoc(BuildDoc):
"""Compile resources and run in-place build before Sphinx doc-build"""
def run(self):
# in place build
ret = subprocess.call([sys.executable, sys.argv[0], 'build_ext', '-i'])
if ret != 0:
raise RuntimeError("Building atoman failed (%d)" % ret)
# build doc
BuildDoc.run(self)
# package configuration method
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path, version=versioneer.get_version())
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage("atoman")
config.add_data_dir(("atoman/doc", os.path.join("doc", "build", "html")))
return config
# clean
def do_clean():
cwd = os.getcwd()
os.chdir("atoman")
try:
for root, dirs, files in os.walk(os.getcwd()):
so_files = glob.glob(os.path.join(root, "*.so"))
for so_file in so_files:
print("rm atoman/%s" % os.path.relpath(so_file))
os.unlink(so_file)
if "resources.py" in files:
os.unlink(os.path.join(root, "resources.py"))
pyc_files = glob.glob(os.path.join(root, "*.pyc"))
for pyc_file in pyc_files:
os.unlink(pyc_file)
finally:
os.chdir(cwd)
for root, dirs, files in os.walk(os.getcwd()):
cachepth = os.path.join(root, "__pycache__")
if os.path.isdir(cachepth):
shutil.rmtree(cachepth)
if os.path.isdir("atoman/doc"):
print("rm -rf atoman/doc")
shutil.rmtree(os.path.join("atoman", "doc"))
# if os.path.isdir(os.path.join("doc", "build")):
# print("rm -rf doc/build/*")
# os.system("rm -rf doc/build/*")
if os.path.isdir("dist"):
print("rm -rf dist/")
shutil.rmtree("dist")
if os.path.isdir("build"):
print("rm -rf build/")
shutil.rmtree("build")
if os.path.isdir("atoman.egg-info"):
print("rm -rf atoman.egg-info/")
shutil.rmtree("atoman.egg-info")
# setup the package
def setup_package():
# clean?
if "clean" in sys.argv:
do_clean()
# documentation (see scipy...)
cmdclass = versioneer.get_cmdclass()
if HAVE_SPHINX:
cmdclass['build_sphinx'] = AtomanBuildDoc
# metadata
metadata = dict(
name="Atoman",
maintainer="Chris Scott",
maintainer_email="[email protected]",
description="Analysis and visualisation of atomistic simulations",
long_description="Analysis and visualisation of atomistic simulations",
author="Chris Scott",
author_email="[email protected]",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: X11 Applications",
"Environment :: X11 Applications :: Qt",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: MacOS",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: C",
"Programming Language :: C++",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Visualization",
],
platforms=["Linux", "Mac OS-X"],
cmdclass=cmdclass,
entry_points={
'gui_scripts': [
'Atoman = atoman.__main__:main',
]
},
zip_safe=False,
)
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info',
'--version', 'clean', 'nosetests',
'test')):
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = versioneer.get_version()
metadata['test_suite'] = "nose.collector"
else:
from numpy.distutils.core import setup
from numpy.distutils.command.build_ext import build_ext
from numpy.distutils.command.build_clib import build_clib
# subclass build_ext to use additional compiler options (eg. for OpenMP)
class build_ext_subclass(build_ext):
def build_extensions(self, *args, **kwargs):
for e in self.extensions:
if HAVE_OMP:
e.extra_compile_args.append(OMP_FLAG)
e.extra_link_args.append(OMP_FLAG)
e.include_dirs.append(distutils.sysconfig.get_python_inc())
return build_ext.build_extensions(self, *args, **kwargs)
# subclass build_clib to use additional compiler options (eg. for OpenMP)
class build_clib_subclass(build_clib):
def build_libraries(self, *args, **kwargs):
for libtup in self.libraries:
opts = libtup[1]
if HAVE_OMP:
if "extra_compiler_args" not in opts:
opts["extra_compiler_args"] = []
opts["extra_compiler_args"].append(OMP_FLAG)
if "include_dirs" not in opts:
opts["include_dirs"] = []
opts["include_dirs"].append(distutils.sysconfig.get_python_inc())
return build_clib.build_libraries(self, *args, **kwargs)
cmdclass["build_ext"] = build_ext_subclass
cmdclass["build_clib"] = build_clib_subclass
metadata["configuration"] = configuration
# run setup
setup(**metadata)
if __name__ == "__main__":
setup_package()
if not HAVE_OMP:
print("Warning: building without OpenMP - it will be slow")
|
|
import os.path
import dj_database_url
from django.contrib.messages import constants as messages
DEBUG = bool(os.environ.get('DEBUG', False))
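# Note: bool() of an environment string is True for any non-empty value, so
# DEBUG=false in the environment still enables debug mode; unset DEBUG to
# disable it. The same applies to EMAIL_USE_TLS below.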
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
ROOT_URLCONF = 'saleor.urls'
WSGI_APPLICATION = 'saleor.wsgi.application'
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
INTERNAL_IPS = os.environ.get('INTERNAL_IPS', '127.0.0.1').split()
SQLITE_DB_URL = 'sqlite:///' + os.path.join(PROJECT_ROOT, 'dev.sqlite')
DATABASES = {'default': dj_database_url.config(default=SQLITE_DB_URL)}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
EMAIL_BACKEND = ('django.core.mail.backends.%s.EmailBackend' %
os.environ.get('EMAIL_BACKEND_MODULE', 'console'))
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
EMAIL_USE_TLS = bool(os.environ.get('EMAIL_USE_TLS', False))
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'saleor', 'static')
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder'
]
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, 'templates')
]
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
    # TODO: this one is slow, but for now it is needed for mptt
'django.template.loaders.eggs.Loader'
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('SECRET_KEY', '{{ secret_key }}')
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'babeldjango.middleware.LocaleMiddleware',
'saleor.cart.middleware.CartMiddleware',
'saleor.core.middleware.DiscountMiddleware',
'saleor.core.middleware.GoogleAnalytics',
'saleor.core.middleware.CheckHTML'
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'saleor.core.context_processors.canonical_hostname',
'saleor.core.context_processors.default_currency'
]
INSTALLED_APPS = [
# External apps that need to go before django's
'offsite_storage',
# Django modules
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sitemaps',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.webdesign',
# Local apps
'saleor.cart',
'saleor.checkout',
'saleor.core',
'saleor.product',
'saleor.order',
'saleor.registration',
'saleor.userprofile',
'saleor.dashboard',
# External apps
'versatileimagefield',
'babeldjango',
'django_prices',
'emailit',
'mptt',
'payments',
'selectable'
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['require_debug_true'],
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'saleor': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True
}
}
}
AUTHENTICATION_BACKENDS = (
'saleor.registration.backends.EmailPasswordBackend',
'saleor.registration.backends.ExternalLoginBackend',
'saleor.registration.backends.TrivialBackend'
)
AUTH_USER_MODEL = 'userprofile.User'
CANONICAL_HOSTNAME = os.environ.get('CANONICAL_HOSTNAME', 'localhost:8000')
LOGIN_URL = '/account/login'
WARN_ABOUT_INVALID_HTML5_OUTPUT = False
DEFAULT_CURRENCY = 'USD'
ACCOUNT_ACTIVATION_DAYS = 3
LOGIN_REDIRECT_URL = 'home'
FACEBOOK_APP_ID = os.environ.get('FACEBOOK_APP_ID')
FACEBOOK_SECRET = os.environ.get('FACEBOOK_SECRET')
GOOGLE_ANALYTICS_TRACKING_ID = os.environ.get('GOOGLE_ANALYTICS_TRACKING_ID')
GOOGLE_CLIENT_ID = os.environ.get('GOOGLE_CLIENT_ID')
GOOGLE_CLIENT_SECRET = os.environ.get('GOOGLE_CLIENT_SECRET')
PAYMENT_BASE_URL = 'http://%s/' % CANONICAL_HOSTNAME
PAYMENT_MODEL = 'order.Payment'
PAYMENT_VARIANTS = {
'default': ('payments.dummy.DummyProvider', {})
}
PAYMENT_HOST = os.environ.get('PAYMENT_HOST', 'localhost:8000')
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
CHECKOUT_PAYMENT_CHOICES = [
('default', 'Dummy provider')
]
TEMPLATE_STRING_IF_INVALID = '<< MISSING VARIABLE >>'
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
LOW_STOCK_THRESHOLD = 10
TEST_RUNNER = ''
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Amazon S3 configuration
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STATIC_BUCKET_NAME = os.environ.get('AWS_STATIC_BUCKET_NAME')
AWS_MEDIA_ACCESS_KEY_ID = os.environ.get('AWS_MEDIA_ACCESS_KEY_ID')
AWS_MEDIA_SECRET_ACCESS_KEY = os.environ.get('AWS_MEDIA_SECRET_ACCESS_KEY')
AWS_MEDIA_BUCKET_NAME = os.environ.get('AWS_MEDIA_BUCKET_NAME')
if AWS_STATIC_BUCKET_NAME:
STATICFILES_STORAGE = 'offsite_storage.storages.CachedS3FilesStorage'
if AWS_MEDIA_BUCKET_NAME:
DEFAULT_FILE_STORAGE = 'offsite_storage.storages.S3MediaStorage'
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-GPU tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.contrib.distribute.python import values
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
GPU_TEST = "test_gpu" in sys.argv[0]
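# The GPU variant of this test is detected from the test binary name
# (sys.argv[0] containing "test_gpu"); that variant asserts that at least one
# GPU is available.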
class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase):
def _get_distribution_strategy(self):
devices = ["/device:CPU:0", "/device:GPU:0"]
if GPU_TEST:
self.assertGreater(context.num_gpus(), 0)
if context.num_gpus() > 1:
devices = ["/device:GPU:0", "/device:GPU:1"]
print(self.id().split(".")[-1], "devices:", ", ".join(devices))
return mirrored_strategy.MirroredStrategy(devices)
def testMinimizeLossEager(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_minimize_loss_eager(self._get_distribution_strategy())
def testMinimizeLossGraph(self):
soft_placement = not GPU_TEST
print("testMinimizeLossGraph soft_placement:", soft_placement)
self._test_minimize_loss_graph(
self._get_distribution_strategy(), soft_placement=soft_placement)
def testMapReduce(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_map_reduce(self._get_distribution_strategy())
def testDeviceIndex(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_device_index(self._get_distribution_strategy())
def testTowerId(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_tower_id(self._get_distribution_strategy())
def testNumTowers(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self.assertEqual(2, self._get_distribution_strategy().num_towers)
@test_util.run_in_graph_and_eager_modes()
def testCallAndMergeExceptions(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_call_and_merge_exceptions(self._get_distribution_strategy())
@test_util.run_in_graph_and_eager_modes()
def testRunRegroupError(self):
def run_fn(device_id):
# Generates a list with different lengths on different devices.
# Will fail in _regroup() (if more than one device).
return list(range(device_id))
dist = self._get_distribution_strategy()
with dist.scope(), self.assertRaises(AssertionError):
dist.call_for_each_tower(run_fn, dist.worker_device_index)
@test_util.run_in_graph_and_eager_modes()
def testReduceToCpu(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
def run_fn(device_id):
return device_id
dist = self._get_distribution_strategy()
with dist.scope():
result = dist.call_for_each_tower(run_fn, dist.worker_device_index)
reduced = dist.reduce("sum", result, destinations="/device:CPU:0")
unwrapped = dist.unwrap(reduced)
self.assertEqual(1, len(unwrapped))
expected = sum(range(len(dist.worker_devices)))
self.assertEqual(expected, self.evaluate(unwrapped[0]))
class MirroredStrategyVariableCreationTest(test.TestCase):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
def _skip_eager_if_gpus_less_than(self, num_gpus):
if context.num_gpus() < num_gpus and context.executing_eagerly():
self.skipTest("Enough GPUs not available for this test in eager mode.")
@test_util.run_in_graph_and_eager_modes(config=config)
def testSingleVariable(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
# This variable should be created only once across the threads because of
# special variable_creator functions used by `dist.call_for_each_tower`.
v = variable_scope.variable(1.0, name="foo")
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEquals("foo:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testUnnamedVariable(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
v = variable_scope.variable(1.0)
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
# Default name of "Variable" will be used.
self.assertEquals("Variable:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testMultipleVariables(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
vs = []
for i in range(5):
vs.append(variable_scope.variable(1.0, name="foo" + str(i)))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return vs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
for i, v in enumerate(result):
self.assertIsInstance(v, values.MirroredVariable)
self.assertEquals("foo" + str(i) + ":0", v.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testMultipleVariablesWithSameCanonicalName(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
vs = []
vs.append(variable_scope.variable(1.0, name="foo/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar_1"))
vs.append(variable_scope.variable(1.0, name="foo/bar_1"))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return vs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
for v in result:
self.assertIsInstance(v, values.MirroredVariable)
self.assertEquals(4, len(result))
self.assertEquals("foo/bar:0", result[0].name)
self.assertEquals("foo_1/bar:0", result[1].name)
self.assertEquals("foo_1/bar_1:0", result[2].name)
self.assertEquals("foo/bar_1:0", result[3].name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testVariableWithSameCanonicalNameAcrossThreads(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(device_id):
v = variable_scope.variable(1.0, name="foo_" + str(device_id))
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(
model_fn, dist.worker_device_index, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
# The resulting mirrored variable will use the name from the first device.
self.assertEquals("foo_0:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testWithLayers(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(features):
with variable_scope.variable_scope("common"):
layer1 = core.Dense(1)
layer1(features)
layer2 = core.Dense(1)
layer2(features)
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
layer3 = core.Dense(1)
layer3(features)
return [(layer1.kernel, layer1.bias),
(layer2.kernel, layer2.bias),
(layer3.kernel, layer3.bias)]
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
features = dist.distribute_dataset(
lambda: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10)
).make_one_shot_iterator().get_next()
with dist.scope():
result = dist.call_for_each_tower(
model_fn, features, run_concurrently=False)
suffixes = ["", "_1", "_2"]
for (kernel, bias), suffix in zip(result, suffixes):
self.assertIsInstance(kernel, values.MirroredVariable)
self.assertEquals("common/dense" + suffix + "/kernel:0", kernel.name)
self.assertIsInstance(bias, values.MirroredVariable)
self.assertEquals("common/dense" + suffix + "/bias:0", bias.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testWithGetVariableAndVariableScope(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn():
v0 = variable_scope.get_variable("var-thread0", [1])
with variable_scope.variable_scope("common"):
v1 = variable_scope.get_variable("var-thread1", [1])
# This will pause the current thread, and execute the other thread.
distribute_lib.get_tower_context().merge_call(lambda _: _)
v2 = variable_scope.get_variable("var-thread2", [1])
return v0, v1, v2
devices = ["/device:CPU:0", "/device:GPU:0"]
dist = mirrored_strategy.MirroredStrategy(devices)
with dist.scope():
with variable_scope.variable_scope("main"):
v = variable_scope.get_variable("var-main0", [1])
self.assertEquals("main/var-main0:0", v.name)
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(3, len(result))
v0, v1, v2 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEquals("main/var-thread0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEquals("main/common/var-thread1:0", v1.name)
self.assertIsInstance(v2, values.MirroredVariable)
self.assertEquals("main/common/var-thread2:0", v2.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testThreeDevices(self):
self._skip_eager_if_gpus_less_than(2)
def model_fn():
v = variable_scope.variable(1.0, name="foo")
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"])
with dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEquals("foo:0", result.name)
@test_util.run_in_graph_and_eager_modes(config=config)
def testNonMatchingVariableCreation(self):
self._skip_eager_if_gpus_less_than(1)
def model_fn(name):
v = variable_scope.variable(1.0, name=name)
distribute_lib.get_tower_context().merge_call(lambda _: _)
return v
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
names = values.DistributedValues({
"/device:CPU:0": "foo",
"/device:GPU:0": "bar"
})
with self.assertRaises(RuntimeError):
_ = dist.call_for_each_tower(model_fn, names, run_concurrently=False)
@test_util.run_in_graph_and_eager_modes(config=config)
def testTowerLocalVariable(self):
self._skip_eager_if_gpus_less_than(1)
all_v_sum = {}
all_v_mean = {}
def model_fn(device_id):
tower_context = distribute_lib.get_tower_context()
with tower_context.tower_local_var_scope("sum"):
v_sum = variable_scope.variable(1.0)
with tower_context.tower_local_var_scope("mean"):
v_mean = variable_scope.variable(4.0)
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
self.assertTrue(isinstance(v_mean, values.TowerLocalVariable))
updates = [v_sum.assign_add(2.0 + device_id),
v_mean.assign(6.0 * device_id)]
all_v_sum[device_id] = v_sum
all_v_mean[device_id] = v_mean
return updates, v_sum, v_mean
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with dist.scope():
# Create "sum" and "mean" versions of TowerLocalVariables.
ret_ops, ret_v_sum, ret_v_mean = dist.call_for_each_tower(
model_fn, dist.worker_device_index, run_concurrently=False)
# Should see the same wrapping instance in all towers.
self.assertIs(all_v_sum[0], ret_v_sum)
self.assertIs(all_v_mean[0], ret_v_mean)
for i in range(1, dist.num_towers):
        self.assertIs(all_v_sum[0], all_v_sum[i])
        self.assertIs(all_v_mean[0], all_v_mean[i])
# Apply updates
self.evaluate(variables.global_variables_initializer())
self.evaluate([y for x in ret_ops for y in dist.unwrap(x)])
expected_sum = 0.0
expected_mean = 0.0
for i, d in enumerate(dist.worker_devices):
# Should see different values on different devices.
v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())
v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())
expected = i + 3.0
self.assertEqual(expected, v_sum_value)
expected_sum += expected
expected = i * 6.0
self.assertEqual(expected, v_mean_value)
expected_mean += expected
expected_mean /= len(dist.worker_devices)
# Without get(device), should return the value you get by
# applying the reduction across all towers (whether you use
# fetch(), get(), or nothing).
self.assertEqual(expected_sum, self.evaluate(dist.fetch(ret_v_sum)))
self.assertEqual(expected_mean, self.evaluate(dist.fetch(ret_v_mean)))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))
if not context.executing_eagerly():
self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean))
# NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
# testing this in eager mode.
def testNameScope(self):
def model_fn():
with ops.name_scope("foo"):
a = constant_op.constant(1.0, name="a")
distribute_lib.get_tower_context().merge_call(lambda _: _)
b = constant_op.constant(1.0, name="b")
return a, b
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = dist.unwrap(v)
self.assertEquals("main/foo/" + name + ":0", v0.name)
self.assertEquals("main/tower_1/foo/" + name + ":0", v1.name)
def testWithDefaultName(self):
def model_fn():
with ops.name_scope(None, "foo"):
a = constant_op.constant(1.0, name="a")
distribute_lib.get_tower_context().merge_call(lambda _: _)
b = constant_op.constant(2.0, name="b")
return a, b
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
self.assertEquals(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = dist.unwrap(v)
self.assertEquals("foo/" + name + ":0", v0.name)
self.assertEquals("tower_1/foo/" + name + ":0", v1.name)
# variable_scope.variable() respects name scopes when creating
# variables. On the other hand variable_scope.get_variable() ignores name
# scopes when creating variables. We test both methods of creating variables
# to make sure that we have the same variable names in both cases.
def testNameScopeWithVariable(self):
def in_cross_tower(_):
c = variable_scope.variable(1.0, name="c")
return c
def model_fn():
b = variable_scope.variable(1.0, name="b")
with ops.name_scope("foo"):
c = distribute_lib.get_tower_context().merge_call(in_cross_tower)
return b, c
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
a = variable_scope.variable(1.0, name="a")
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = dist.unwrap(a)
b0, b1 = dist.unwrap(result_b)
c0, c1 = dist.unwrap(result_c)
self.assertEquals("main/a:0", a0.name)
self.assertEquals("main/a/replica_1:0", a1.name)
self.assertEquals("main/b:0", b0.name)
self.assertEquals("main/b/replica_1:0", b1.name)
self.assertEquals("main/foo/c:0", c0.name)
self.assertEquals("main/foo/c/replica_1:0", c1.name)
def testNameScopeWithGetVariable(self):
def in_cross_tower(_):
c = variable_scope.get_variable("c", [1])
return c
def model_fn():
b = variable_scope.get_variable("b", [1])
with ops.name_scope("foo"):
c = distribute_lib.get_tower_context().merge_call(in_cross_tower)
return b, c
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
a = variable_scope.get_variable("a", [1])
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = dist.unwrap(a)
b0, b1 = dist.unwrap(result_b)
c0, c1 = dist.unwrap(result_c)
self.assertEquals("a:0", a0.name)
self.assertEquals("a/replica_1:0", a1.name)
self.assertEquals("b:0", b0.name)
self.assertEquals("b/replica_1:0", b1.name)
self.assertEquals("c:0", c0.name)
self.assertEquals("c/replica_1:0", c1.name)
def testDynamicRnnVariables(self):
def model_fn():
inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
cell_fw = rnn_cell_impl.LSTMCell(300)
cell_bw = rnn_cell_impl.LSTMCell(300)
(outputs, _) = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32)
return outputs
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
# Two variables are created by the RNN layer.
self.assertEquals(2, len(result))
for v in result:
self.assertIsInstance(v, values.DistributedValues)
_, v1 = dist.unwrap(v)
self.assertStartsWith(v1.name, "tower_1/")
if __name__ == "__main__":
test.main()
|
|
# adapted from https://github.com/lisa-lab/DeepLearningTutorials
from collections import OrderedDict
import copy
import os
import re
import codecs
import random
import timeit
from hyperopt import STATUS_OK
import numpy as np
import pandas as pd
from scipy import stats
import theano
from theano import tensor as T
import common
from ..util import defines
from ..util import file_handling as fh
from ..experiment import reusable_holdout
from ..experiment import evaluation
# Otherwise the deepcopy fails
import sys
sys.setrecursionlimit(5000)
THEANO_FLAGS='floatX=float32'
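# Note: assigning THEANO_FLAGS here has no effect on Theano, which reads the
# THEANO_FLAGS environment variable at import time; set it in the shell (or in
# os.environ before "import theano") for the flag to apply.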
# utils functions
def shuffle(lol, seed=None):
'''
lol :: list of list as input
seed :: seed the shuffling
shuffle inplace each list in the same order
'''
for l in lol:
random.seed(seed)
random.shuffle(l)
def contextwin(l, win):
'''
win :: int corresponding to the size of the window
given a list of indexes composing a sentence
l :: array containing the word indexes
it will return a list of list of indexes corresponding
to context windows surrounding each word in the sentence
'''
assert (win % 2) == 1
assert win >= 1
l = list(l)
lpadded = win // 2 * [-1] + l + win // 2 * [-1]
out = [lpadded[i:(i + win)] for i in range(len(l))]
assert len(out) == len(l)
return out
class RNN(object):
''' elman neural net model '''
def __init__(self, nh, nc, ne, de, cs, init_scale=0.2, initial_embeddings=None,
rnn_type='basic', # 'basic', 'GRU', or 'LSTM'
pooling_method='max', #'max', 'mean', 'attention1' or 'attention2',
extra_input_dims=0, train_embeddings=True,
bidirectional=True, bi_combine='concat' # 'concat', 'sum', or 'mean'
):
'''
nh :: dimension of the hidden layer
nc :: number of classes
ne :: number of word embeddings in the vocabulary
de :: dimension of the word embeddings
cs :: word window context size
'''
# initialize parameters
dx = de * cs
if extra_input_dims > 0:
dx += extra_input_dims
bi = 1
if bidirectional and bi_combine == 'concat':
bi = 2
if initial_embeddings is None:
self.emb = theano.shared(name='embeddings',
value=init_scale * np.random.uniform(-1.0, 1.0,
(ne, de)).astype(theano.config.floatX))
#(ne+1, de)) # add one for padding at the end
else:
self.emb = theano.shared(name='embeddings', value=initial_embeddings.astype(theano.config.floatX))
if extra_input_dims > 0:
self.W_drld = theano.shared(name='W_drld', value=init_scale * np.random.uniform(-1.0, 1.0, (1, nh))
.astype(theano.config.floatX))
# common paramters (feeding into hidden node)
self.W_xh = theano.shared(name='W_xh', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hh = theano.shared(name='W_hh', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_h = theano.shared(name='b_h', value=np.array(np.random.uniform(0.0, 1.0, nh),
dtype=theano.config.floatX))
# output layer parameters
self.W_s = theano.shared(name='W_s', value=init_scale * np.random.uniform(-1.0, 1.0, (nh * bi, nc))
.astype(theano.config.floatX))
self.b_s = theano.shared(name='b_s', value=np.zeros(nc, dtype=theano.config.floatX))
# temporary parameters
        # initial forward hidden state (needed by the GRU/LSTM scans below)
        self.h_i_f = theano.shared(name='h_i_f', value=np.zeros(nh, dtype=theano.config.floatX))
if bidirectional:
self.h_i_r = theano.shared(name='h_i_r', value=np.zeros(nh, dtype=theano.config.floatX))
# Attention parameters
if pooling_method == 'attention1' or pooling_method == 'attention2':
self.W_a = theano.shared(name='W_a', value=init_scale * np.random.uniform(-1.0, 1.0, (bi*nh, 1))
.astype(theano.config.floatX))
self.b_a = theano.shared(name='b_a', value=0.0)
# GRU parameters
if rnn_type == 'GRU':
self.W_xr = theano.shared(name='W_xr', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hr = theano.shared(name='W_hr', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_r = theano.shared(name='b_r', value=np.zeros(nh, dtype=theano.config.floatX))
self.W_xz = theano.shared(name='W_xz', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hz = theano.shared(name='W_hz', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_z = theano.shared(name='b_z', value=np.zeros(nh, dtype=theano.config.floatX))
# LSTM paramters
if rnn_type == 'LSTM':
# forget gate (needs special initialization)
self.W_xf = theano.shared(name='W_xf', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hf = theano.shared(name='W_hf', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.W_cf = theano.shared(name='W_cf', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_f = theano.shared(name='b_f', value=np.array(np.random.uniform(0.0, 1.0, nh),
dtype=theano.config.floatX))
# input gate
self.W_xi = theano.shared(name='W_xi', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_hi = theano.shared(name='W_hi', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.W_ci = theano.shared(name='W_ci', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_i = theano.shared(name='b_i', value=np.zeros(nh, dtype=theano.config.floatX))
# output gate
self.W_xo = theano.shared(name='W_xo', value=init_scale * np.random.uniform(-1.0, 1.0, (dx, nh))
.astype(theano.config.floatX))
self.W_ho = theano.shared(name='W_ho', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.W_co = theano.shared(name='W_co', value=init_scale * np.random.uniform(-1.0, 1.0, (nh, nh))
.astype(theano.config.floatX))
self.b_o = theano.shared(name='b_o', value=np.zeros(nh, dtype=theano.config.floatX))
# use normal ->hidden weights for memory cell
# temp
self.c_i_f = theano.shared(name='c_i_f', value=np.zeros(nh, dtype=theano.config.floatX))
if bidirectional:
self.c_i_r = theano.shared(name='c_i_r', value=np.zeros(nh, dtype=theano.config.floatX))
self.params = [self.W_xh, self.W_hh, self.b_h,
self.W_s, self.b_s]
#self.params += [self.h_i_f]
if train_embeddings:
self.params += [self.emb]
        if pooling_method == 'attention1' or pooling_method == 'attention2':
self.params += [self.W_a, self.b_a]
if rnn_type == 'GRU':
self.params += [self.W_xr, self.W_hr, self.b_r,
self.W_xz, self.W_hz, self.b_z]
if rnn_type == 'LSTM':
self.params += [self.W_xf, self.W_hf, self.W_cf, self.b_f,
self.W_xi, self.W_hi, self.W_ci, self.b_i,
self.W_xo, self.W_ho, self.W_co, self.b_o,
self.c_i_f]
if bidirectional:
self.params += [self.c_i_r]
if bidirectional:
self.params += [self.h_i_r]
# create an X object based on the size of the object at the index [elements, emb_dim * window]
idxs = T.imatrix()
if extra_input_dims:
extra = T.imatrix()
x = T.concatenate([self.emb[idxs].reshape((idxs.shape[0], de*cs)),
T.repeat(extra, idxs.shape[0], axis=0)], axis=1)
else:
x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
# create a vector for y
y = T.ivector('y')
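        # Per-timestep recurrence functions used by theano.scan below; the
        # *_reverse variants use the same parameters but are scanned with
        # go_backwards=True for the bidirectional case.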
def recurrence_basic(x_t, h_tm1):
h_t = T.nnet.sigmoid(T.dot(x_t, self.W_xh) + T.dot(h_tm1, self.W_hh) + self.b_h)
return h_t
def recurrence_basic_reverse(x_t, h_tp1):
h_t = T.nnet.sigmoid(T.dot(x_t, self.W_xh) + T.dot(h_tp1, self.W_hh) + self.b_h)
return h_t
def recurrence_gru(x_t, h_tm1):
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) + T.dot(h_tm1, self.W_hr) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) + T.dot(h_tm1, self.W_hz) + self.b_z)
g_t = T.tanh(T.dot(x_t, self.W_xh) + r_t * T.dot(h_tm1, self.W_hh) + self.b_h)
h_t = (1 - z_t) * h_tm1 + z_t * g_t
return h_t
def recurrence_gru_reverse(x_t, h_tp1):
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) + T.dot(h_tp1, self.W_hr) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) + T.dot(h_tp1, self.W_hz) + self.b_z)
g_t = T.tanh(T.dot(x_t, self.W_xh) + r_t * T.dot(h_tp1, self.W_hh) + self.b_h)
h_t = (1 - z_t) * h_tp1 + z_t * g_t
return h_t
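        # Note: this LSTM variant returns h_t = o_t * c_t directly, without
        # the conventional tanh(c_t) squashing of the cell state.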
def recurrence_lstm(x_t, h_tm1, c_tm1):
i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tm1, self.W_hi) + T.dot(c_tm1, self.W_ci) + self.b_i)
f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tm1, self.W_hf) + T.dot(c_tm1, self.W_cf) + self.b_f)
d_t = T.tanh(T.dot(x_t, self.W_xh) + T.dot(h_tm1, self.W_hh) + self.b_h)
c_t = f_t * c_tm1 + i_t * d_t
o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tm1, self.W_ho) + T.dot(c_t, self.W_co) + self.b_o)
h_t = o_t * c_t
return [h_t, c_t]
def recurrence_lstm_reverse(x_t, h_tp1, c_tp1):
i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tp1, self.W_hi) + T.dot(c_tp1, self.W_ci) + self.b_i)
f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tp1, self.W_hf) + T.dot(c_tp1, self.W_cf) + self.b_f)
d_t = T.tanh(T.dot(x_t, self.W_xh) + T.dot(h_tp1, self.W_hh) + self.b_h)
c_t = f_t * c_tp1 + i_t * d_t
o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tp1, self.W_ho) + T.dot(c_t, self.W_co) + self.b_o)
h_t = o_t * c_t
return [h_t, c_t]
h_r = None
if rnn_type == 'GRU':
h_f, _ = theano.scan(fn=recurrence_gru, sequences=x, outputs_info=[self.h_i_f], n_steps=x.shape[0])
if bidirectional:
h_r, _ = theano.scan(fn=recurrence_gru_reverse, sequences=x, outputs_info=[self.h_i_r],
go_backwards=True)
elif rnn_type == 'LSTM':
[h_f, c_f], _ = theano.scan(fn=recurrence_lstm, sequences=x,
outputs_info=[self.h_i_f, self.c_i_f], n_steps=x.shape[0])
if bidirectional:
[h_r, c_r], _ = theano.scan(fn=recurrence_lstm_reverse, sequences=x,
outputs_info=[self.h_i_r, self.c_i_r], go_backwards=True)
else:
h_f, _ = theano.scan(fn=recurrence_basic, sequences=x,
outputs_info=[T.alloc(np.array(0.), nh)],
n_steps=x.shape[0])
if bidirectional:
h_r, _ = theano.scan(fn=recurrence_basic_reverse, sequences=x, outputs_info=[self.h_i_r],
go_backwards=True)
if bidirectional:
# reverse the second hidden layer so it lines up with the first
h_r = h_r[::-1, :]
if bi_combine == 'max':
h = T.maximum(h_f, h_r)
elif bi_combine == 'mean':
h = (h_f + h_r) / 2.0
else: # concatenate
#h = theano.printing.Print('h:')(T.concatenate([h_fp, h_rp], axis=1))
h = T.concatenate([h_f, h_r], axis=1)
else:
h = h_f
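        # a_sum is a dummy value here so the attention-sum diagnostic is
        # always defined; the attention pooling branches below overwrite it.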
a_sum = T.sum([1])
if pooling_method == 'attention1': # combine hidden nodes, then transform and sigmoid
# SOFTMAX normalizes across the row (axis=1)
a = T.nnet.softmax((T.dot(h, self.W_a) + self.b_a).T) # [1, n_elements]: normalized vector
a_sum = T.sum(a) # to check a is normalized
p_y_given_x_sentence = T.nnet.sigmoid(T.dot(T.dot(a, h), self.W_s) + self.b_s) # [1, nc] in R(0,1)
y_pred = T.max(p_y_given_x_sentence, axis=0) > 0.5 # note, max is just to coerce into proper shape
element_weights = T.outer(a, p_y_given_x_sentence) # [ne, nc]
elif pooling_method == 'attention2': # transform hidden nodes, sigmoid, then combine
a = T.nnet.softmax((T.dot(h, self.W_a) + self.b_a).T) # [1, n_elements]: normalized vector
a_sum = T.sum(a)
temp = T.nnet.sigmoid(T.dot(h, self.W_s) + self.b_s) # [ne x nc]
p_y_given_x_sentence = T.dot(a, temp) # [1, nc] in R(0,1)
y_pred = T.max(p_y_given_x_sentence, axis=0) > 0.5 # note, max is just to coerce into proper shape
element_weights = T.repeat(a.T, nc, axis=1) * temp # [ne, nc]
elif pooling_method == 'mean':
s = T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s)) # [n_elements, nc] in R(0,1)
p_y_given_x_sentence = T.mean(s, axis=0)
y_pred = p_y_given_x_sentence > 0.5
element_weights = s
else: # pooling_method == 'max'
s = T.nnet.sigmoid((T.dot(h, self.W_s) + self.b_s)) # [n_elements, nc] in R(0,1)
p_y_given_x_sentence = T.max(s, axis=0)
y_pred = p_y_given_x_sentence > 0.5
element_weights = s
# cost and gradients and learning rate
lr = T.scalar('lr_main')
lr_emb_fac = T.scalar('lr_emb')
sentence_nll = -T.sum(y * T.log(p_y_given_x_sentence) + (1-y)*T.log(1-p_y_given_x_sentence))
sentence_gradients = T.grad(sentence_nll, self.params)
sentence_updates = OrderedDict((p, p - lr * g) for p, g in zip(self.params, [lr_emb_fac *
sentence_gradients[0]]
+ sentence_gradients[1:]))
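        # Note: lr_emb_fac is applied to sentence_gradients[0], which
        # corresponds to W_xh given the ordering of self.params above; scaling
        # only the embedding gradient would require self.emb to be the first
        # entry in self.params.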
# theano functions to compile
if extra_input_dims > 0:
self.sentence_classify = theano.function(inputs=[idxs, extra], outputs=y_pred)
self.sentence_train = theano.function(inputs=[idxs, extra, y, lr, lr_emb_fac],
outputs=sentence_nll,
updates=sentence_updates)
if pooling_method == 'attention1' or pooling_method == 'attention2':
self.a_sum_check = theano.function(inputs=[idxs, extra], outputs=a_sum)
else:
self.sentence_classify = theano.function(inputs=[idxs], outputs=y_pred)
self.sentence_train = theano.function(inputs=[idxs, y, lr, lr_emb_fac],
outputs=sentence_nll,
updates=sentence_updates)
if pooling_method == 'attention1' or pooling_method == 'attention2':
self.a_sum_check = theano.function(inputs=[idxs], outputs=a_sum)
self.normalize = theano.function(inputs=[],
updates={self.emb: self.emb / T.sqrt((self.emb**2).sum(axis=1))
.dimshuffle(0, 'x')})
def classify(self, x, window_size, extra_input_dims=0, extra=None):
cwords = contextwin(x, window_size)
# make an array of these windows
words = map(lambda x: np.asarray(x).astype('int32'), cwords)
if extra_input_dims > 0:
extra = np.array(extra).astype('int32').reshape((1, extra_input_dims))
return self.sentence_classify(words, extra)
else:
return self.sentence_classify(words)
def train(self, x, y, window_size, learning_rate, emb_lr_factor, extra_input_dims=0, extra=None):
# concatenate words in a window
cwords = contextwin(x, window_size)
# make an array of these windows
words = map(lambda x: np.asarray(x).astype('int32'), cwords)
# train on these sentences and normalize
if extra_input_dims > 0:
extra = np.array(extra).astype('int32').reshape((1, extra_input_dims))
nll = self.sentence_train(words, extra, y, learning_rate, emb_lr_factor)
else:
nll = self.sentence_train(words, y, learning_rate, emb_lr_factor)
self.normalize()
return nll
def save(self, output_dir):
for param in self.params:
np.save(os.path.join(output_dir, param.name + '.npy'), param.get_value())
def load(self, input_dir):
for param in self.params:
param.set_value(np.load(os.path.join(input_dir, param.name + '.npy')))
def print_embeddings(self):
for param in self.params:
print param.name, param.get_value()
def main(params=None):
if params is None:
params = {
'exp_name': 'minibatch_test',
'test_fold': 0,
'n_dev_folds': 1,
'min_doc_thresh': 1,
'initialize_word_vectors': True,
'vectors': 'anes_word2vec', # default_word2vec, anes_word2vec ...
'word2vec_dim': 300,
'init_scale': 0.2,
'add_OOV': True,
'win': 3, # size of context window
'add_DRLD': False,
'rnn_type': 'basic', # basic, GRU, or LSTM
'n_hidden': 3, # size of hidden units
'pooling_method': 'max', # max, mean, or attention1/2
'bidirectional': False,
'bi_combine': 'mean', # concat, max, or mean
'train_embeddings': True,
'lr': 0.1, # learning rate
'lr_emb_fac': 0.2, # factor to modify learning rate for embeddings
'decay_delay': 5, # number of epochs with no improvement before decreasing learning rate
'decay_factor': 0.5, # factor by which to multiply learning rate in case of delay
'n_epochs': 10,
'add_OOV_noise': False,
'OOV_noise_prob': 0.01,
'minibatch_size': 1,
'ensemble': False,
'save_model': True,
'seed': 42,
'verbose': 1,
'reuse': False,
'orig_T': 0.04,
'tau': 0.01
}
# load params from a previous experiment
params = fh.read_json('/Users/dcard/Projects/CMU/ARK/guac/experiments/best_mod.json')
params['exp_name'] += '_minibatch_16'
params['n_hidden'] = int(params['n_hidden'])
params['orig_T'] = 0.02
params['tau'] = 0.005
reuser = None
if params['reuse']:
reuser = reusable_holdout.ReuseableHoldout(T=params['orig_T'], tau=params['tau'])
keys = params.keys()
keys.sort()
for key in keys:
print key, ':', params[key]
# seed the random number generators
np.random.seed(params['seed'])
random.seed(params['seed'])
datasets = ['Democrat-Likes', 'Democrat-Dislikes', 'Republican-Likes', 'Republican-Dislikes']
np.random.seed(params['seed'])
random.seed(params['seed'])
best_valid_f1s = []
best_test_f1s = []
test_prediction_arrays = []
output_dir = fh.makedirs(defines.exp_dir, 'rnn', params['exp_name'])
output_filename = fh.make_filename(output_dir, 'params', 'json')
fh.write_to_json(params, output_filename)
for dev_fold in range(params['n_dev_folds']):
print "dev fold =", dev_fold
output_dir = fh.makedirs(defines.exp_dir, 'rnn', params['exp_name'], 'fold' + str(dev_fold))
results = []
all_data, words2idx, items, all_labels = common.load_data(datasets, params['test_fold'], dev_fold,
params['min_doc_thresh'])
train_xy, valid_xy, test_xy = all_data
train_lex, train_y = train_xy
valid_lex, valid_y = valid_xy
test_lex, test_y = test_xy
train_items, dev_items, test_items = items
vocsize = len(words2idx.keys())
idx2words = dict((k, v) for v, k in words2idx.iteritems())
best_test_predictions = None
n_sentences = len(train_lex)
print "vocsize = ", vocsize, 'n_train', n_sentences
codes = all_labels.columns
n_items, n_codes = all_labels.shape
# get the words in the sentences for the test and validation sets
words_valid = [map(lambda x: idx2words[x], w) for w in valid_lex]
groundtruth_test = test_y[:]
words_test = [map(lambda x: idx2words[x], w) for w in test_lex]
initial_embeddings = common.load_embeddings(params, words2idx)
OOV_index = words2idx['__OOV__']
emb_dim = initial_embeddings.shape[1]
print 'emb_dim =', emb_dim
extra_input_dims = 0
if params['add_DRLD']:
extra_input_dims = 2
print "Building RNN"
rnn = RNN(nh=params['n_hidden'],
nc=n_codes,
ne=vocsize,
de=emb_dim,
cs=params['win'],
extra_input_dims=extra_input_dims,
initial_embeddings=initial_embeddings,
init_scale=params['init_scale'],
rnn_type=params['rnn_type'],
train_embeddings=params['train_embeddings'],
pooling_method=params['pooling_method'],
bidirectional=params['bidirectional'],
bi_combine=params['bi_combine']
)
train_likes = [1 if re.search('Likes', i) else 0 for i in train_items]
dev_likes = [1 if re.search('Likes', i) else 0 for i in dev_items]
test_likes = [1 if re.search('Likes', i) else 0 for i in test_items]
train_dem = [1 if re.search('Democrat', i) else 0 for i in train_items]
dev_dem = [1 if re.search('Democrat', i) else 0 for i in dev_items]
test_dem = [1 if re.search('Democrat', i) else 0 for i in test_items]
train_extra = [[train_likes[i], train_dem[i]] for i, t in enumerate(train_items)]
dev_extra = [[dev_likes[i], dev_dem[i]] for i, t in enumerate(dev_items)]
test_extra = [[test_likes[i], test_dem[i]] for i, t in enumerate(test_items)]
# train with early stopping on validation set
best_f1 = -np.inf
params['clr'] = params['lr']
for e in xrange(params['n_epochs']):
# shuffle
shuffle([train_lex, train_y, train_extra], params['seed']) # shuffle the input data
params['ce'] = e # store the current epoch
tic = timeit.default_timer()
#for i, (x, y) in enumerate(zip(train_lex, train_y)):
for i, orig_x in enumerate(train_lex):
n_words = len(orig_x)
if params['add_OOV_noise']:
draws = np.random.rand(n_words)
x = [OOV_index if draws[idx] < params['OOV_noise_prob'] else orig_x[idx] for idx in range(n_words)]
else:
x = orig_x
y = train_y[i]
extra = train_extra[i]
if i == 0:
print ' '.join([idx2words[w] for w in train_lex[i]])
if i == 0:
print x
print y
nll = rnn.train(x, y, params['win'], params['clr'], params['lr_emb_fac'],
extra_input_dims, extra)
                if i % 100 == 0:
print nll
print '[learning] epoch %i >> %2.2f%%' % (
e, (i + 1) * 100. / float(n_sentences)),
print 'completed in %.2f (sec) <<\r' % (timeit.default_timer() - tic),
sys.stdout.flush()
#if i == 0:
# print ' '.join([idx2words[idx] for idx in orig_x])
# print rnn.classify(orig_x, params['win'], extra_input_dims, extra)
if np.isnan(nll) or np.isinf(nll):
return {'loss': nll,
'final_test_f1': 0,
'valid_f1s': [0],
'test_f1s': [0],
'status': STATUS_OK
}
# evaluation // back into the real world : idx -> words
print ""
#print rnn.classify((np.asarray(contextwin(train_lex[0], params['win'])).astype('int32')), train_likes[0], params['win'])
#print rnn.classify(train_lex[0], params['win'], extra_input_dims, train_extra[0])
#print rnn.get_element_weights(np.asarray(contextwin(train_lex[0], params['win'])).astype('int32'))
#if params['pooling_method'] == 'attention1' or params['pooling_method'] == 'attention2':
# if extra_input_dims == 0:
# r = np.random.randint(0, len(train_lex))
# print r, rnn.a_sum_check(np.asarray(contextwin(train_lex[r], params['win'])).astype('int32'))
"""
predictions_train = [np.max(rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')), axis=0)
for x in train_lex]
predictions_test = [np.max(rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')), axis=0)
for x in test_lex]
predictions_valid = [np.max(rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')), axis=0)
for x in valid_lex]
"""
#predictions_train = [rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32'), likes) for x in train_lex]
#predictions_test = [rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32'), likes) for x in test_lex]
#predictions_valid = [rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32'), likes) for x in valid_lex]
predictions_train = [rnn.classify(x, params['win'],
extra_input_dims, train_extra[i]) for i, x in enumerate(train_lex)]
predictions_test = [rnn.classify(x, params['win'],
extra_input_dims, test_extra[i]) for i, x in enumerate(test_lex)]
predictions_valid = [rnn.classify(x, params['win'],
extra_input_dims, dev_extra[i]) for i, x in enumerate(valid_lex)]
train_f1 = common.calc_mean_f1(predictions_train, train_y)
test_f1 = common.calc_mean_f1(predictions_test, test_y)
valid_f1 = common.calc_mean_f1(predictions_valid, valid_y)
if reuser is not None:
valid_f1 = reuser.mask_value(valid_f1, train_f1)
question_f1s = []
question_pps = []
print "train_f1 =", train_f1, "valid_f1 =", valid_f1, "test_f1 =", test_f1
results.append((train_f1, valid_f1, test_f1))
if valid_f1 > best_f1:
best_rnn = copy.deepcopy(rnn)
best_f1 = valid_f1
best_test_predictions = predictions_test
if params['verbose']:
print('NEW BEST: epoch', e,
'valid f1', valid_f1,
'best test f1', test_f1)
params['tr_f1'] = train_f1
params['te_f1'] = test_f1
params['v_f1'] = valid_f1
params['be'] = e # store the current epoch as a new best
# learning rate decay if no improvement in a given number of epochs
if abs(params['be']-params['ce']) >= params['decay_delay']:
params['clr'] *= params['decay_factor']
params['be'] = params['ce']
print "Reverting to current best; new learning rate = ", params['clr']
# also reset to the previous best
rnn = best_rnn
if params['clr'] < 1e-5:
break
if best_f1 == 1.0:
break
if best_f1 == 0 and e > 10:
break
if params['save_model']:
predictions_valid = [rnn.classify(x, params['win'],
extra_input_dims, dev_extra[i]) for i, x in enumerate(valid_lex)]
#predictions_valid = [best_rnn.classify(np.asarray(contextwin(x, params['win'])).astype('int32')) for x in valid_lex]
best_rnn.save(output_dir)
common.write_predictions(datasets, params['test_fold'], dev_fold, predictions_valid, dev_items, output_dir)
print('BEST RESULT: epoch', params['be'],
'train F1 ', params['tr_f1'],
'valid F1', params['v_f1'],
'best test F1', params['te_f1'],
'with the model', output_dir)
best_valid_f1s.append(params['v_f1'])
best_test_f1s.append(params['te_f1'])
test_prediction_arrays.append(np.array(best_test_predictions, dtype=int))
output_filename = fh.make_filename(output_dir, 'results', 'txt')
with codecs.open(output_filename, 'w') as output_file:
for e, result in enumerate(results):
output_file.write('epoch=' + str(e) + '; train_f1=' + str(result[0]) +
'; valid_f1=' + str(result[1]) + '; test_f1=' + str(result[2]) + '\n')
if params['ensemble']:
test_predictions_stack = np.dstack(test_prediction_arrays)
final_predictions = stats.mode(test_predictions_stack, axis=2)[0][:, :, 0]
predicted_df = pd.DataFrame(final_predictions, index=test_items, columns=codes)
true_df = pd.DataFrame(np.array(test_y), index=test_items, columns=codes)
final_test_f1, final_test_pp = evaluation.calc_macro_mean_f1_pp(true_df, predicted_df)
else:
final_test_f1 = np.median(best_test_f1s)
return {'loss': -np.median(best_valid_f1s),
'final_test_f1': final_test_f1,
'valid_f1s': best_valid_f1s,
'test_f1s': best_test_f1s,
'status': STATUS_OK
}
if __name__ == '__main__':
report = main()
print report
|
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
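    # pkgcmd has to run as the target user with the session D-Bus address (XW_ENV) exported,
    # so wrap such commands in an 'su - <user> -c ...' invocation.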
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    # Treat the return value as a success flag so callers can use 'if not doRemoteCopy(...)'
    if return_code != 0:
        return False
    else:
        return True
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
# for file in files:
# if file.endswith("%s.wgt" % PKG_NAME):
# if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
# action_status = False
# (return_code, output) = doRemoteCMD(
# "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
# doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
# for line in output:
# if "Failure" in line:
# action_status = False
# break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
                if line.find("\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
|
#!/usr/bin/env python
# coding: utf-8
import math
import socket
import random
import string
import regex2dfa
import fte.encoder
import fte.bit_ops
import re
import marionette_tg.conf
import marionette_tg.record_layer
def send(channel, marionette_state, input_args):
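    # Build an outgoing message: pick a template for the grammar, let each handler fill its
    # placeholder in order, then push the whole ciphertext out over the channel, retrying on timeouts.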
grammar = input_args[0]
ctxt = generate_template(grammar)
for handler_key in conf[grammar]["handler_order"]:
ctxt = execute_handler_sender(
marionette_state,
grammar,
handler_key,
ctxt,
marionette_state.get_global("multiplexer_outgoing"))
    ctxt_len = len(ctxt)
    total_sent = 0
    while len(ctxt) > 0:
        try:
            bytes_sent = channel.send(ctxt)
            total_sent += bytes_sent
            ctxt = ctxt[bytes_sent:]
        except socket.timeout:
            continue
    retval = (ctxt_len == total_sent)
    return retval
def recv(channel, marionette_state, input_args):
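    # Receive a message, check it parses under the grammar, and run each handler's receiver to
    # recover the embedded cell data; the channel is rolled back if nothing usable was recovered.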
retval = False
grammar = input_args[0]
try:
ctxt = channel.recv()
if parser(grammar, ctxt):
cell_str = ''
for handler_key in conf[grammar]["handler_order"]:
tmp_str = execute_handler_receiver(marionette_state,
grammar, handler_key, ctxt)
if tmp_str:
cell_str += tmp_str
if not cell_str:
retval = True
else:
##
cell_obj = marionette_tg.record_layer.unserialize(cell_str)
assert cell_obj.get_model_uuid() == marionette_state.get_local(
"model_uuid")
marionette_state.set_local(
"model_instance_id", cell_obj.get_model_instance_id())
##
if marionette_state.get_local("model_instance_id"):
marionette_state.get_global(
"multiplexer_incoming").push(cell_str)
retval = True
except socket.timeout as e:
pass
except socket.error as e:
pass
except marionette_tg.record_layer.UnserializeException as e:
pass
if not retval:
channel.rollback()
return retval
def get_grammar_capacity(grammar):
retval = 0
for handler_key in conf[grammar]["handler_order"]:
retval += conf[grammar]['handlers'][handler_key].capacity()
retval /= 8.0
return retval
# handler + (un)embed functions
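# do_embed substitutes a handler's encoded value for its %%HANDLER%% placeholder in the template;
# do_unembed parses a received message and pulls out the field belonging to that handler.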
def do_embed(grammar, template, handler_key, value):
if template.count("%%" + handler_key + "%%") == 0:
# handler not in template, no need to execute
pass
elif template.count("%%" + handler_key + "%%") == 1:
template = template.replace("%%" + handler_key + "%%", value)
else:
# don't know how to handle >1 handlers, yet
assert False
return template
def do_unembed(grammar, ctxt, handler_key):
parse_tree = parser(grammar, ctxt)
return parse_tree[handler_key]
def execute_handler_sender(marionette_state, grammar, handler_key,
template, multiplexer):
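    # Pop up to capacity() bits of cell data from the outgoing multiplexer, encode it with the
    # handler, and embed the encoded value into the template at the handler's placeholder.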
to_execute = conf[grammar]["handlers"][handler_key]
cell_len_in_bits = to_execute.capacity()
to_embed = ''
if cell_len_in_bits > 0:
cell = multiplexer.pop(marionette_state.get_local("model_uuid"),
marionette_state.get_local("model_instance_id"),
cell_len_in_bits)
to_embed = cell.to_string()
value_to_embed = to_execute.encode(marionette_state, template, to_embed)
template = do_embed(grammar, template, handler_key, value_to_embed)
return template
def execute_handler_receiver(marionette_state, grammar, handler_key,
ctxt):
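    # Extract this handler's field from the received message and decode it back into plaintext cell data.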
ptxt = ''
to_execute = conf[grammar]["handlers"][handler_key]
handler_key_value = do_unembed(grammar, ctxt, handler_key)
ptxt = to_execute.decode(marionette_state, handler_key_value)
return ptxt
# handlers
regex_cache_ = {}
fte_cache_ = {}
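# RankerHandler maps cell bytes to fixed-length strings in a regular language and back, using
# regex2dfa plus fte's DFA ranking/unranking; compiled DFAs are memoized in regex_cache_.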
class RankerHandler(object):
def __init__(self, regex, msg_len):
self.regex_ = regex
regex_key = regex + str(msg_len)
if not regex_cache_.get(regex_key):
dfa = regex2dfa.regex2dfa(regex)
cDFA = fte.cDFA.DFA(dfa, msg_len)
encoder = fte.dfa.DFA(cDFA, msg_len)
regex_cache_[regex_key] = (dfa, encoder)
(self.dfa_, self.encoder_) = regex_cache_[regex_key]
def capacity(self):
cell_len_in_bytes = int(math.floor(self.encoder_.getCapacity() / 8.0))
cell_len_in_bits = cell_len_in_bytes * 8
return cell_len_in_bits
def encode(self, marionette_state, template, to_embed):
to_embed_as_int = fte.bit_ops.bytes_to_long(to_embed)
ctxt = self.encoder_.unrank(to_embed_as_int)
return ctxt
    def decode(self, marionette_state, ctxt):
        ptxt = ''
        try:
            ptxt = self.encoder_.rank(ctxt)
            ptxt = fte.bit_ops.long_to_bytes(ptxt, self.capacity() / 8)
        except Exception as e:
            pass
        return ptxt
class FteHandler(object):
def __init__(self, regex, msg_len):
self.regex_ = regex
fte_key = regex + str(msg_len)
if not fte_cache_.get(fte_key):
dfa = regex2dfa.regex2dfa(regex)
encrypter = fte.encoder.DfaEncoder(dfa, msg_len)
fte_cache_[fte_key] = (dfa, encrypter)
(self.dfa_, self.fte_encrypter_) = fte_cache_[fte_key]
def capacity(self):
if self.regex_.endswith(".+"):
retval = (2 ** 18) * 8
else:
cell_len_in_bytes = int(math.floor(self.fte_encrypter_.getCapacity(
) / 8.0)) - fte.encoder.DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT - fte.encrypter.Encrypter._CTXT_EXPANSION
cell_len_in_bits = cell_len_in_bytes * 8
retval = cell_len_in_bits
return retval
def encode(self, marionette_state, template, to_embed):
ctxt = self.fte_encrypter_.encode(to_embed)
return ctxt
    def decode(self, marionette_state, ctxt):
        ptxt = ''
        try:
            retval = self.fte_encrypter_.decode(ctxt)
            ptxt = retval[0]
        except Exception as e:
            pass
        return ptxt
class FteMsgLensHandler(FteHandler):
def capacity(self):
cell_len_in_bytes = int(math.floor(self.fte_encrypter_.getCapacity(
) / 8.0)) - fte.encoder.DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT - fte.encrypter.Encrypter._CTXT_EXPANSION
cell_len_in_bits = cell_len_in_bytes * 8
return cell_len_in_bits
class HttpContentLengthHandler(object):
def capacity(self):
return 0
def encode(self, marionette_state, template, to_embed):
http_body_length = str(len(template.split("\r\n\r\n")[1]))
return http_body_length
def decode(self, marionette_state, ctxt):
return None
class Pop3ContentLengthHandler(object):
def capacity(self):
return 0
def encode(self, marionette_state, template, to_embed):
pop3_body_length = str(len('\n'.join(template.split("\n")[1:])))
return pop3_body_length
def decode(self, marionette_state, ctxt):
return None
class SetFTPPasvX(object):
def capacity(self):
return 0
def encode(self, marionette_state, template, to_embed):
ftp_pasv_port = marionette_state.get_local("ftp_pasv_port")
ftp_pasv_port_x = int(math.floor(ftp_pasv_port / 256.0))
return str(ftp_pasv_port_x)
def decode(self, marionette_state, ctxt):
marionette_state.set_local("ftp_pasv_port_x", int(ctxt))
return None
class SetFTPPasvY(object):
def capacity(self):
return 0
def encode(self, marionette_state, template, to_embed):
ftp_pasv_port = marionette_state.get_local("ftp_pasv_port")
ftp_pasv_port_y = ftp_pasv_port % 256
return str(ftp_pasv_port_y)
def decode(self, marionette_state, ctxt):
ftp_pasv_port_x = marionette_state.get_local("ftp_pasv_port_x")
ftp_pasv_port_y = int(ctxt)
ftp_pasv_port = ftp_pasv_port_x * 256 + ftp_pasv_port_y
marionette_state.set_local("ftp_pasv_port", ftp_pasv_port)
return None
class SetDnsTransactionId(object):
def capacity(self):
return 0
def encode(self, marionette_state, template, to_embed):
dns_transaction_id = None
if marionette_state.get_local("dns_transaction_id"):
dns_transaction_id = marionette_state.get_local("dns_transaction_id")
else:
dns_transaction_id = str(chr(random.randint(1,254)))+str(chr(random.randint(1,254)))
marionette_state.set_local("dns_transaction_id", dns_transaction_id)
return str(dns_transaction_id)
def decode(self, marionette_state, ctxt):
marionette_state.set_local("dns_transaction_id", ctxt)
return None
class SetDnsDomain(object):
def capacity(self):
return 0
def encode(self, marionette_state, template, to_embed):
dns_domain = None
if marionette_state.get_local("dns_domain"):
dns_domain = marionette_state.get_local("dns_domain")
else:
dns_domain_len = random.randint(3,63)
dns_domain = chr(dns_domain_len) + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(dns_domain_len)) + "\x03" + random.choice(['com', 'net', 'org'])
marionette_state.set_local("dns_domain", dns_domain)
return str(dns_domain)
def decode(self, marionette_state, ctxt):
marionette_state.set_local("dns_domain", ctxt)
return None
class SetDnsIp(object):
def capacity(self):
return 0
def encode(self, marionette_state, template, to_embed):
dns_ip = None
if marionette_state.get_local("dns_ip"):
dns_ip = marionette_state.get_local("dns_ip")
else:
dns_ip = str(chr(random.randint(1,254)))+str(chr(random.randint(1,254)))+str(chr(random.randint(1,254)))+str(chr(random.randint(1,254)))
marionette_state.set_local("dns_ip", dns_ip)
return str(dns_ip)
def decode(self, marionette_state, ctxt):
marionette_state.set_local("dns_ip", ctxt)
return None
class AmazonMsgLensHandler(FteHandler):
def capacity(self):
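        # Empirical histogram of observed Amazon response sizes (length in bytes -> frequency);
        # a target message length in bits is sampled from this weighted distribution.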
amazon_msg_lens = {
2049: 1,
2052: 2,
2054: 2,
2057: 3,
2058: 2,
2059: 1,
2065: 1,
17429: 1,
3098: 1,
687: 3,
2084: 1,
42: 58,
43: 107,
9260: 1,
11309: 1,
11829: 1,
9271: 1,
6154: 1,
64: 15,
1094: 1,
12376: 1,
89: 1,
10848: 1,
5223: 1,
69231: 1,
7795: 1,
2678: 1,
8830: 1,
29826: 1,
16006: 10,
8938: 1,
17055: 2,
87712: 1,
23202: 1,
7441: 1,
17681: 1,
12456: 1,
41132: 1,
25263: 6,
689: 1,
9916: 1,
10101: 2,
1730: 1,
10948: 1,
26826: 1,
6357: 1,
13021: 2,
1246: 4,
19683: 1,
1765: 1,
1767: 1,
1768: 1,
1769: 4,
1770: 6,
1771: 3,
1772: 2,
1773: 4,
1774: 4,
1775: 1,
1776: 1,
1779: 1,
40696: 1,
767: 1,
17665: 1,
27909: 1,
12550: 1,
5385: 1,
16651: 1,
5392: 1,
26385: 1,
12056: 1,
41245: 2,
13097: 1,
15152: 1,
310: 1,
40759: 1,
9528: 1,
8000: 7,
471: 1,
15180: 1,
14158: 3,
37719: 2,
1895: 1,
31082: 1,
19824: 1,
30956: 1,
18807: 1,
11095: 1,
37756: 2,
746: 1,
10475: 1,
4332: 1,
35730: 1,
11667: 1,
16788: 1,
12182: 4,
39663: 1,
9126: 1,
35760: 1,
12735: 1,
6594: 1,
451: 15,
19402: 1,
463: 3,
10193: 1,
16853: 6,
982: 1,
15865: 1,
2008: 2,
476: 1,
13655: 1,
10213: 1,
10737: 1,
15858: 1,
2035: 6,
2039: 1,
2041: 2
}
lens = []
for key in amazon_msg_lens:
lens += [key] * amazon_msg_lens[key]
target_len_in_bytes = random.choice(lens)
#target_len_in_bytes -= fte.encoder.DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT
#target_len_in_bytes -= fte.encrypter.Encrypter._CTXT_EXPANSION
target_len_in_bits = target_len_in_bytes * 8.0
target_len_in_bits = int(target_len_in_bits)
return target_len_in_bits
# formats
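# Each format maps a grammar name to the template grammar it uses, the order in which its
# handlers run, and the handler instances that fill the matching %%...%% placeholders.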
conf = {}
conf["http_request_keep_alive"] = {
"grammar": "http_request_keep_alive",
"handler_order": ["URL"],
"handlers": {"URL": RankerHandler("[a-zA-Z0-9\?\-\.\&]+", 2048), }
}
conf["http_response_keep_alive"] = {
"grammar": "http_response_keep_alive",
"handler_order": [ # "COOKIE",
"HTTP-RESPONSE-BODY", "CONTENT-LENGTH"
],
"handlers": {
"CONTENT-LENGTH": HttpContentLengthHandler(),
"COOKIE": RankerHandler("([a-zA-Z0-9]+=[a-zA-Z0-9]+;)+", 128),
"HTTP-RESPONSE-BODY": FteHandler(".+", 128),
}
}
conf["http_request_close"] = {
"grammar": "http_request_close",
"handler_order": ["URL"],
"handlers": {"URL": RankerHandler("[a-zA-Z0-9\?\-\.\&]+", 2048), }
}
conf["http_response_close"] = {
"grammar": "http_response_close",
"handler_order": [ # "COOKIE",
"HTTP-RESPONSE-BODY", "CONTENT-LENGTH"
],
"handlers": {
"CONTENT-LENGTH": HttpContentLengthHandler(),
"COOKIE": RankerHandler("([a-zA-Z0-9]+=[a-zA-Z0-9]+;)+", 128),
"HTTP-RESPONSE-BODY": FteHandler(".+", 128),
}
}
conf["pop3_message_response"] = {
"grammar": "pop3_message_response",
"handler_order": ["POP3-RESPONSE-BODY", "CONTENT-LENGTH"],
"handlers": {
"CONTENT-LENGTH": Pop3ContentLengthHandler(),
"POP3-RESPONSE-BODY": RankerHandler("[a-zA-Z0-9]+", 2048),
}
}
conf["pop3_password"] = {
"grammar": "pop3_password",
"handler_order": ["PASSWORD"],
"handlers": {"PASSWORD": RankerHandler("[a-zA-Z0-9]+", 256), }
}
conf["http_request_keep_alive_with_msg_lens"] = {
"grammar": "http_request_keep_alive",
"handler_order": ["URL"],
"handlers": {"URL": FteMsgLensHandler("[a-zA-Z0-9\?\-\.\&]+", 2048), }
}
conf["http_response_keep_alive_with_msg_lens"] = {
"grammar": "http_response_keep_alive",
"handler_order": ["HTTP-RESPONSE-BODY", "CONTENT-LENGTH"],
"handlers": {
"CONTENT-LENGTH": HttpContentLengthHandler(),
"HTTP-RESPONSE-BODY": FteMsgLensHandler(".+", 2048),
}
}
conf["http_amazon_request"] = {
"grammar": "http_request_keep_alive",
"handler_order": ["URL"],
"handlers": {"URL": RankerHandler("[a-zA-Z0-9\?\-\.\&]+", 2048), }
}
conf["http_amazon_response"] = {
"grammar": "http_response_keep_alive",
"handler_order": ["HTTP-RESPONSE-BODY", "CONTENT-LENGTH"],
"handlers": {
"CONTENT-LENGTH": HttpContentLengthHandler(),
"HTTP-RESPONSE-BODY": AmazonMsgLensHandler(".+", 96),
}
}
conf["ftp_entering_passive"] = {
"grammar": "ftp_entering_passive",
"handler_order": ["FTP_PASV_PORT_X", "FTP_PASV_PORT_Y"],
"handlers": {
"FTP_PASV_PORT_X": SetFTPPasvX(),
"FTP_PASV_PORT_Y": SetFTPPasvY(),
}
}
conf["dns_request"] = {
"grammar": "dns_request",
"handler_order": ["DNS_TRANSACTION_ID", "DNS_DOMAIN"],
"handlers": {
"DNS_TRANSACTION_ID": SetDnsTransactionId(),
"DNS_DOMAIN": SetDnsDomain(),
}
}
conf["dns_response"] = {
"grammar": "dns_response",
"handler_order": ["DNS_TRANSACTION_ID", "DNS_DOMAIN", "DNS_IP"],
"handlers": {
"DNS_TRANSACTION_ID": SetDnsTransactionId(),
"DNS_DOMAIN": SetDnsDomain(),
"DNS_IP": SetDnsIp(),
}
}
# grammars
def parser(grammar, msg):
if grammar.startswith(
"http_response") or grammar == "http_amazon_response":
return http_response_parser(msg)
elif grammar.startswith("http_request") or grammar == "http_amazon_request":
return http_request_parser(msg)
elif grammar.startswith("pop3_message_response"):
return pop3_parser(msg)
elif grammar.startswith("pop3_password"):
return pop3_password_parser(msg)
elif grammar.startswith("ftp_entering_passive"):
return ftp_entering_passive_parser(msg)
elif grammar.startswith("dns_request"):
return dns_request_parser(msg)
elif grammar.startswith("dns_response"):
return dns_response_parser(msg)
def generate_template(grammar):
return random.choice(templates[grammar])
#############
templates = {}
server_listen_ip = marionette_tg.conf.get("server.server_ip")
templates["http_request_keep_alive"] = [
"GET http://" +
server_listen_ip +
":8080/%%URL%% HTTP/1.1\r\nUser-Agent: marionette 0.1\r\nConnection: keep-alive\r\n\r\n",
]
templates["http_request_close"] = [
"GET http://" +
server_listen_ip +
":8080/%%URL%% HTTP/1.1\r\nUser-Agent: marionette 0.1\r\nConnection: close\r\n\r\n",
]
templates["http_response_keep_alive"] = [
"HTTP/1.1 200 OK\r\nContent-Length: %%CONTENT-LENGTH%%\r\nConnection: keep-alive\r\n\r\n%%HTTP-RESPONSE-BODY%%",
"HTTP/1.1 404 Not Found\r\nContent-Length: %%CONTENT-LENGTH%%\r\nConnection: keep-alive\r\n\r\n%%HTTP-RESPONSE-BODY%%",
]
templates["http_response_close"] = [
"HTTP/1.1 200 OK\r\nContent-Length: %%CONTENT-LENGTH%%\r\nConnection: close\r\n\r\n%%HTTP-RESPONSE-BODY%%",
"HTTP/1.1 404 Not Found\r\nContent-Length: %%CONTENT-LENGTH%%\r\nConnection: close\r\n\r\n%%HTTP-RESPONSE-BODY%%",
]
templates["pop3_message_response"] = [
"+OK %%CONTENT-LENGTH%% octets\nReturn-Path: [email protected]\nReceived: from client.example.com ([192.0.2.1])\nFrom: [email protected]\nSubject: Test message\nTo: [email protected]\n\n%%POP3-RESPONSE-BODY%%\n.\n",
]
templates["pop3_password"] = ["PASS %%PASSWORD%%\n", ]
templates["http_request_keep_alive_with_msg_lens"] = templates[
"http_request_keep_alive"]
templates["http_response_keep_alive_with_msg_lens"] = templates[
"http_response_keep_alive"]
templates["http_amazon_request"] = templates["http_request_keep_alive"]
templates["http_amazon_response"] = templates["http_response_keep_alive"]
templates["ftp_entering_passive"] = [
"227 Entering Passive Mode (127,0,0,1,%%FTP_PASV_PORT_X%%,%%FTP_PASV_PORT_Y%%).\n",
]
templates["dns_request"] = [
"%%DNS_TRANSACTION_ID%%\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00%%DNS_DOMAIN%%\x00\x00\x01\x00\x01",
]
templates["dns_response"] = [
"%%DNS_TRANSACTION_ID%%\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00%%DNS_DOMAIN%%\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\x02\x00\x04%%DNS_IP%%",
]
def get_http_header(header_name, msg):
retval = None
message_lines = msg.split("\r\n")
for line in message_lines[1:-2]:
        line_components = line.partition(": ")
        if line_components[0] == header_name:
            retval = line_components[-1]
break
return retval
def http_request_parser(msg):
if not msg.startswith("GET"):
return None
retval = {}
if msg.startswith("GET http"):
retval["URL"] = '/'.join(msg.split('\r\n')[0][:-9].split('/')[3:])
else:
retval["URL"] = '/'.join(msg.split('\r\n')[0][:-9].split('/')[1:])
if not msg.endswith("\r\n\r\n"):
retval = None
return retval
def http_response_parser(msg):
if not msg.startswith("HTTP"):
return None
retval = {}
retval["CONTENT-LENGTH"] = int(get_http_header("Content-Length", msg))
retval["COOKIE"] = get_http_header("Cookie", msg)
try:
retval["HTTP-RESPONSE-BODY"] = msg.split("\r\n\r\n")[1]
except:
retval["HTTP-RESPONSE-BODY"] = ''
if retval["CONTENT-LENGTH"] != len(retval["HTTP-RESPONSE-BODY"]):
retval = None
return retval
def pop3_parser(msg):
retval = {}
try:
retval["POP3-RESPONSE-BODY"] = msg.split('\n\n')[1]
assert retval["POP3-RESPONSE-BODY"].endswith('\n.\n')
retval["POP3-RESPONSE-BODY"] = retval["POP3-RESPONSE-BODY"][:-3]
retval["CONTENT-LENGTH"] = len(retval["POP3-RESPONSE-BODY"])
    except Exception as e:
        retval = {}
return retval
def pop3_password_parser(msg):
retval = {}
try:
assert msg.endswith('\n')
retval["PASSWORD"] = msg[5:-1]
except Exception as e:
retval = {}
return retval
def ftp_entering_passive_parser(msg):
retval = {}
try:
assert msg.startswith("227 Entering Passive Mode (")
assert msg.endswith(").\n")
bits = msg.split(',')
retval['FTP_PASV_PORT_X'] = int(bits[4])
retval['FTP_PASV_PORT_Y'] = int(bits[5][:-3])
except Exception as e:
retval = {}
return retval
def validate_dns_domain(msg, dns_response=False):
if dns_response:
expected_splits = 3
split1_msg = '\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00'
else:
expected_splits = 2
split1_msg = '\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00'
tmp_domain_split1 = msg.split(split1_msg)
if len(tmp_domain_split1) != 2:
return None
tmp_domain_split2 = tmp_domain_split1[1].split('\x00\x01\x00\x01')
if len(tmp_domain_split2) != expected_splits:
return None
tmp_domain = tmp_domain_split2[0]
# Check for valid prepended length
# Remove trailing tld prepended length (1), tld (3) and trailing null (1) = 5
if ord(tmp_domain[0]) != len(tmp_domain[1:-5]):
return None
if ord(tmp_domain[-5]) != 3:
return None
# Check for valid TLD
if not re.search("(com|net|org)\x00$", tmp_domain):
return None
# Check for valid domain characters
if not re.match("^[\w\d]+$", tmp_domain[1:-5]):
return None
return tmp_domain
def validate_dns_ip(msg):
tmp_ip_split = msg.split('\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\x02\x00\x04')
if len(tmp_ip_split) != 2:
return None
tmp_ip = tmp_ip_split[1]
if len(tmp_ip) != 4:
return None
return tmp_ip
def dns_request_parser(msg):
retval = {}
if '\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00' not in msg:
return retval
try:
# Nothing to validate for Transaction ID
retval["DNS_TRANSACTION_ID"] = msg[:2]
tmp_domain = validate_dns_domain(msg)
if not tmp_domain:
raise Exception("Bad DNS Domain")
retval["DNS_DOMAIN"] = tmp_domain
except Exception as e:
retval = {}
return retval
def dns_response_parser(msg):
retval = {}
if '\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00' not in msg:
return retval
try:
# Nothing to validate for Transaction ID
retval["DNS_TRANSACTION_ID"] = msg[:2]
tmp_domain = validate_dns_domain(msg, dns_response=True)
if not tmp_domain:
raise Exception("Bad DNS Domain")
retval["DNS_DOMAIN"] = tmp_domain
tmp_ip = validate_dns_ip(msg)
if not tmp_ip:
raise Exception("Bad DNS IP")
retval["DNS_IP"] = tmp_ip
except Exception as e:
retval = {}
return retval
|
|
#!/usr/bin/env python3.6
# Copyright 2017 WSO2 Inc. (http://wso2.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Create comparison charts from two summary.csv files
# ----------------------------------------------------------------------------
import pandas as pd
import seaborn as sns
import sys
import apimchart
sns.set_style("darkgrid")
summary_files = []
names = []
summary_count = 0
def usage():
print(sys.argv[0] + " summary1.csv name1 summary2.csv name2 <summary3.csv> <name3> ... ...")
def main():
global summary_files
global names
global summary_count
args = sys.argv[1:]
args_count = len(args)
if args_count < 4:
print("Please provide arguments at least two summary files with names")
usage()
sys.exit(1)
if args_count % 2 != 0:
print("Please provide a name for each summary file")
usage()
sys.exit(1)
summary_count = args_count // 2
for index in range(0, args_count, 2):
summary_files.append(args[index])
names.append(args[index + 1])
df, df_all = read_summary_csv_files()
save_single_comparison_plots(df)
save_comparison_plots(df)
save_point_plots(df_all)
save_lmplots(df_all)
print("Done")
def add_suffix(string, suffix):
return string + " - " + suffix
def read_summary_csv_files():
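    # Merge every summary CSV on the message size / sleep time / concurrent users keys (wide format)
    # and also stack them into a single long-format frame (df_all) tagged with each summary's name.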
print("Reading " + summary_files[0] + " with name " + names[0])
# DataFrame to merge all data
df_merge = pd.read_csv(summary_files[0])
# Filter errors
df_merge = df_merge.loc[df_merge['Error Count'] < 100]
# DataFrame to append all data
df_all = df_merge.copy()
df_all.insert(0, 'Name', names[0])
keys = ['Message Size (Bytes)', 'Sleep Time (ms)', 'Concurrent Users']
for i in range(1, summary_count):
print("Reading " + summary_files[i] + " with name " + names[i] + " to merge and append")
df_read = pd.read_csv(summary_files[i])
# Filter errors
df_read = df_read.loc[df_read['Error Count'] < 100]
if i == summary_count - 1:
# Add suffixes to new right columns. Add suffixes to left columns using the first summary name
suffixes = [add_suffix('', names[0]), add_suffix('', names[i])]
else:
# Add suffixes to new right columns. Keep the left column names unchanged till the last summary file.
suffixes = ['', add_suffix('', names[i])]
# Merge
df_merge = df_merge.merge(df_read, on=keys, how='outer', suffixes=suffixes)
# Append data frame
df_to_concat = df_read.copy()
df_to_concat.insert(0, 'Name', names[i])
df_all = df_all.append(df_to_concat, ignore_index=True)
# Save all data frame
df_all.to_csv('all_results.csv')
# Format message size values
df_merge['Message Size (Bytes)'] = df_merge['Message Size (Bytes)'].map(apimchart.format_bytes)
return df_merge, df_all
def save_lmplots(df_all):
# Save lmplots
xcolumns = ['Message Size (Bytes)', 'Sleep Time (ms)', 'Concurrent Users']
xcharts = ['message_size', 'sleep_time', 'concurrent_users']
ycolumns = ['Throughput', 'Average (ms)', 'Max (ms)', '90th Percentile (ms)', '95th Percentile (ms)',
'99th Percentile (ms)', 'API Manager GC Throughput (%)', 'API Manager Load Average - Last 1 minute',
'API Manager Load Average - Last 5 minutes', 'API Manager Load Average - Last 15 minutes']
ycharts = ['lmplot_throughput', 'lmplot_average_time', 'lmplot_max_time', 'lmplot_p90', 'lmplot_p95', 'lmplot_p99',
'lmplot_gc_throughput', 'lmplot_loadavg_1', 'lmplot_loadavg_5', 'lmplot_loadavg_15']
ylabels = ['Throughput (Requests/sec)', 'Average Response Time (ms)', 'Maximum Response Time (ms)',
'90th Percentile (ms)', '95th Percentile (ms)', '99th Percentile (ms)', 'API Manager GC Throughput (%)',
'API Manager Load Average - Last 1 minute', 'API Manager Load Average - Last 5 minutes',
'API Manager Load Average - Last 15 minutes']
for ycolumn, ylabel, ychart in zip(ycolumns, ylabels, ycharts):
for xcolumn, xchart in zip(xcolumns, xcharts):
chart = ychart + '_vs_' + xchart
title = ylabel + ' vs ' + xcolumn
apimchart.save_lmplot(df_all, chart, xcolumn, ycolumn, title, ylabel=ylabel)
apimchart.save_lmplot(df_all, chart + '_with_hue', xcolumn, ycolumn, title, hue='Name', ylabel=ylabel)
def save_point_plots(df_all):
unique_sleep_times_in_df_all = df_all['Sleep Time (ms)'].unique()
unique_message_sizes_in_df_all = df_all['Message Size (Bytes)'].unique()
for sleep_time in unique_sleep_times_in_df_all:
for message_size in unique_message_sizes_in_df_all:
df_filtered = df_all.loc[
(df_all['Message Size (Bytes)'] == message_size) & (df_all['Sleep Time (ms)'] == sleep_time)]
chart_suffix = '_' + apimchart.format_time(sleep_time) + '_' + apimchart.format_bytes(message_size)
title_suffix = ' vs Concurrent Users for ' + apimchart.format_bytes(
message_size) + ' messages with ' + apimchart.format_time(sleep_time) + ' backend delay'
ycolumns = ['Throughput', 'Average (ms)', 'Max (ms)', '90th Percentile (ms)', '95th Percentile (ms)',
'99th Percentile (ms)', 'API Manager GC Throughput (%)']
charts = ['throughput', 'average_time', 'max_time', 'p90', 'p95', 'p99', 'gc_throughput']
ylabels = ['Throughput (Requests/sec)', 'Average Response Time (ms)', 'Maximum Response Time (ms)',
'90th Percentile (ms)', '95th Percentile (ms)', '99th Percentile (ms)', 'GC Throughput (%)']
for ycolumn, ylabel, chart in zip(ycolumns, ylabels, charts):
apimchart.save_point_plot(df_filtered, chart + chart_suffix, 'Concurrent Users', ycolumn,
ylabel + title_suffix, hue='Name', ylabel=ylabel)
def save_multi_columns_categorical_charts(df, chart, sleep_time, columns, y, hue, title, kind='point'):
comparison_columns = []
for column in columns:
for name in names:
comparison_columns.append(add_suffix(column, name))
apimchart.save_multi_columns_categorical_charts(df.loc[df['Sleep Time (ms)'] == sleep_time],
chart + "_" + str(sleep_time) + "ms", comparison_columns, y, hue,
title, len(columns) == 1, columns[0], kind)
def save_bar_plot(df, chart, sleep_time, message_size, columns, y, hue, title):
comparison_columns = []
for column in columns:
for name in names:
comparison_columns.append(add_suffix(column, name))
df_results = df.loc[(df['Message Size (Bytes)'] == message_size) & (df['Sleep Time (ms)'] == sleep_time)]
all_columns = ['Message Size (Bytes)', 'Concurrent Users']
all_columns.extend(comparison_columns)
df_results = df_results[all_columns]
df_results = df_results.set_index(['Message Size (Bytes)', 'Concurrent Users']).stack().reset_index().rename(
columns={'level_2': hue, 0: y})
apimchart.save_bar_plot(df_results, chart, 'Concurrent Users', y, title, hue=hue)
def save_comparison_plots(df):
unique_sleep_times_in_df = df['Sleep Time (ms)'].unique()
unique_message_sizes_in_df = df['Message Size (Bytes)'].unique()
for sleep_time in unique_sleep_times_in_df:
save_multi_columns_categorical_charts(df, "comparison_thrpt", sleep_time, ['Throughput'],
"Throughput (Requests/sec)", "API Manager",
"Throughput vs Concurrent Users for " + str(
sleep_time) + "ms backend delay")
save_multi_columns_categorical_charts(df, "comparison_avgt", sleep_time, ['Average (ms)'],
"Average Response Time (ms)", "API Manager",
"Average Response Time vs Concurrent Users for " + str(
sleep_time) + "ms backend delay")
save_multi_columns_categorical_charts(df, "comparison_response_time", sleep_time,
['90th Percentile (ms)', '95th Percentile (ms)',
'99th Percentile (ms)'],
"Response Time (ms)", "API Manager",
"Response Time Percentiles for " + str(sleep_time) + "ms backend delay",
kind='bar')
save_multi_columns_categorical_charts(df, "comparison_loadavg", sleep_time,
['API Manager Load Average - Last 1 minute',
'API Manager Load Average - Last 5 minutes',
'API Manager Load Average - Last 15 minutes'],
"Load Average", "API Manager",
"Load Average with " + str(sleep_time) + "ms backend delay")
save_multi_columns_categorical_charts(df, "comparison_network", sleep_time,
['Received (KB/sec)', 'Sent (KB/sec)'],
"Network Throughput (KB/sec)", "Network",
"Network Throughput with " + str(sleep_time) + "ms backend delay")
save_multi_columns_categorical_charts(df, "comparison_gc", sleep_time, ['API Manager GC Throughput (%)'],
"GC Throughput (%)", "API Manager",
"GC Throughput with " + str(sleep_time) + "ms backend delay")
for message_size in unique_message_sizes_in_df:
chart_suffix = '_' + apimchart.format_time(sleep_time) + '_' + message_size
title_suffix = " for " + message_size + " messages with " + apimchart.format_time(
sleep_time) + " backend delay"
save_bar_plot(df, 'response_time' + chart_suffix, sleep_time, message_size,
['90th Percentile (ms)', '95th Percentile (ms)', '99th Percentile (ms)'],
'Response Time (ms)', 'Summary',
"Response Time Percentiles" + title_suffix)
save_bar_plot(df, 'loadavg' + chart_suffix, sleep_time, message_size,
['API Manager Load Average - Last 1 minute',
'API Manager Load Average - Last 5 minutes',
'API Manager Load Average - Last 15 minutes'],
"Load Average", "API Manager",
"Load Average" + title_suffix)
def merge_all_sleep_time_and_concurrent_users(df):
unique_message_sizes = df['Message Size (Bytes)'].unique()
keys = ['Sleep Time (ms)', 'Concurrent Users']
first_message_size = unique_message_sizes[0]
other_message_sizes = unique_message_sizes[1:]
print("Creating DataFrame with " + first_message_size + " message size")
df_merge = df[df['Message Size (Bytes)'] == first_message_size]
del df_merge['Message Size (Bytes)']
for message_size, i in zip(other_message_sizes, range(0, len(other_message_sizes))):
print("Merging data for " + message_size + " message size")
df_filtered = df[df['Message Size (Bytes)'] == message_size]
del df_filtered['Message Size (Bytes)']
if i == len(other_message_sizes) - 1:
# Add suffixes to new right columns. Add suffixes to left columns using the first summary name
suffixes = [add_suffix('', first_message_size),
add_suffix('', message_size)]
else:
# Add suffixes to new right columns. Keep the left column names unchanged till the last summary file.
suffixes = ['', add_suffix('', message_size)]
# Merge
df_merge = df_merge.merge(df_filtered, on=keys, how='outer', suffixes=suffixes)
return df_merge
def save_single_comparison_plots_by_sleep_time(df, chart, unique_message_sizes, columns, y, hue, title, kind='point'):
comparison_columns = []
for column in columns:
for name in names:
for message_size in unique_message_sizes:
comparison_columns.append(add_suffix(add_suffix(column, name), message_size))
apimchart.save_multi_columns_categorical_charts(df, chart, comparison_columns, y, hue, title, len(columns) == 1,
columns[0], col='Sleep Time (ms)', kind=kind)
def save_single_comparison_plots(df):
df_merge = merge_all_sleep_time_and_concurrent_users(df)
unique_message_sizes = df['Message Size (Bytes)'].unique()
chart_prefix = 'comparison_'
charts = ['thrpt', 'avgt', 'response_time', 'loadavg', 'network', 'gc']
# Removed '90th Percentile (ms)'. Too much data points
comparison_columns = [['Throughput'], ['Average (ms)'], ['95th Percentile (ms)', '99th Percentile (ms)'],
['API Manager Load Average - Last 1 minute', 'API Manager Load Average - Last 5 minutes',
'API Manager Load Average - Last 15 minutes'], ['Received (KB/sec)', 'Sent (KB/sec)'],
['API Manager GC Throughput (%)']]
ycolumns = ['Throughput (Requests/sec)', 'Average Response Time (ms)', 'Response Time (ms)', 'Load Average',
'Network Throughput (KB/sec)', 'GC Throughput (%)']
title_prefixes = ['Throughput', 'Average Response Time', 'Response Time Percentiles', 'Load Average',
'Network Throughput', 'GC Throughput']
plot_kinds = ['point', 'point', 'bar', 'point', 'point', 'point']
for chart, columns, y, title_prefix, plot_kind in zip(charts, comparison_columns, ycolumns, title_prefixes,
plot_kinds):
save_single_comparison_plots_by_sleep_time(df_merge, chart_prefix + chart, unique_message_sizes, columns, y,
'API Manager', title_prefix + ' vs Concurrent Users', kind=plot_kind)
if __name__ == "__main__":
main()
|
|
#! /usr/bin/env python
#
# SCons - a Software Constructor
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/script/sconsign.py 2014/07/05 09:42:21 garyo"
__version__ = "2.3.2"
__build__ = ""
__buildsys__ = "lubuntu"
__date__ = "2014/07/05 09:42:21"
__developer__ = "garyo"
import os
import sys
##############################################################################
# BEGIN STANDARD SCons SCRIPT HEADER
#
# This is the cut-and-paste logic so that a self-contained script can
# interoperate correctly with different SCons versions and installation
# locations for the engine. If you modify anything in this section, you
# should also change other scripts that use this same header.
##############################################################################
# Strip the script directory from sys.path() so on case-insensitive
# (WIN32) systems Python doesn't think that the "scons" script is the
# "SCons" package. Replace it with our own library directories
# (version-specific first, in case they installed by hand there,
# followed by generic) so we pick up the right version of the build
# engine modules if they're in either directory.
script_dir = sys.path[0]
if script_dir in sys.path:
sys.path.remove(script_dir)
libs = []
if "SCONS_LIB_DIR" in os.environ:
libs.append(os.environ["SCONS_LIB_DIR"])
local_version = 'scons-local-' + __version__
local = 'scons-local'
if script_dir:
local_version = os.path.join(script_dir, local_version)
local = os.path.join(script_dir, local)
libs.append(os.path.abspath(local_version))
libs.append(os.path.abspath(local))
scons_version = 'scons-%s' % __version__
# preferred order of scons lookup paths
prefs = []
try:
import pkg_resources
except ImportError:
pass
else:
# when running from an egg add the egg's directory
try:
d = pkg_resources.get_distribution('scons')
except pkg_resources.DistributionNotFound:
pass
else:
prefs.append(d.location)
if sys.platform == 'win32':
# sys.prefix is (likely) C:\Python*;
# check only C:\Python*.
prefs.append(sys.prefix)
prefs.append(os.path.join(sys.prefix, 'Lib', 'site-packages'))
else:
# On other (POSIX) platforms, things are more complicated due to
# the variety of path names and library locations. Try to be smart
# about it.
if script_dir == 'bin':
# script_dir is `pwd`/bin;
# check `pwd`/lib/scons*.
prefs.append(os.getcwd())
else:
if script_dir == '.' or script_dir == '':
script_dir = os.getcwd()
head, tail = os.path.split(script_dir)
if tail == "bin":
# script_dir is /foo/bin;
# check /foo/lib/scons*.
prefs.append(head)
head, tail = os.path.split(sys.prefix)
if tail == "usr":
# sys.prefix is /foo/usr;
# check /foo/usr/lib/scons* first,
# then /foo/usr/local/lib/scons*.
prefs.append(sys.prefix)
prefs.append(os.path.join(sys.prefix, "local"))
elif tail == "local":
h, t = os.path.split(head)
if t == "usr":
# sys.prefix is /foo/usr/local;
# check /foo/usr/local/lib/scons* first,
# then /foo/usr/lib/scons*.
prefs.append(sys.prefix)
prefs.append(head)
else:
# sys.prefix is /foo/local;
# check only /foo/local/lib/scons*.
prefs.append(sys.prefix)
else:
        # sys.prefix is /foo (ends in neither /usr nor /local);
# check only /foo/lib/scons*.
prefs.append(sys.prefix)
temp = [os.path.join(x, 'lib') for x in prefs]
temp.extend([os.path.join(x,
'lib',
'python' + sys.version[:3],
'site-packages') for x in prefs])
prefs = temp
# Add the parent directory of the current python's library to the
# preferences. On SuSE-91/AMD64, for example, this is /usr/lib64,
# not /usr/lib.
try:
libpath = os.__file__
except AttributeError:
pass
else:
# Split /usr/libfoo/python*/os.py to /usr/libfoo/python*.
libpath, tail = os.path.split(libpath)
# Split /usr/libfoo/python* to /usr/libfoo
libpath, tail = os.path.split(libpath)
# Check /usr/libfoo/scons*.
prefs.append(libpath)
# Look first for 'scons-__version__' in all of our preference libs,
# then for 'scons'.
libs.extend([os.path.join(x, scons_version) for x in prefs])
libs.extend([os.path.join(x, 'scons') for x in prefs])
sys.path = libs + sys.path
##############################################################################
# END STANDARD SCons SCRIPT HEADER
##############################################################################
import SCons.compat # so pickle will import cPickle instead
import whichdb
import time
import pickle
import imp
import SCons.SConsign
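# Teach the whichdb module to recognize SCons .dblite signature databases before falling back
# to the standard database-type detection.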
def my_whichdb(filename):
if filename[-7:] == ".dblite":
return "SCons.dblite"
try:
f = open(filename + ".dblite", "rb")
f.close()
return "SCons.dblite"
except IOError:
pass
return _orig_whichdb(filename)
_orig_whichdb = whichdb.whichdb
whichdb.whichdb = my_whichdb
def my_import(mname):
if '.' in mname:
i = mname.rfind('.')
parent = my_import(mname[:i])
fp, pathname, description = imp.find_module(mname[i+1:],
parent.__path__)
else:
fp, pathname, description = imp.find_module(mname)
return imp.load_module(mname, fp, pathname, description)
class Flagger(object):
default_value = 1
def __setitem__(self, item, value):
self.__dict__[item] = value
self.default_value = 0
def __getitem__(self, item):
return self.__dict__.get(item, self.default_value)
Do_Call = None
Print_Directories = []
Print_Entries = []
Print_Flags = Flagger()
Verbose = 0
Readable = 0
def default_mapper(entry, name):
try:
val = eval("entry."+name)
except:
val = None
return str(val)
def map_action(entry, name):
try:
bact = entry.bact
bactsig = entry.bactsig
except AttributeError:
return None
return '%s [%s]' % (bactsig, bact)
def map_timestamp(entry, name):
try:
timestamp = entry.timestamp
except AttributeError:
timestamp = None
if Readable and timestamp:
return "'" + time.ctime(timestamp) + "'"
else:
return str(timestamp)
def map_bkids(entry, name):
try:
bkids = entry.bsources + entry.bdepends + entry.bimplicit
bkidsigs = entry.bsourcesigs + entry.bdependsigs + entry.bimplicitsigs
except AttributeError:
return None
result = []
for i in range(len(bkids)):
result.append(nodeinfo_string(bkids[i], bkidsigs[i], " "))
if result == []:
return None
return "\n ".join(result)
map_field = {
'action' : map_action,
'timestamp' : map_timestamp,
'bkids' : map_bkids,
}
map_name = {
'implicit' : 'bkids',
}
def field(name, entry, verbose=Verbose):
if not Print_Flags[name]:
return None
fieldname = map_name.get(name, name)
mapper = map_field.get(fieldname, default_mapper)
val = mapper(entry, name)
if verbose:
val = name + ": " + val
return val
def nodeinfo_raw(name, ninfo, prefix=""):
# This just formats the dictionary, which we would normally use str()
# to do, except that we want the keys sorted for deterministic output.
d = ninfo.__dict__
try:
keys = ninfo.field_list + ['_version_id']
except AttributeError:
keys = sorted(d.keys())
l = []
for k in keys:
l.append('%s: %s' % (repr(k), repr(d.get(k))))
if '\n' in name:
name = repr(name)
return name + ': {' + ', '.join(l) + '}'
def nodeinfo_cooked(name, ninfo, prefix=""):
try:
field_list = ninfo.field_list
except AttributeError:
field_list = []
if '\n' in name:
name = repr(name)
outlist = [name+':'] + [_f for _f in [field(x, ninfo, Verbose) for x in field_list] if _f]
if Verbose:
sep = '\n ' + prefix
else:
sep = ' '
return sep.join(outlist)
nodeinfo_string = nodeinfo_cooked
def printfield(name, entry, prefix=""):
outlist = field("implicit", entry, 0)
if outlist:
if Verbose:
print " implicit:"
print " " + outlist
outact = field("action", entry, 0)
if outact:
if Verbose:
print " action: " + outact
else:
print " " + outact
def printentries(entries, location):
if Print_Entries:
for name in Print_Entries:
try:
entry = entries[name]
except KeyError:
sys.stderr.write("sconsign: no entry `%s' in `%s'\n" % (name, location))
else:
try:
ninfo = entry.ninfo
except AttributeError:
print name + ":"
else:
print nodeinfo_string(name, entry.ninfo)
printfield(name, entry.binfo)
else:
for name in sorted(entries.keys()):
entry = entries[name]
try:
ninfo = entry.ninfo
except AttributeError:
print name + ":"
else:
print nodeinfo_string(name, entry.ninfo)
printfield(name, entry.binfo)
class Do_SConsignDB(object):
def __init__(self, dbm_name, dbm):
self.dbm_name = dbm_name
self.dbm = dbm
def __call__(self, fname):
# The *dbm modules stick their own file suffixes on the names
        # that are passed in. This causes us to jump through some
        # hoops here so we can accept whatever file name the user specifies.
try:
# Try opening the specified file name. Example:
# SPECIFIED OPENED BY self.dbm.open()
# --------- -------------------------
# .sconsign => .sconsign.dblite
# .sconsign.dblite => .sconsign.dblite.dblite
db = self.dbm.open(fname, "r")
except (IOError, OSError), e:
print_e = e
try:
# That didn't work, so try opening the base name,
# so that if the actually passed in 'sconsign.dblite'
# (for example), the dbm module will put the suffix back
# on for us and open it anyway.
db = self.dbm.open(os.path.splitext(fname)[0], "r")
except (IOError, OSError):
# That didn't work either. See if the file name
# they specified just exists (independent of the dbm
# suffix-mangling).
try:
open(fname, "r")
except (IOError, OSError), e:
# Nope, that file doesn't even exist, so report that
# fact back.
print_e = e
sys.stderr.write("sconsign: %s\n" % (print_e))
return
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s'\n" % (self.dbm_name, fname))
return
except Exception, e:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s': %s\n" % (self.dbm_name, fname, e))
return
if Print_Directories:
for dir in Print_Directories:
try:
val = db[dir]
except KeyError:
sys.stderr.write("sconsign: no dir `%s' in `%s'\n" % (dir, args[0]))
else:
self.printentries(dir, val)
else:
for dir in sorted(db.keys()):
self.printentries(dir, db[dir])
def printentries(self, dir, val):
print '=== ' + dir + ':'
printentries(pickle.loads(val), dir)
def Do_SConsignDir(name):
try:
fp = open(name, 'rb')
except (IOError, OSError), e:
sys.stderr.write("sconsign: %s\n" % (e))
return
try:
sconsign = SCons.SConsign.Dir(fp)
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s'\n" % (name))
return
except Exception, e:
sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s': %s\n" % (name, e))
return
printentries(sconsign.entries, args[0])
##############################################################################
import getopt
helpstr = """\
Usage: sconsign [OPTIONS] FILE [...]
Options:
-a, --act, --action Print build action information.
-c, --csig Print content signature information.
-d DIR, --dir=DIR Print only info about DIR.
-e ENTRY, --entry=ENTRY Print only info about ENTRY.
-f FORMAT, --format=FORMAT FILE is in the specified FORMAT.
-h, --help Print this message and exit.
-i, --implicit Print implicit dependency information.
-r, --readable Print timestamps in human-readable form.
--raw Print raw Python object representations.
-s, --size Print file sizes.
-t, --timestamp Print timestamp information.
-v, --verbose Verbose, describe each field.
"""
opts, args = getopt.getopt(sys.argv[1:], "acd:e:f:hirstv",
['act', 'action',
'csig', 'dir=', 'entry=',
'format=', 'help', 'implicit',
'raw', 'readable',
'size', 'timestamp', 'verbose'])
for o, a in opts:
if o in ('-a', '--act', '--action'):
Print_Flags['action'] = 1
elif o in ('-c', '--csig'):
Print_Flags['csig'] = 1
elif o in ('-d', '--dir'):
Print_Directories.append(a)
elif o in ('-e', '--entry'):
Print_Entries.append(a)
elif o in ('-f', '--format'):
Module_Map = {'dblite' : 'SCons.dblite',
'sconsign' : None}
dbm_name = Module_Map.get(a, a)
if dbm_name:
try:
dbm = my_import(dbm_name)
except:
sys.stderr.write("sconsign: illegal file format `%s'\n" % a)
print helpstr
sys.exit(2)
Do_Call = Do_SConsignDB(a, dbm)
else:
Do_Call = Do_SConsignDir
elif o in ('-h', '--help'):
print helpstr
sys.exit(0)
elif o in ('-i', '--implicit'):
Print_Flags['implicit'] = 1
elif o in ('--raw',):
nodeinfo_string = nodeinfo_raw
elif o in ('-r', '--readable'):
Readable = 1
elif o in ('-s', '--size'):
Print_Flags['size'] = 1
elif o in ('-t', '--timestamp'):
Print_Flags['timestamp'] = 1
elif o in ('-v', '--verbose'):
Verbose = 1
if Do_Call:
for a in args:
Do_Call(a)
else:
for a in args:
dbm_name = whichdb.whichdb(a)
if dbm_name:
Map_Module = {'SCons.dblite' : 'dblite'}
dbm = my_import(dbm_name)
Do_SConsignDB(Map_Module.get(dbm_name, dbm_name), dbm)(a)
else:
Do_SConsignDir(a)
sys.exit(0)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
#!/usr/bin/env python3
#from collections import namedtuple # student in Student datatype
from sys import argv
import sys
import signal
from queue import *
version = [0, 3, 2]
repo = "https://github.com/Paul-Haley/practical_questions"
prompt = "\nPlease enter your eight digit student number: "
def signal_handler(sig, frame):
if sig == signal.SIGINT:
key = 'C'
elif sig == signal.SIGQUIT:
key = '\\'
elif sig == signal.SIGTSTP:
key = 'Z'
else:
key = "anything"
print("\a\tDo not use Ctrl + %s\nAsk the tutor for assistance" % key)
# Will re-print prompt as the program is likely waiting for input
print(prompt, end='')
signal.signal(signal.SIGINT, signal_handler) # ^C
signal.signal(signal.SIGTERM, signal_handler) #TODO: Implement proper handling
signal.signal(signal.SIGQUIT, signal_handler) # ^\
signal.signal(signal.SIGTSTP, signal_handler) # ^Z
"""Given number n, returns the appropriate suffix for the ordinal number."""
def get_ordinal(n):
if (n - 11) % 100 != 0 and n % 10 == 1:
return "st"
if (n - 12) % 100 != 0 and n % 10 == 2:
return "nd"
if (n - 13) % 100 != 0 and n % 10 == 3:
return "rd"
return "th"
def readEnrollment(students, arg):
file = open(arg)
practical = ""
count = 0 # line count
for line in file:
count += 1
if line.startswith("Class List"):
practical = line[-2:] # to be implemented
continue
# Things we want to skip
if line.find("St Lucia") != -1 or line.startswith("StudentID") or \
line.startswith("End") or len(line) <= 1:
continue
# splitting line on ',' as csv
items = line.split(',')
if len(items) > 4 and items[0].isnumeric():
# We know that we have a number (student ID) and a name
students[int(items[0])] = items[4].strip()
continue
        # No conditions met; we have a problem on this line
print("""File error: %s
On line %d:
%s
The line is not comma ',' separated or does not have the student number in the
first column and the student's first name in the 5th column.""" %
(arg, count, line))
sys.exit()
file.close()
#Student = namedtuple("Student", "name class")
"""Given the current Inqueue of student questions, print the current
estimated wait time."""
def print_wait_time(queue):
eta = questions.qsize() * 1.5
print("The estimated wait time is approximately: %G minute(s)" % eta)
#TODO: better estimate
#TODO: take different actions based of excessive queue size (printing)
# MAIN PROGRAM ***************************************************************
if len(argv) == 1: # no arguments given, print usage and exit
print("""Usage:
scheduler.py class_list [class_list...]""")
sys.exit()
print("Reading enrollment lists...")
students = {} # student ID -> student name
# Reads each argument given to import all student IDs and names
for i in range(len(argv) - 1):
readEnrollment(students, argv[i + 1])
print("%d student(s) were found" % len(students))
class InQueue(Queue):
def __contains__(self, item):
return item in self.queue
def __str__(self):
result = ""
for item in self.queue:
result += str(item) + ", "
return result
"""Finds index of item in queue, return index value or -1 if not there."""
def index(self, item):
for i in range(self.qsize()):
if self.queue[i] == item:
return i
return -1
print("""Welcome to the Practical Questions tool!
This program was developed and is maintained by Paul Haley at:
%s
Version: %d.%d.%d""" % (repo, version[0], version[1], version[2]))
questions = InQueue(70) # Check growth ability
student_number = ""
while (True):
# Get input
try :
student_number = input(prompt)
except EOFError: # Students will intentionally try to break things
print("Do not use Ctrl + D\nAsk the tutor for assistance")
student_number = "" # clearing the bad input
continue
# Give next student if available
if student_number == "n" or student_number == 'next':
if questions.empty():
print("No questions awaiting answers...")
continue
print("\aPlease answer the question of: %s" % students[questions.get()])
continue
# Report size of queue and ETA of tutor
if student_number == "s" or student_number == "size":
people = questions.qsize()
print("There are currently %d student(s) with questions in the queue." %
(people))
print_wait_time(questions)
continue
# Help for system
if student_number == "h" or student_number == "help":
print("""Practical Questions Help
To queue a question, just type your student number (8 digits) at the prompt
with no 's'.
Command Short Response
help h Display this help page
next n Pops the next student in the queue and displays their name
size s Display the size of the queue and the expected wait time
version v Display the version number of the program
If you have found an issue with the software, please notify the tutors on duty
and raise an issue on the tool's code repository:
%s
""" % (repo))
continue
# Display version number
if student_number == 'v' or student_number == "version":
print("Practical Questions by Paul Haley\n\n\t Version: %d.%d.%d" %
(version[0], version[1], version[2]))
print("The source code can be found at:\n%s" % (repo))
continue
# Screen dump remaining queue and quit
if student_number == "exit":
if input("Are you sure (Y/n)? ") == 'Y':
print(questions)
sys.exit()
continue
# Invalid student number
if len(student_number) != 8 or not student_number.isnumeric() or \
int(student_number[0]) != 4:
print("Please enter your 8 digit student number with no 's'")
continue
# Student number already in queue.
if int(student_number) in questions:
n = questions.index(int(student_number)) + 1
print("Your number is already in the queue! Your position is %d%s" %
(n, get_ordinal(n)))
print_wait_time(questions)
continue
# Student number unseen, add them
if int(student_number) not in students.keys():
print("Your student number is not recognised! If you think this is a" +
" fault, consult the tutors on duty.")
continue
# Number is of class student who is not already queued
questions.put(int(student_number))
n = questions.qsize()
print("Your student number as been successfullly added!\n" +
"Please wait until a tutor comes. You are %d%s in the queue." %
(n, get_ordinal(n)))
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training utilities for Estimator to use Distribute Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
# pylint: disable=protected-access
CHIEF = dc._TaskType.CHIEF
EVALUATOR = dc._TaskType.EVALUATOR
PS = dc._TaskType.PS
WORKER = dc._TaskType.WORKER
# pylint: enable=protected-access
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
if not cluster_spec:
raise RuntimeError(
'Internal error: `_count_ps` does not expect empty cluster_spec.')
return len(cluster_spec.as_dict().get(PS, []))
def _count_worker(cluster_spec, chief_task_type):
"""Counts the number of workers (including chief) in cluster_spec."""
if not cluster_spec:
raise RuntimeError(
'Internal error: `_count_worker` does not expect empty cluster_spec.')
return (len(cluster_spec.as_dict().get(WORKER, [])) + len(
cluster_spec.as_dict().get(chief_task_type, [])))
def _get_global_id(cluster_spec, task_type, task_id, chief_task_type):
"""Returns the global id of the given task type in a cluster."""
if not task_type:
return 0
# Sort task names in cluster by "chief"/"master", "evaluator", "worker"
# and "ps". More details can be found at the documentation of
# @{tf.estimator.RunConfig.global_id_in_cluster}.
task_type_ordered_list = []
if chief_task_type in cluster_spec.jobs:
task_type_ordered_list = [chief_task_type]
task_type_ordered_list.extend([
t for t in sorted(cluster_spec.jobs) if t != chief_task_type and t != PS
])
if PS in cluster_spec.jobs:
task_type_ordered_list.append(PS)
  # Find the right global_id for the current task.
next_global_id = 0
for t in task_type_ordered_list:
if t == task_type:
return next_global_id + task_id
# `cluster_spec.job_tasks` returns all task addresses of type `t`.
next_global_id += len(cluster_spec.job_tasks(t))
# It is unexpected that it passes through all task_types in
# `task_type_ordered_list`.
raise RuntimeError('Internal Error: `task_type` ({}) is not in '
'cluster_spec ({}).'.format(task_type, cluster_spec))
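# Illustrative worked example (not part of the original source): with the
# hypothetical cluster spec below, the ordering above places chief first, then
# worker, then ps, so the global ids come out as follows:
#
#     spec = server_lib.ClusterSpec({
#         'chief': ['host0:2222'],
#         'worker': ['host1:2222', 'host2:2222'],
#         'ps': ['host3:2222', 'host4:2222'],
#     })
#     _get_global_id(spec, 'chief', 0, chief_task_type='chief')   # -> 0
#     _get_global_id(spec, 'worker', 1, chief_task_type='chief')  # -> 2
#     _get_global_id(spec, 'ps', 0, chief_task_type='chief')      # -> 3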
def _init_run_config_from_worker_context(config, worker_context):
"""Initializes run config from distribute coordinator's worker context."""
# pylint: disable=protected-access
config._service = None
config._cluster_spec = worker_context.cluster_spec
config._task_type = worker_context.task_type
config._task_id = worker_context.task_id
config._evaluation_master = worker_context.master_target
config._master = worker_context.master_target
config._is_chief = worker_context.is_chief
if config._cluster_spec:
# Distributed mode.
if config._task_type != EVALUATOR:
config._num_ps_replicas = _count_ps(config._cluster_spec)
config._num_worker_replicas = _count_worker(
config._cluster_spec, chief_task_type=CHIEF)
config._global_id_in_cluster = _get_global_id(
config._cluster_spec,
config._task_type,
config._task_id,
chief_task_type=CHIEF)
else:
# Evaluator task should not be aware of the other tasks.
config._cluster_spec = server_lib.ClusterSpec({})
config._num_ps_replicas = 0
config._num_worker_replicas = 0
config._global_id_in_cluster = None # undefined
else:
# Local mode.
config._global_id_in_cluster = 0
config._num_ps_replicas = 0
config._num_worker_replicas = 1
def init_run_config(config, tf_config):
"""Initializes RunConfig for distribution strategies."""
# pylint: disable=protected-access
if (config._experimental_distribute and
config._experimental_distribute.train_distribute):
if config._train_distribute:
      raise ValueError('Either `train_distribute` or '
                       '`experimental_distribute.train_distribute` can be '
                       'set.')
config._train_distribute = config._experimental_distribute.train_distribute
if (config._experimental_distribute and
config._experimental_distribute.eval_distribute):
if config._eval_distribute:
      raise ValueError('Either `eval_distribute` or '
                       '`experimental_distribute.eval_distribute` can be '
                       'set.')
config._eval_distribute = config._experimental_distribute.eval_distribute
cluster_spec = server_lib.ClusterSpec(tf_config.get('cluster', {}))
config._init_distributed_setting_from_environment_var({})
# Use distribute coordinator with STANDALONE_CLIENT mode if
# `experimental_distribute.remote_cluster` is set.
if (config._train_distribute and config._experimental_distribute and
config._experimental_distribute.remote_cluster):
if cluster_spec:
raise ValueError('Cannot set both "cluster_spec" of TF_CONFIG and '
'`experimental_distribute.remote_cluster`')
config._distribute_coordinator_mode = dc.CoordinatorMode.STANDALONE_CLIENT
config._cluster_spec = config._experimental_distribute.remote_cluster
logging.info('RunConfig initialized for Distribute Coordinator with '
'STANDALONE_CLIENT mode')
return
# Don't use distribute coordinator if it is local training or cluster has a
  # MASTER job or `train_distribute` is not specified.
if (not tf_config or 'master' in cluster_spec.jobs or
not config._train_distribute):
config._distribute_coordinator_mode = None
config._init_distributed_setting_from_environment_var(tf_config)
config._maybe_overwrite_session_config_for_distributed_training()
logging.info('Not using Distribute Coordinator.')
return
# Use distribute coordinator with INDEPENDENT_WORKER mode otherwise.
assert tf_config
# Set the cluster_spec only since the distributed setting will come from
# distribute coordinator.
config._cluster_spec = cluster_spec
config._distribute_coordinator_mode = dc.CoordinatorMode.INDEPENDENT_WORKER
logging.info('RunConfig initialized for Distribute Coordinator with '
'INDEPENDENT_WORKER mode')
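# For orientation, a hypothetical TF_CONFIG value (not taken from the original
# code) that would select INDEPENDENT_WORKER mode when `train_distribute` is
# also set on the RunConfig:
#
#     tf_config = {
#         'cluster': {
#             'chief': ['host0:2222'],
#             'worker': ['host1:2222', 'host2:2222'],
#         },
#         'task': {'type': 'worker', 'index': 0},
#     }
#     init_run_config(config, tf_config)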
def should_run_distribute_coordinator(config):
"""Checks the config to see whether to run distribute coordinator."""
# pylint: disable=protected-access
if (not hasattr(config, '_distribute_coordinator_mode') or
config._distribute_coordinator_mode is None):
return False
if (not isinstance(config._distribute_coordinator_mode, six.string_types) or
config._distribute_coordinator_mode not in [
dc.CoordinatorMode.STANDALONE_CLIENT,
dc.CoordinatorMode.INDEPENDENT_WORKER
]):
logging.warning('Unexpected distribute_coordinator_mode: %r',
config._distribute_coordinator_mode)
return False
if not config.cluster_spec:
logging.warning('Running `train_and_evaluate` locally, ignoring '
'`experimental_distribute_coordinator_mode`.')
return False
return True
def train_and_evaluate(estimator, train_spec, eval_spec, executor_cls):
"""Run distribute coordinator for Estimator's `train_and_evaluate`.
Args:
estimator: An `Estimator` instance to train and evaluate.
train_spec: A `TrainSpec` instance to specify the training specification.
    eval_spec: An `EvalSpec` instance to specify the evaluation and export
specification.
executor_cls: the evaluation executor class of Estimator.
Raises:
ValueError: if `distribute_coordinator_mode` is None in RunConfig.
"""
run_config = estimator.config
if not run_config._distribute_coordinator_mode: # pylint: disable=protected-access
raise ValueError(
'Distribute coordinator mode is not specified in `RunConfig`.')
def _worker_fn(strategy):
"""Function for worker task."""
local_estimator = copy.deepcopy(estimator)
# pylint: disable=protected-access
local_estimator._config._train_distribute = strategy
_init_run_config_from_worker_context(
local_estimator._config, dc_context.get_current_worker_context())
local_estimator._train_distribution = strategy
# pylint: enable=protected-access
local_estimator.train(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks))
def _eval_fn(strategy):
"""Function for evaluator task."""
local_estimator = copy.deepcopy(estimator)
# pylint: disable=protected-access
local_estimator._config._eval_distribute = strategy
_init_run_config_from_worker_context(
local_estimator._config, dc_context.get_current_worker_context())
local_estimator._eval_distribution = strategy
executor = executor_cls(local_estimator, train_spec, eval_spec)
executor._start_continuous_evaluation()
# pylint: enable=protected-access
# pylint: disable=protected-access
if (run_config._distribute_coordinator_mode ==
dc.CoordinatorMode.STANDALONE_CLIENT):
cluster_spec = run_config.cluster_spec
assert cluster_spec
else:
# The cluster_spec comes from TF_CONFIG environment variable if it is
# INDEPENDENT_WORKER mode.
cluster_spec = None
dc.run_distribute_coordinator(
_worker_fn,
run_config.train_distribute,
_eval_fn,
run_config.eval_distribute,
mode=run_config._distribute_coordinator_mode,
cluster_spec=cluster_spec,
session_config=run_config.session_config)
|
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import errno
import os
import re
from oslo_config import cfg
from oslo_log import log
from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.share.drivers.ganesha import manager as ganesha_manager
from manila.share.drivers.ganesha import utils as ganesha_utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class NASHelperBase(metaclass=abc.ABCMeta):
"""Interface to work with share."""
# drivers that use a helper derived from this class
# should pass the following attributes to
# ganesha_utils.validate_access_rule in their
# update_access implementation.
supported_access_types = ()
supported_access_levels = ()
def __init__(self, execute, config, **kwargs):
self.configuration = config
self._execute = execute
def init_helper(self):
"""Initializes protocol-specific NAS drivers."""
@abc.abstractmethod
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules of share."""
class GaneshaNASHelper(NASHelperBase):
"""Perform share access changes using Ganesha version < 2.4."""
supported_access_types = ('ip', )
supported_access_levels = (constants.ACCESS_LEVEL_RW,
constants.ACCESS_LEVEL_RO)
def __init__(self, execute, config, tag='<no name>', **kwargs):
super(GaneshaNASHelper, self).__init__(execute, config, **kwargs)
self.tag = tag
_confrx = re.compile(r'\.(conf|json)\Z')
def _load_conf_dir(self, dirpath, must_exist=True):
"""Load Ganesha config files in dirpath in alphabetic order."""
try:
dirlist = os.listdir(dirpath)
except OSError as e:
if e.errno != errno.ENOENT or must_exist:
raise
dirlist = []
LOG.info('Loading Ganesha config from %s.', dirpath)
conf_files = list(filter(self._confrx.search, dirlist))
conf_files.sort()
export_template = {}
for conf_file in conf_files:
with open(os.path.join(dirpath, conf_file)) as f:
ganesha_utils.patch(
export_template,
ganesha_manager.parseconf(f.read()))
return export_template
def init_helper(self):
"""Initializes protocol-specific NAS drivers."""
self.ganesha = ganesha_manager.GaneshaManager(
self._execute,
self.tag,
ganesha_config_path=self.configuration.ganesha_config_path,
ganesha_export_dir=self.configuration.ganesha_export_dir,
ganesha_db_path=self.configuration.ganesha_db_path,
ganesha_service_name=self.configuration.ganesha_service_name)
system_export_template = self._load_conf_dir(
self.configuration.ganesha_export_template_dir,
must_exist=False)
if system_export_template:
self.export_template = system_export_template
else:
self.export_template = self._default_config_hook()
def _default_config_hook(self):
"""The default export block.
Subclass this to add FSAL specific defaults.
Suggested approach: take the return value of superclass'
method, patch with dict containing your defaults, and
return the result. However, you can also provide your
defaults from scratch with no regard to superclass.
"""
return self._load_conf_dir(ganesha_utils.path_from(__file__, "conf"))
def _fsal_hook(self, base_path, share, access):
"""Subclass this to create FSAL block."""
return {}
def _cleanup_fsal_hook(self, base_path, share, access):
"""Callback for FSAL specific cleanup after removing an export."""
pass
def _allow_access(self, base_path, share, access):
"""Allow access to the share."""
ganesha_utils.validate_access_rule(
self.supported_access_types, self.supported_access_levels,
access, abort=True)
access = ganesha_utils.fixup_access_rule(access)
cf = {}
accid = access['id']
name = share['name']
export_name = "%s--%s" % (name, accid)
ganesha_utils.patch(cf, self.export_template, {
'EXPORT': {
'Export_Id': self.ganesha.get_export_id(),
'Path': os.path.join(base_path, name),
'Pseudo': os.path.join(base_path, export_name),
'Tag': accid,
'CLIENT': {
'Clients': access['access_to']
},
'FSAL': self._fsal_hook(base_path, share, access)
}
})
self.ganesha.add_export(export_name, cf)
def _deny_access(self, base_path, share, access):
"""Deny access to the share."""
self.ganesha.remove_export("%s--%s" % (share['name'], access['id']))
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules of share."""
rule_state_map = {}
if not (add_rules or delete_rules):
add_rules = access_rules
self.ganesha.reset_exports()
self.ganesha.restart_service()
for rule in add_rules:
try:
self._allow_access('/', share, rule)
except (exception.InvalidShareAccess,
exception.InvalidShareAccessLevel):
rule_state_map[rule['id']] = {'state': 'error'}
continue
for rule in delete_rules:
self._deny_access('/', share, rule)
return rule_state_map
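# For orientation, `_allow_access` above assembles an export block roughly of
# the following shape before handing it to GaneshaManager.add_export (all
# values here are illustrative, not taken from a real deployment):
#
#     {
#         'EXPORT': {
#             'Export_Id': 101,
#             'Path': '/shares/share-01',
#             'Pseudo': '/shares/share-01--access-01',
#             'Tag': 'access-01',
#             'CLIENT': {'Clients': '10.0.0.8'},
#             'FSAL': {},
#         }
#     }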
class GaneshaNASHelper2(GaneshaNASHelper):
"""Perform share access changes using Ganesha version >= 2.4."""
def __init__(self, execute, config, tag='<no name>', **kwargs):
super(GaneshaNASHelper2, self).__init__(execute, config, **kwargs)
if self.configuration.ganesha_rados_store_enable:
self.rados_client = kwargs.pop('rados_client')
def init_helper(self):
"""Initializes protocol-specific NAS drivers."""
kwargs = {
'ganesha_config_path': self.configuration.ganesha_config_path,
'ganesha_export_dir': self.configuration.ganesha_export_dir,
'ganesha_service_name': self.configuration.ganesha_service_name
}
if self.configuration.ganesha_rados_store_enable:
kwargs['ganesha_rados_store_enable'] = (
self.configuration.ganesha_rados_store_enable)
if not self.configuration.ganesha_rados_store_pool_name:
raise exception.GaneshaException(
_('"ganesha_rados_store_pool_name" config option is not '
'set in the driver section.'))
kwargs['ganesha_rados_store_pool_name'] = (
self.configuration.ganesha_rados_store_pool_name)
kwargs['ganesha_rados_export_index'] = (
self.configuration.ganesha_rados_export_index)
kwargs['ganesha_rados_export_counter'] = (
self.configuration.ganesha_rados_export_counter)
kwargs['rados_client'] = self.rados_client
else:
kwargs['ganesha_db_path'] = self.configuration.ganesha_db_path
self.ganesha = ganesha_manager.GaneshaManager(
self._execute, self.tag, **kwargs)
system_export_template = self._load_conf_dir(
self.configuration.ganesha_export_template_dir,
must_exist=False)
if system_export_template:
self.export_template = system_export_template
else:
self.export_template = self._default_config_hook()
def _get_export_path(self, share):
"""Subclass this to return export path."""
raise NotImplementedError()
def _get_export_pseudo_path(self, share):
"""Subclass this to return export pseudo path."""
raise NotImplementedError()
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules of share.
Creates an export per share. Modifies access rules of shares by
dynamically updating exports via DBUS.
"""
confdict = {}
existing_access_rules = []
rule_state_map = {}
if self.ganesha.check_export_exists(share['name']):
confdict = self.ganesha._read_export(share['name'])
existing_access_rules = confdict["EXPORT"]["CLIENT"]
if not isinstance(existing_access_rules, list):
existing_access_rules = [existing_access_rules]
else:
if not access_rules:
LOG.warning("Trying to remove export file '%s' but it's "
"already gone",
self.ganesha._getpath(share['name']))
return
wanted_rw_clients, wanted_ro_clients = [], []
for rule in access_rules:
try:
ganesha_utils.validate_access_rule(
self.supported_access_types, self.supported_access_levels,
rule, True)
except (exception.InvalidShareAccess,
exception.InvalidShareAccessLevel):
rule_state_map[rule['id']] = {'state': 'error'}
continue
rule = ganesha_utils.fixup_access_rule(rule)
if rule['access_level'] == 'rw':
wanted_rw_clients.append(rule['access_to'])
elif rule['access_level'] == 'ro':
wanted_ro_clients.append(rule['access_to'])
if access_rules:
# Add or Update export.
clients = []
if wanted_ro_clients:
clients.append({
'Access_Type': 'ro',
'Clients': ','.join(wanted_ro_clients)
})
if wanted_rw_clients:
clients.append({
'Access_Type': 'rw',
'Clients': ','.join(wanted_rw_clients)
})
if clients: # Empty list if no rules passed validation
if existing_access_rules:
# Update existing export.
ganesha_utils.patch(confdict, {
'EXPORT': {
'CLIENT': clients
}
})
self.ganesha.update_export(share['name'], confdict)
else:
# Add new export.
ganesha_utils.patch(confdict, self.export_template, {
'EXPORT': {
'Export_Id': self.ganesha.get_export_id(),
'Path': self._get_export_path(share),
'Pseudo': self._get_export_pseudo_path(share),
'Tag': share['name'],
'CLIENT': clients,
'FSAL': self._fsal_hook(None, share, None)
}
})
self.ganesha.add_export(share['name'], confdict)
else:
# No clients have access to the share. Remove export.
self.ganesha.remove_export(share['name'])
self._cleanup_fsal_hook(None, share, None)
return rule_state_map
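# As a sketch with illustrative values, the `clients` list built in
# update_access above groups the validated rules by access level before being
# patched into the export, e.g.:
#
#     [
#         {'Access_Type': 'ro', 'Clients': '10.0.0.5,10.0.0.6'},
#         {'Access_Type': 'rw', 'Clients': '10.0.0.7'},
#     ]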
|
|
"""
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying numbers of Monte Carlo samples (in the case
of :class:`RBFSampler`, which uses random Fourier features) and differently
sized subsets of the training set (for :class:`Nystroem`) for the approximate
mapping are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) will not necessarily be classified
into the region in which it appears to lie, since it does not lie on the
plane spanned by the first two principal components.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
from time import time
import matplotlib.pyplot as plt
import numpy as np
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
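# A minimal sketch (not part of the benchmark itself) of the approximation
# used below: RBFSampler maps the input into an n_components-dimensional
# random Fourier feature space in which a plain linear model approximates the
# RBF-kernel SVM.
#
#     # rbf_feature = RBFSampler(gamma=.2, n_components=100, random_state=1)
#     # X_features = rbf_feature.fit_transform(X)  # shape (n_samples, 100)
#     # LinearSVC().fit(X_features, y)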
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, turning
# the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = (data[:n_samples // 2],
                             digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
                           digits.target[n_samples // 2:])
# data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for the timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
|
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import argparse
import sys
import xml.etree.ElementTree as et
import StringIO
import re
import json
from distutils.sysconfig import get_python_lib
sys.path.append(get_python_lib() + '/vnc_cfg_api_server')
from operator import itemgetter, attrgetter
from vnc_api import vnc_api
from vnc_cfg_api_server import gen
from vnc_api.gen.resource_xsd import *
from vnc_api.gen.resource_common import *
from vnc_cfg_api_server.gen.vnc_ifmap_client_gen import *
from vnc_api.gen.vnc_api_client_gen import *
import pycassa
from pycassa.system_manager import *
"""
Input is list of tuple dictionary <type, name, value, prop, refs>
type := project, domain ...
value := dependent graph
prop := Properties of this node
refs := parenr->node reference meta data
"""
def mypretty(l, indent=0, verbose=0):
l = sorted(l, key=itemgetter('type'))
prop_fmt = '\n' + ' ' * (indent + 1)
ref_fmt = '\n' + ' ' * indent
for i in l:
""" Prepare property string"""
propstr = ''
propstr2 = ''
if i['props']:
propstr = [p['name'] for p in i['props']]
propstr = ' (' + ', '.join(propstr) + ')'
if verbose >= 2:
show_list = []
for p in i['props']:
if p['name'] not in vnc_viewer.skip:
show_list.append(p)
propstr2 = ['%s=%s' % (p['name'], p['value'])
for p in show_list]
propstr2 = '\n'.join(propstr2)
propstr2 = propstr2.split('\n')
propstr2 = prop_fmt.join(propstr2)
print ' ' * indent + '%s = '\
% (i['type']) + str(i['name']) + propstr
""" Prepare reference string"""
ref_str = []
if verbose >= 1 and i['refs']:
ref_str = [r['value'] for r in i['refs']]
ref_str = '\n'.join(ref_str)
ref_str = ref_str.split('\n')
ref_str = ref_fmt.join(ref_str)
if len(ref_str) > 0:
print ' ' * indent + ref_str
if len(propstr2) > 0:
print ' ' * (indent + 1) + propstr2
if len(i['value']) > 0:
mypretty(i['value'], indent + 1, verbose)
# end mypretty
"""
Find name in node list. Return the parent and subgraph
for subsequent traversal. Otherwise return def_node (typically None)
"""
def find_node(name, node_list, def_node):
# traverse thru list of dict
for item in node_list:
if name == item['name']:
return (item, item['value'])
return (def_node, def_node)
def find_node_in_tree(fq_path, tree):
path = fq_path.split(':')
# Traverse until name is finished
match, node = find_node('root', tree, tree)
for name in path:
match, n = find_node(name, node, None)
if n is None:
return None
node = n
return node
# end find_node_in_tree
def parse_config(soap_config):
root = et.fromstring(soap_config)
config = []
for r_i in root.findall('*/*/*/resultItem'):
ids = r_i.findall('identity')
ident1 = ids[0].get('name')
try:
ident2 = ids[1].get('name')
except IndexError:
ident2 = None
metas = r_i.find('metadata')
# details
outfile.write('\n' + et.tostring(r_i) + '\n')
outfile.write('ident1 = %s\n' % (ident1))
if ident2:
outfile.write('ident2 = %s\n' % (ident2))
if metas is not None:
outfile.write('metas = %s\n' % (et.tostring(metas)))
if not re.match("^contrail:", ident1):
continue
res = re.search("^contrail:([^:]+):(.*:)*(.*)$", ident1)
type1 = res.group(1)
name1 = res.group(3)
id1 = ident1.split(':')
# strip contrail, type
id1 = id1[2:]
outfile.write('Ident1 type = %s, name = %s\n' % (type1, name1))
if ident2:
res = re.search("^contrail:([^:]+):(.*:)*(.*)$", ident2)
type2 = res.group(1)
name2 = res.group(3)
id2 = ident2.split(':')
# strip contrail, type
id2 = id2[2:]
outfile.write('Ident2 type = %s, name = %s\n' % (type2, name2))
# Traverse until name is finished
match, node = find_node('root', config, config)
for name in id1:
match, n = find_node(name, node, None)
if n is None:
node.append(
{'type': type1, 'name': name1, 'value': [],
'props': [], 'refs': []})
match = node[-1]
node = node[-1]['value']
break
node = n
node1 = node
if ident2:
match, n = find_node(name2, node1, None)
if n is None:
match = {'type': type2, 'name': name2,
'value': [], 'props': [], 'refs': []}
node1.append(match)
# attach property or reference info if available
if metas is None:
continue
for meta in metas:
meta_name = re.sub('{.*}', '', meta.tag)
outfile.write('Handling meta = %s\n' % (meta_name))
if ident2:
if meta_name in link_name_to_xsd_type:
obj = eval(link_name_to_xsd_type[meta_name])()
obj.build(meta)
obj_json = json.dumps(
obj,
default=lambda o: dict(
(k, v) for k,
v in o.__dict__.iteritems()), indent=4)
outfile.write(
'Attaching Reference %s to Id %s\n'
% (meta_name, ident2))
outfile.write('JSON %s = %s\n' % (meta_name, obj_json))
match['refs'].append(
{'name': '%s' % (meta_name), 'value': obj_json})
else:
if meta_name in vnc.prop_name_to_xsd_type:
obj = eval(vnc.prop_name_to_xsd_type[meta_name])()
obj.build(meta)
obj_json = json.dumps(
obj,
default=lambda o: dict(
(k, v) for k,
v in o.__dict__.iteritems()), indent=4)
outfile.write(
'Attaching Property %s to Id %s\n'
% (meta_name, ident1))
outfile.write('JSON %s = %s\n' % (meta_name, obj_json))
match['props'].append(
{'name': '%s' % (meta_name), 'value': obj_json})
return config
# end parse_config
class IfmapClient():
def __init__(self, ifmap_srv_ip, ifmap_srv_port, uname, passwd):
"""
.. attention:: username/passwd from right place
"""
self._CONTRAIL_XSD = "http://www.contrailsystems.com/vnc_cfg.xsd"
self._NAMESPACES = {
'a': 'http://www.w3.org/2003/05/soap-envelope',
'b': 'http://www.trustedcomputinggroup.org/2010/IFMAP/2',
'c': self._CONTRAIL_XSD
}
namespaces = {
'env': "http://www.w3.org/2003/05/soap-envelope",
'ifmap': "http://www.trustedcomputinggroup.org/2010/IFMAP/2",
'meta': "http://www.trustedcomputinggroup.org/"
"2010/IFMAP-METADATA/2",
'contrail': self._CONTRAIL_XSD
}
mapclient = client(("%s" % (ifmap_srv_ip), "%s" % (ifmap_srv_port)),
uname, passwd, namespaces)
result = mapclient.call('newSession', NewSessionRequest())
mapclient.set_session_id(newSessionResult(result).get_session_id())
mapclient.set_publisher_id(newSessionResult(result).get_publisher_id())
self._mapclient = mapclient
# end __init__
def _search(self, start_id, match_meta=None, result_meta=None,
max_depth=1):
        # set ifmap search parameters
srch_params = {}
srch_params['max-depth'] = str(max_depth)
srch_params['max-size'] = '50000000'
if match_meta is not None:
srch_params['match-links'] = match_meta
if result_meta is not None:
# all => don't set result-filter, so server returns all id + meta
if result_meta == "all":
pass
else:
srch_params['result-filter'] = result_meta
else:
# default to return match_meta metadata types only
srch_params['result-filter'] = match_meta
mapclient = self._mapclient
srch_req = SearchRequest(mapclient.get_session_id(), start_id,
search_parameters=srch_params
)
result = mapclient.call('search', srch_req)
return result
# end _search
def ifmap_read(self, ifmap_id, srch_meta, result_meta, field_names=None):
start_id = str(
Identity(name=ifmap_id, type='other', other_type='extended'))
srch_result = self._search(
start_id, srch_meta, result_meta, max_depth=10)
return srch_result
# end ifmap_read
# end class IfmapClient
class VncViewer():
def parse_args(self):
# Eg. python vnc_ifmap_view.py 192.168.1.17 8443 test2 test2
parser = argparse.ArgumentParser(
description="Display IFMAP configuration")
parser.add_argument(
'ifmap_server_ip', help="IP address of ifmap server")
parser.add_argument('ifmap_server_port', help="Port of ifmap server")
parser.add_argument(
'ifmap_username', help="Username known to ifmap server")
parser.add_argument(
'ifmap_password', help="Password known to ifmap server")
parser.add_argument('-v', type=int, default=0, choices=range(0, 3),
help="Turn verbosity on. Default is 0")
"""
parser.add_argument('-n', '--node', default=None,
help = "Start node (fully qualified name such as
default-domain:admin:vn2")
parser.add_argument('-s', '--skip', action='append',
help = "Skip property (such as id-perms)")
"""
self._args = parser.parse_args()
self.verbose = self._args.v
"""
self.start_node = self._args.node
self.skip = self._args.skip
"""
self.start_node = None
self.skip = ['id-perms']
print 'MAP server connection = %s:%s'\
% (self._args.ifmap_server_ip, self._args.ifmap_server_port)
print 'MAP server credentials = %s:%s'\
% (self._args.ifmap_username, self._args.ifmap_password)
print 'Start node = %s' % (self.start_node)
print 'Skip List = %s' % (self.skip)
print 'Verbose = %s' % (self.verbose)
print ''
# end parse_args
def db_connect(self):
ifmap_ip = self._args.ifmap_server_ip
ifmap_port = self._args.ifmap_server_port
user = self._args.ifmap_username
passwd = self._args.ifmap_password
# ifmap interface
db_conn = IfmapClient(ifmap_ip, ifmap_port, user, passwd)
self._db_conn = db_conn
# end db_connect
vnc_viewer = VncViewer()
vnc_viewer.parse_args()
vnc_viewer.db_connect()
#vnc = VncApi('admin', 'contrail123', 'admin', '127.0.0.1', '8082')
vnc = VncApiClientGen(obj_serializer=None)
outfile = file("debug.txt", "w")
""" sample search metas
srch_meta = 'contrail:config-root-domain' (return only domains)
srch_meta = ' or '.join(['contrail:config-root-domain',
'contrail:config-root-virtual-router']) (domain or virtual-router)
srch_meta = 'contrail:config-root-domain or
contrail:config-root-virtual-router' (same as above)
srch_meta = 'contrail:domain-project' (search all projects)
srch_meta = None (search everything)
"""
srch_meta = None
result_meta = 'all'
soap_result = vnc_viewer._db_conn.ifmap_read(
'contrail:config-root:root', srch_meta, result_meta)
config = parse_config(soap_result)
if vnc_viewer.start_node is None:
node = config[0]['value']
else:
node = find_node_in_tree(vnc_viewer.start_node, config)
mypretty(node, verbose=vnc_viewer.verbose)
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-deploy documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-deploy"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-deploy",
"github_user": "googleapis",
"github_repo": "python-deploy",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-deploy-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-deploy.tex",
"google-cloud-deploy Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(root_doc, "google-cloud-deploy", "google-cloud-deploy Documentation", [author], 1,)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-deploy",
"google-cloud-deploy Documentation",
author,
"google-cloud-deploy",
"google-cloud-deploy Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import asyncore
import socket
import time
import traceback
from waitress.buffers import (
OverflowableBuffer,
ReadOnlyFileBasedBuffer,
)
from waitress.parser import HTTPRequestParser
from waitress.compat import thread
from waitress.task import (
ErrorTask,
WSGITask,
)
from waitress.utilities import (
logging_dispatcher,
InternalServerError,
)
class HTTPChannel(logging_dispatcher, object):
"""
Setting self.requests = [somerequest] prevents more requests from being
received until the out buffers have been flushed.
Setting self.requests = [] allows more requests to be received.
"""
task_class = WSGITask
error_task_class = ErrorTask
parser_class = HTTPRequestParser
request = None # A request parser instance
last_activity = 0 # Time of last activity
will_close = False # set to True to close the socket.
close_when_flushed = False # set to True to close the socket when flushed
requests = () # currently pending requests
sent_continue = False # used as a latch after sending 100 continue
force_flush = False # indicates a need to flush the outbuf
#
# ASYNCHRONOUS METHODS (including __init__)
#
def __init__(
self,
server,
sock,
addr,
adj,
map=None,
):
self.server = server
self.adj = adj
self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
self.creation_time = self.last_activity = time.time()
# task_lock used to push/pop requests
self.task_lock = thread.allocate_lock()
# outbuf_lock used to access any outbuf
self.outbuf_lock = thread.allocate_lock()
asyncore.dispatcher.__init__(self, sock, map=map)
# Don't let asyncore.dispatcher throttle self.addr on us.
self.addr = addr
def any_outbuf_has_data(self):
for outbuf in self.outbufs:
if bool(outbuf):
return True
return False
def total_outbufs_len(self):
        # a list comprehension (rather than a genexpr) avoids extra funccalls
        # use b.__len__ rather than len(b) for the benefit of not getting
        # OverflowError on Python 2
        return sum([b.__len__() for b in self.outbufs])
def writable(self):
# if there's data in the out buffer or we've been instructed to close
# the channel (possibly by our server maintenance logic), run
# handle_write
return self.any_outbuf_has_data() or self.will_close
def handle_write(self):
# Precondition: there's data in the out buffer to be sent, or
# there's a pending will_close request
if not self.connected:
            # we don't want to close the channel twice
return
# try to flush any pending output
if not self.requests:
# 1. There are no running tasks, so we don't need to try to lock
# the outbuf before sending
# 2. The data in the out buffer should be sent as soon as possible
# because it's either data left over from task output
# or a 100 Continue line sent within "received".
flush = self._flush_some
elif self.force_flush:
# 1. There's a running task, so we need to try to lock
# the outbuf before sending
# 2. This is the last chunk sent by the Nth of M tasks in a
# sequence on this channel, so flush it regardless of whether
# it's >= self.adj.send_bytes. We need to do this now, or it
# won't get done.
flush = self._flush_some_if_lockable
self.force_flush = False
elif (self.total_outbufs_len() >= self.adj.send_bytes):
# 1. There's a running task, so we need to try to lock
# the outbuf before sending
# 2. Only try to send if the data in the out buffer is larger
            # than self.adj.send_bytes to avoid TCP fragmentation
flush = self._flush_some_if_lockable
else:
# 1. There's not enough data in the out buffer to bother to send
# right now.
flush = None
if flush:
try:
flush()
except socket.error:
if self.adj.log_socket_errors:
self.logger.exception('Socket error')
self.will_close = True
except:
self.logger.exception('Unexpected exception when flushing')
self.will_close = True
if self.close_when_flushed and not self.any_outbuf_has_data():
self.close_when_flushed = False
self.will_close = True
if self.will_close:
self.handle_close()
def readable(self):
# We might want to create a new task. We can only do this if:
# 1. We're not already about to close the connection.
# 2. There's no already currently running task(s).
# 3. There's no data in the output buffer that needs to be sent
# before we potentially create a new task.
return not (self.will_close or self.requests or
self.any_outbuf_has_data())
def handle_read(self):
try:
data = self.recv(self.adj.recv_bytes)
except socket.error:
if self.adj.log_socket_errors:
self.logger.exception('Socket error')
self.handle_close()
return
if data:
self.last_activity = time.time()
self.received(data)
def received(self, data):
"""
Receives input asynchronously and assigns one or more requests to the
channel.
"""
# Preconditions: there's no task(s) already running
request = self.request
requests = []
if not data:
return False
while data:
if request is None:
request = self.parser_class(self.adj)
n = request.received(data)
if request.expect_continue and request.headers_finished:
# guaranteed by parser to be a 1.1 request
request.expect_continue = False
if not self.sent_continue:
# there's no current task, so we don't need to try to
# lock the outbuf to append to it.
self.outbufs[-1].append(b'HTTP/1.1 100 Continue\r\n\r\n')
self.sent_continue = True
self._flush_some()
request.completed = False
if request.completed:
# The request (with the body) is ready to use.
self.request = None
if not request.empty:
requests.append(request)
request = None
else:
self.request = request
if n >= len(data):
break
data = data[n:]
if requests:
self.requests = requests
self.server.add_task(self)
return True
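    # Rough illustration (hypothetical bytes) of the loop above handling two
    # pipelined requests arriving in one recv() chunk:
    #
    #     data = b'GET /a HTTP/1.1\r\n\r\nGET /b HTTP/1.1\r\n\r\n'
    #     received(data)  # -> self.requests == [<request /a>, <request /b>]
    #
    # Each parser's received() returns how many bytes it consumed (n), and the
    # remainder (data[n:]) is fed to a fresh parser instance.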
def _flush_some_if_lockable(self):
# Since our task may be appending to the outbuf, we try to acquire
# the lock, but we don't block if we can't.
locked = self.outbuf_lock.acquire(0)
if locked:
try:
self._flush_some()
finally:
self.outbuf_lock.release()
def _flush_some(self):
# Send as much data as possible to our client
sent = 0
dobreak = False
while True:
outbuf = self.outbufs[0]
            # use outbuf.__len__ rather than len(outbuf) for the benefit of
            # not getting OverflowError on Python 2
outbuflen = outbuf.__len__()
if outbuflen <= 0:
# self.outbufs[-1] must always be a writable outbuf
if len(self.outbufs) > 1:
toclose = self.outbufs.pop(0)
try:
toclose.close()
except:
self.logger.exception(
'Unexpected error when closing an outbuf')
continue # pragma: no cover (coverage bug, it is hit)
else:
dobreak = True
while outbuflen > 0:
chunk = outbuf.get(self.adj.send_bytes)
num_sent = self.send(chunk)
if num_sent:
outbuf.skip(num_sent, True)
outbuflen -= num_sent
sent += num_sent
else:
dobreak = True
break
if dobreak:
break
if sent:
self.last_activity = time.time()
return True
return False
def handle_close(self):
for outbuf in self.outbufs:
try:
outbuf.close()
except:
self.logger.exception(
'Unknown exception while trying to close outbuf')
self.connected = False
asyncore.dispatcher.close(self)
def add_channel(self, map=None):
"""See asyncore.dispatcher
This hook keeps track of opened channels.
"""
asyncore.dispatcher.add_channel(self, map)
self.server.active_channels[self._fileno] = self
def del_channel(self, map=None):
"""See asyncore.dispatcher
This hook keeps track of closed channels.
"""
fd = self._fileno # next line sets this to None
asyncore.dispatcher.del_channel(self, map)
ac = self.server.active_channels
if fd in ac:
del ac[fd]
#
# SYNCHRONOUS METHODS
#
def write_soon(self, data):
if data:
# the async mainloop might be popping data off outbuf; we can
# block here waiting for it because we're in a task thread
with self.outbuf_lock:
if data.__class__ is ReadOnlyFileBasedBuffer:
# they used wsgi.file_wrapper
self.outbufs.append(data)
nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
self.outbufs.append(nextbuf)
else:
self.outbufs[-1].append(data)
# XXX We might eventually need to pull the trigger here (to
# instruct select to stop blocking), but it slows things down so
# much that I'll hold off for now; "server push" on otherwise
# unbusy systems may suffer.
return len(data)
return 0
def service(self):
"""Execute all pending requests """
with self.task_lock:
while self.requests:
request = self.requests[0]
if request.error:
task = self.error_task_class(self, request)
else:
task = self.task_class(self, request)
try:
task.service()
except:
self.logger.exception('Exception when serving %s' %
task.request.path)
if not task.wrote_header:
if self.adj.expose_tracebacks:
body = traceback.format_exc()
else:
body = ('The server encountered an unexpected '
'internal server error')
req_version = request.version
req_headers = request.headers
request = self.parser_class(self.adj)
request.error = InternalServerError(body)
# copy some original request attributes to fulfill
# HTTP 1.1 requirements
request.version = req_version
try:
request.headers['CONNECTION'] = req_headers[
'CONNECTION']
except KeyError:
pass
task = self.error_task_class(self, request)
task.service() # must not fail
else:
task.close_on_finish = True
# we cannot allow self.requests to drop to empty until
# here; otherwise the mainloop gets confused
if task.close_on_finish:
self.close_when_flushed = True
for request in self.requests:
request.close()
self.requests = []
else:
request = self.requests.pop(0)
request.close()
self.force_flush = True
self.server.pull_trigger()
self.last_activity = time.time()
def cancel(self):
""" Cancels all pending requests """
self.force_flush = True
self.last_activity = time.time()
self.requests = []
def defer(self):
pass
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import coloredlogs
import csv
import datetime
import github
import github_fetch_artifacts
import io
import logging
import os
import re
import stat
import subprocess
import traceback
import zipfile
LOG_KEEP_DAYS = 3
BINARY_KEEP_DAYS = 30
# Count is reasonably large because each build has multiple artifacts
# Currently (Sep 2020) each build has 4 artifacts:
# gn-nrf, gn-linux, examples-esp32, example-nrf
#
# We should eventually remove the non-gn version to save space.
BINARY_MAX_COUNT = 80
class SectionChange:
"""Describes delta changes to a specific section"""
def __init__(self, section, fileChange, vmChange):
self.section = section
self.fileChange = fileChange
self.vmChange = vmChange
class ComparisonResult:
"""Comparison results for an entire file"""
def __init__(self, name):
self.fileName = name
self.sectionChanges = []
SECTIONS_TO_WATCH = set(
['.rodata', '.text', '.flash.rodata', '.flash.text', '.bss', '.data'])
def filesInDirectory(dirName):
"""Get all the file names in the specified directory."""
for name in os.listdir(dirName):
mode = os.stat(os.path.join(dirName, name)).st_mode
if stat.S_ISREG(mode):
yield name
def writeFileBloatReport(f, baselineName, buildName):
"""Generate a bloat report diffing a baseline file with a build output file."""
logging.info('Running bloaty diff between %s and %s',
baselineName, buildName)
f.write('Comparing %s and %s:\n\n' % (baselineName, buildName))
result = subprocess.run(
['bloaty', '--csv', buildName, '--', baselineName],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
if result.returncode != 0:
logging.warning('Bloaty execution failed: %d', result.returncode)
f.write('BLOAT EXECUTION FAILED WITH CODE %d:\n' % result.returncode)
content = result.stdout.decode('utf8')
f.write(content)
f.write('\n')
result = ComparisonResult(os.path.basename(buildName))
try:
reader = csv.reader(io.StringIO(content))
for row in reader:
section, vm, fileSize = row
if (section in SECTIONS_TO_WATCH) or (vm not in ['0', 'vmsize']):
result.sectionChanges.append(
SectionChange(section, int(fileSize), int(vm)))
except Exception:
pass  # tolerate unparsable bloaty output; the report stays best-effort
return result
def generateBloatReport(outputFileName,
baselineDir,
buildOutputDir,
title='BLOAT REPORT'):
"""Generates a bloat report fo files between two diferent directories."""
logging.info('Generating bloat diff report between %s and %s', baselineDir,
buildOutputDir)
with open(outputFileName, 'wt') as f:
f.write(title + '\n\n')
baselineNames = set([name for name in filesInDirectory(baselineDir)])
outputNames = set([name for name in filesInDirectory(buildOutputDir)])
baselineOnly = baselineNames - outputNames
if baselineOnly:
logging.warning(
'Some files only exist in the baseline: %r', baselineOnly)
f.write('Files found only in the baseline:\n ')
f.write('\n '.join(baselineOnly))
f.write('\n\n')
outputOnly = outputNames - baselineNames
if outputOnly:
logging.warning('Some files only exist in the build output: %r',
outputOnly)
f.write('Files found only in the build output:\n ')
f.write('\n '.join(outputOnly))
f.write('\n\n')
results = []
for name in (baselineNames & outputNames):
results.append(
writeFileBloatReport(f, os.path.join(baselineDir, name),
os.path.join(buildOutputDir, name)))
return results
def sendFileAsPrComment(job_name, filename, gh_token, gh_repo, gh_pr_number,
compare_results, base_sha):
"""Generates a PR comment containing the specified file content."""
logging.info('Uploading report to "%s", PR %d', gh_repo, gh_pr_number)
rawText = open(filename, 'rt').read()
# a consistent title to help identify obsolete comments
titleHeading = 'Size increase report for "{jobName}"'.format(
jobName=job_name)
api = github.Github(gh_token)
repo = api.get_repo(gh_repo)
pull = repo.get_pull(gh_pr_number)
for comment in pull.get_issue_comments():
if not comment.body.startswith(titleHeading):
continue
logging.info(
'Removing obsolete comment with heading "%s"', (titleHeading))
comment.delete()
if all(len(file.sectionChanges) == 0 for file in compare_results):
logging.info('No results to report')
return
compareTable = 'File | Section | File Change | VM Change\n---- | ------- | ----------- | --------- \n'
for file in compare_results:
for change in file.sectionChanges:
compareTable += '{0} | {1} | {2} | {3}\n'.format(file.fileName,
change.section,
change.fileChange,
change.vmChange)
# NOTE: PRs are issues with attached patches, hence the API naming
pull.create_issue_comment("""{title} from {baseSha}
{table}
<details>
<summary>Full report output</summary>
```
{rawReportText}
```
</details>
""".format(title=titleHeading, baseSha=base_sha, table=compareTable, rawReportText=rawText))
def getPullRequestBaseSha(githubToken, githubRepo, pullRequestNumber):
"""Figure out the SHA for the base of a pull request"""
api = github.Github(githubToken)
repo = api.get_repo(githubRepo)
pull = repo.get_pull(pullRequestNumber)
return pull.base.sha
def cleanDir(name):
"""Ensures a clean directory with the given name exists. Only handles files"""
if os.path.exists(name):
for fname in os.listdir(name):
path = os.path.join(name, fname)
if os.path.isfile(path):
os.unlink(path)
else:
os.mkdir(name)
def downloadArtifact(artifact, dirName):
"""Extract an artifact into a directory."""
zipFile = zipfile.ZipFile(io.BytesIO(artifact.downloadBlob()), 'r')
logging.info('Extracting zip file to %r' % dirName)
zipFile.extractall(dirName)
def main():
"""Main task if executed standalone."""
parser = argparse.ArgumentParser(
description='Fetch master build artifacts.')
parser.add_argument(
'--output-dir',
type=str,
default='.',
help='Where to download the artifacts')
parser.add_argument(
'--github-api-token',
type=str,
help='Github API token to upload the report as a comment')
parser.add_argument(
'--github-repository', type=str, help='Repository to use for PR comments')
parser.add_argument(
'--log-level',
default=logging.INFO,
type=lambda x: getattr(logging, x),
help='Configure the logging level.')
args = parser.parse_args()
# Ensures somewhat pretty logging of what is going on
logging.basicConfig(
level=args.log_level,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
coloredlogs.install()
if not args.github_api_token:
logging.error(
'Required arguments missing: github api token is required.')
return
# all known artifacts
artifacts = [a for a in github_fetch_artifacts.getAllArtifacts(
args.github_api_token, args.github_repository)]
# process newest artifacts first
artifacts.sort(key=lambda x: x.created_at, reverse=True)
current_time = datetime.datetime.now()
seen_names = set()
pull_artifact_re = re.compile('^(.*)-pull-(\\d+)$')
binary_count = 0
for a in artifacts:
# Ignore size reports; they are handled by a separate script.
if a.name.startswith('Size,'):
continue
# logs cleanup after 3 days
is_log = a.name.endswith('-logs')
if not is_log:
binary_count = binary_count + 1
need_delete = False
if (current_time - a.created_at).days > BINARY_KEEP_DAYS:
# Do not keep binary builds forever
need_delete = True
elif not is_log and binary_count > BINARY_MAX_COUNT:
# Keep a maximum number of binary packages
need_delete = True
elif is_log and (current_time - a.created_at).days > LOG_KEEP_DAYS:
# Logs are kept even shorter
need_delete = True
if need_delete:
logging.info('Old artifact: %s from %r' % (a.name, a.created_at))
a.delete()
continue
if a.name.endswith('-logs'):
# log artifact names are duplicated across builds, which is fine
continue
if a.name in seen_names:
logging.info('Artifact name already seen before: %s' % a.name)
a.delete()
continue
seen_names.add(a.name)
m = pull_artifact_re.match(a.name)
if not m:
logging.info('Non-PR artifact found: %r from %r' %
(a.name, a.created_at))
continue
prefix = m.group(1)
pull_number = int(m.group(2))
logging.info('Processing PR %s via artifact %r' %
(pull_number, a.name))
try:
base_sha = getPullRequestBaseSha(
args.github_api_token, args.github_repository, pull_number)
base_artifact_name = '%s-%s' % (prefix, base_sha)
base_artifacts = [
v for v in artifacts if v.name == base_artifact_name]
if len(base_artifacts) != 1:
raise Exception('Did not find exactly one artifact for %s: %r' % (
base_artifact_name, [v.name for v in base_artifacts]))
b = base_artifacts[0]
logging.info('Diff will be against artifact %r' % b.name)
aOutput = os.path.join(args.output_dir, 'pull_artifact')
bOutput = os.path.join(args.output_dir, 'master_artifact')
cleanDir(aOutput)
cleanDir(bOutput)
downloadArtifact(a, aOutput)
downloadArtifact(b, bOutput)
report_name = os.path.join(aOutput, 'report.csv')
results = generateBloatReport(report_name, bOutput, aOutput)
sendFileAsPrComment(prefix, report_name, args.github_api_token,
args.github_repository, pull_number, results, base_sha)
# If running over a top level directory, ensure git sees no output
cleanDir(aOutput)
cleanDir(bOutput)
# Output processed.
a.delete()
except Exception:
tb = traceback.format_exc()
logging.warning('Failed to process bloat report: %s', tb)
if __name__ == '__main__':
# execute only if run as a script
main()
|
|
# -*- coding: utf-8 -*-
from cms.utils.conf import get_cms_setting
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import signals
from django.dispatch import Signal
from cms.cache.permissions import clear_user_permission_cache, clear_permission_cache
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, PageUser, PageUserGroup
from django.conf import settings
from menus.menu_pool import menu_pool
# fired after a page's location is changed, i.e. it is moved from one node to another
page_moved = Signal(providing_args=["instance"])
# fired when a node (Title) with an application attached gets saved
application_post_changed = Signal(providing_args=["instance"])
# fired after a page gets published (copied to the public model) - there may be more
# than one instance published before this signal gets called
post_publish = Signal(providing_args=["instance"])
def update_plugin_positions(**kwargs):
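"""Re-number plugins in the same placeholder and language so their positions
stay consecutive (0, 1, 2, ...) after a plugin is deleted."""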
plugin = kwargs['instance']
plugins = CMSPlugin.objects.filter(language=plugin.language, placeholder=plugin.placeholder).order_by("position")
last = 0
for p in plugins:
if p.position != last:
p.position = last
p.save()
last += 1
signals.post_delete.connect(update_plugin_positions, sender=CMSPlugin, dispatch_uid="cms.plugin.update_position")
def update_title_paths(instance, **kwargs):
"""Update child pages paths in case when page was moved.
"""
for title in instance.title_set.all():
title.save()
page_moved.connect(update_title_paths, sender=Page, dispatch_uid="cms.title.update_path")
def update_title(title):
slug = u'%s' % title.slug
if title.page.is_home():
title.path = ''
elif not title.has_url_overwrite:
title.path = u'%s' % slug
parent_page_id = title.page.parent_id
if parent_page_id:
parent_title = Title.objects.get_title(parent_page_id,
language=title.language, language_fallback=True)
if parent_title:
title.path = (u'%s/%s' % (parent_title.path, slug)).lstrip("/")
def pre_save_title(instance, raw, **kwargs):
"""Save old state to instance and setup path
"""
if not instance.page.publisher_is_draft:
menu_pool.clear(instance.page.site_id)
if instance.id and not hasattr(instance, "tmp_path"):
instance.tmp_path = None
instance.tmp_application_urls = None
try:
instance.tmp_path, instance.tmp_application_urls = \
Title.objects.filter(pk=instance.id).values_list('path', 'application_urls')[0]
except IndexError:
pass # no Titles exist for this page yet
# Build path from parent page's path and slug
if instance.has_url_overwrite and instance.path:
instance.path = instance.path.strip(" /")
else:
update_title(instance)
signals.pre_save.connect(pre_save_title, sender=Title, dispatch_uid="cms.title.presave")
def post_save_title(instance, raw, created, **kwargs):
# Update descendants only if path changed
application_changed = False
prevent_descendants = hasattr(instance, 'tmp_prevent_descendant_update')
if instance.path != getattr(instance, 'tmp_path', None) and not prevent_descendants:
descendant_titles = Title.objects.filter(
page__lft__gt=instance.page.lft,
page__rght__lt=instance.page.rght,
page__tree_id__exact=instance.page.tree_id,
language=instance.language,
has_url_overwrite=False, # TODO: what if child has no url overwrite?
).order_by('page__tree_id', 'page__parent', 'page__lft')
for descendant_title in descendant_titles:
descendant_title.path = '' # just reset path
descendant_title.tmp_prevent_descendant_update = True
if descendant_title.application_urls:
application_changed = True
descendant_title.save()
if not prevent_descendants and \
(instance.application_urls != getattr(instance, 'tmp_application_urls', None) or application_changed):
# fire it if we have some application linked to this page or some descendant
application_post_changed.send(sender=Title, instance=instance)
# remove temporary attributes
if hasattr(instance, 'tmp_path'):
del instance.tmp_path
if hasattr(instance, 'tmp_application_urls'):
del instance.tmp_application_urls
if prevent_descendants:
del instance.tmp_prevent_descendant_update
signals.post_save.connect(post_save_title, sender=Title, dispatch_uid="cms.title.postsave")
def post_save_user(instance, raw, created, **kwargs):
"""Signal called when new user is created, required only when CMS_PERMISSION.
Assigns creator of the user to PageUserInfo model, so we know who had created
this user account.
requires: CurrentUserMiddleware
"""
from cms.utils.permissions import get_current_user
# read current user from thread locals
creator = get_current_user()
if not creator or not created or creator.is_anonymous():
return
page_user = PageUser(user_ptr_id=instance.pk, created_by=creator)
page_user.__dict__.update(instance.__dict__)
page_user.save()
def post_save_user_group(instance, raw, created, **kwargs):
"""The same like post_save_user, but for Group, required only when
CMS_PERMISSION.
Assigns creator of the group to PageUserGroupInfo model, so we know who had
created this user account.
requires: CurrentUserMiddleware
"""
from cms.utils.permissions import get_current_user
# read current user from thread locals
creator = get_current_user()
if not creator or not created or creator.is_anonymous():
return
page_user = PageUserGroup(group_ptr_id=instance.pk, created_by=creator)
page_user.__dict__.update(instance.__dict__)
page_user.save()
if get_cms_setting('PERMISSION'):
# only if permissions are in use
from django.contrib.auth.models import User, Group
# register signals to user related models
signals.post_save.connect(post_save_user, User)
signals.post_save.connect(post_save_user_group, Group)
def pre_save_page(instance, raw, **kwargs):
"""Assigns old_page attribute, so we can compare changes.
"""
instance.old_page = None
try:
instance.old_page = Page.objects.get(pk=instance.pk)
except ObjectDoesNotExist:
pass
def post_save_page_moderator(instance, raw, created, **kwargs):
"""Helper post save signal.
"""
old_page = instance.old_page
# tell the moderator that something happened with this page
from cms.utils.moderator import page_changed
if not old_page:
page_changed(instance, old_page)
def post_save_page(instance, **kwargs):
if instance.old_page is None or instance.old_page.parent_id != instance.parent_id:
for page in instance.get_descendants(include_self=True):
for title in page.title_set.all():
update_title(title)
title.save()
def update_placeholders(instance, **kwargs):
instance.rescan_placeholders()
def invalidate_menu_cache(instance, **kwargs):
menu_pool.clear(instance.site_id)
# tell the moderator there is something happening with this page
signals.pre_save.connect(pre_save_page, sender=Page, dispatch_uid="cms.page.presave")
signals.post_save.connect(post_save_page_moderator, sender=Page, dispatch_uid="cms.page.postsave")
signals.post_save.connect(post_save_page, sender=Page)
signals.post_save.connect(update_placeholders, sender=Page)
signals.pre_save.connect(invalidate_menu_cache, sender=Page)
signals.pre_delete.connect(invalidate_menu_cache, sender=Page)
def pre_save_user(instance, raw, **kwargs):
clear_user_permission_cache(instance)
def pre_delete_user(instance, **kwargs):
clear_user_permission_cache(instance)
def pre_save_group(instance, raw, **kwargs):
if instance.pk:
for user in instance.user_set.all():
clear_user_permission_cache(user)
def pre_delete_group(instance, **kwargs):
for user in instance.user_set.all():
clear_user_permission_cache(user)
def _clear_users_permissions(instance):
if instance.user:
clear_user_permission_cache(instance.user)
if instance.group:
for user in instance.group.user_set.all():
clear_user_permission_cache(user)
def pre_save_pagepermission(instance, raw, **kwargs):
_clear_users_permissions(instance)
def pre_delete_pagepermission(instance, **kwargs):
_clear_users_permissions(instance)
def pre_save_globalpagepermission(instance, raw, **kwargs):
_clear_users_permissions(instance)
menu_pool.clear(all=True)
def pre_delete_globalpagepermission(instance, **kwargs):
_clear_users_permissions(instance)
def pre_save_delete_page(instance, **kwargs):
clear_permission_cache()
def post_revision(instances, **kwargs):
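# After django-reversion commits a revision, reset revision_id on the first
# Page instance in it, keep its publisher state, and stop.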
for inst in instances:
if isinstance(inst, Page):
page = Page.objects.get(pk=inst.pk)
page.revision_id = 0
page._publisher_keep_state = True
page.save()
return
if get_cms_setting('PERMISSION'):
signals.pre_save.connect(pre_save_user, sender=User)
signals.pre_delete.connect(pre_delete_user, sender=User)
signals.pre_save.connect(pre_save_user, sender=PageUser)
signals.pre_delete.connect(pre_delete_user, sender=PageUser)
signals.pre_save.connect(pre_save_group, sender=Group)
signals.pre_delete.connect(pre_delete_group, sender=Group)
signals.pre_save.connect(pre_save_group, sender=PageUserGroup)
signals.pre_delete.connect(pre_delete_group, sender=PageUserGroup)
signals.pre_save.connect(pre_save_pagepermission, sender=PagePermission)
signals.pre_delete.connect(pre_delete_pagepermission, sender=PagePermission)
signals.pre_save.connect(pre_save_globalpagepermission, sender=GlobalPagePermission)
signals.pre_delete.connect(pre_delete_globalpagepermission, sender=GlobalPagePermission)
signals.pre_save.connect(pre_save_delete_page, sender=Page)
signals.pre_delete.connect(pre_save_delete_page, sender=Page)
if 'reversion' in settings.INSTALLED_APPS:
from reversion.models import post_revision_commit
post_revision_commit.connect(post_revision)
|
|
"""
Define a base command class that:
1) provides a consistent interface with `git`,
2) implements common git operations in one place, and
3) tracks file- and repo-specific data that is necessary
for Git operations.
"""
import os
import subprocess
import shutil
import sublime
from ..common import util
from .git_mixins.status import StatusMixin
from .git_mixins.active_branch import ActiveBranchMixin
from .git_mixins.branches import BranchesMixin
from .git_mixins.stash import StashMixin
from .git_mixins.stage_unstage import StageUnstageMixin
from .git_mixins.checkout_discard import CheckoutDiscardMixin
from .git_mixins.remotes import RemotesMixin
from .git_mixins.ignore import IgnoreMixin
from .git_mixins.tags import TagsMixin
from .git_mixins.history import HistoryMixin
from .git_mixins.rewrite import RewriteMixin
from .git_mixins.merge import MergeMixin
git_path = None
class GitSavvyError(Exception):
pass
class GitCommand(StatusMixin,
ActiveBranchMixin,
BranchesMixin,
StashMixin,
StageUnstageMixin,
CheckoutDiscardMixin,
RemotesMixin,
IgnoreMixin,
TagsMixin,
HistoryMixin,
RewriteMixin,
MergeMixin
):
"""
Base class for all Sublime commands that interact with git.
"""
_last_remotes_used = {}
def git(self, *args, stdin=None, working_dir=None, show_panel=False, throw_on_stderr=True):
"""
Run the git command specified in `*args` and return the output
of the git command as a string.
If stdin is provided, it should be a string and will be piped to
the git process. If `working_dir` is provided, set this as the
current working directory for the git process; otherwise,
the `repo_path` value will be used.
"""
args = self._include_global_flags(args)
command = (self.git_binary_path, ) + tuple(arg for arg in args if arg)
command_str = " ".join(command)
gitsavvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
show_panel_overrides = gitsavvy_settings.get("show_panel_for")
show_panel = show_panel or args[0] in show_panel_overrides
stdout, stderr = None, None
def raise_error(msg):
if type(msg) == str and "fatal: Not a git repository" in msg:
sublime.set_timeout_async(
lambda: sublime.active_window().run_command("gs_offer_init"))
elif isinstance(msg, str) and "*** Please tell me who you are." in msg:
sublime.set_timeout_async(
lambda: sublime.active_window().run_command("gs_setup_user"))
sublime.status_message(
"Failed to run `git {}`. See log for details.".format(command[1])
)
util.log.panel(msg)
util.debug.log_error(msg)
raise GitSavvyError(msg)
try:
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir or self.repo_path,
env=os.environ,
startupinfo=startupinfo)
stdout, stderr = p.communicate(stdin.encode(encoding="UTF-8") if stdin else None)
stdout, stderr = self.decode_stdout(stdout), stderr.decode()
except Exception as e:
raise_error(e)
finally:
util.debug.log_git(args, stdin, stdout, stderr)
if p.returncode != 0 and throw_on_stderr:
raise_error("`{}` failed with following output:\n{}\n{}".format(
command_str, stdout, stderr
))
if show_panel:
if gitsavvy_settings.get("show_input_in_output"):
util.log.panel("> {}\n{}\n{}".format(command_str, stdout, stderr))
else:
util.log.panel("{}\n{}".format(stdout, stderr))
return stdout
def decode_stdout(self, stdout):
try:
return stdout.decode()
except UnicodeDecodeError as err:
msg = (
"GitSavvy was unable to parse content successfully. Would you "
"like to fallback to the default encoding? Text may not "
"appear as expected."
)
if sublime.ok_cancel_dialog(msg, "Fallback?"):
return stdout.decode("windows-1252")
raise err
@property
def encoding(self):
return "UTF-8"
@property
def git_binary_path(self):
"""
Return the path to the available `git` binary.
"""
global git_path
if not git_path:
git_path_setting = sublime.load_settings("GitSavvy.sublime-settings").get("git_path")
if isinstance(git_path_setting, dict):
git_path = git_path_setting.get(sublime.platform())
if not git_path:
git_path = git_path_setting.get('default')
else:
git_path = git_path_setting
if not git_path:
git_path = shutil.which("git")
if not git_path:
msg = ("Your Git binary cannot be found. If it is installed, add it "
"to your PATH environment variable, or add a `git_path` setting "
"in the `User/GitSavvy.sublime-settings` file.")
sublime.error_message(msg)
raise ValueError("Git binary not found.")
return git_path
@property
def repo_path(self):
return self._repo_path()
@property
def short_repo_path(self):
if "HOME" in os.environ:
return self.repo_path.replace(os.environ["HOME"], "~")
else:
return self.repo_path
def _repo_path(self, throw_on_stderr=True):
"""
Return the absolute path to the git repo that contains the file that this
view interacts with. Like `file_path`, this can be overridden by setting
the view's `git_savvy.repo_path` setting.
"""
def invalid_repo():
if throw_on_stderr:
raise ValueError("Unable to determine Git repo path.")
return None
# The below condition will be true if run from a WindowCommand and false
# from a TextCommand.
view = self.window.active_view() if hasattr(self, "window") else self.view
repo_path = view.settings().get("git_savvy.repo_path")
if not repo_path or not os.path.exists(repo_path):
file_path = self.file_path
file_dir = os.path.dirname(file_path) if file_path else None
working_dir = file_path and os.path.isdir(file_dir) and file_dir
if not working_dir:
window_folders = sublime.active_window().folders()
if not window_folders or not os.path.isdir(window_folders[0]):
return invalid_repo()
working_dir = window_folders[0]
stdout = self.git(
"rev-parse",
"--show-toplevel",
working_dir=working_dir,
throw_on_stderr=throw_on_stderr
)
repo_path = stdout.strip()
if not repo_path:
return invalid_repo()
view.settings().set("git_savvy.repo_path", repo_path)
return repo_path
@property
def file_path(self):
"""
Return the absolute path to the file this view interacts with. In most
cases, this will be the open file. However, for views with special
functionality, this default behavior can be overridden by setting the
view's `git_savvy.file_path` setting.
"""
# The below condition will be true if run from a WindowCommand and false
# from a TextCommand.
view = self.window.active_view() if hasattr(self, "window") else self.view
fpath = view.settings().get("git_savvy.file_path")
if not fpath:
fpath = view.file_name()
view.settings().set("git_savvy.file_path", fpath)
return fpath
def get_rel_path(self, abs_path=None):
"""
Return the file path relative to the repo root.
"""
path = abs_path or self.file_path
return os.path.relpath(path, start=self.repo_path)
def _include_global_flags(self, args):
"""
Transforms the Git command arguments with flags indicated in the
global GitSavvy settings.
"""
git_cmd, *addl_args = args
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
global_flags = savvy_settings.get("global_flags")
if global_flags and git_cmd in global_flags:
args = [git_cmd] + global_flags[git_cmd] + addl_args
return args
@property
def last_remote_used(self):
"""
With this getter and setter, keep global track of last remote used
for each repo. Will return whatever was set last, or "origin" if
never set.
"""
return self._last_remotes_used.get(self.repo_path, "origin")
@last_remote_used.setter
def last_remote_used(self, value):
"""
Setter for above property. Saves per-repo information in
class attribute dict.
"""
self._last_remotes_used[self.repo_path] = value
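# --- Illustrative usage (not part of GitSavvy itself) -----------------------
# A minimal sketch of how a concrete Sublime Text command might build on
# GitCommand. The command name below is hypothetical; WindowCommand is the
# standard Sublime Text base class for window commands, and util.log.panel is
# the output-panel helper already used above.
import sublime_plugin

class GsExampleShowStatusCommand(sublime_plugin.WindowCommand, GitCommand):
    """Show `git status --porcelain` for the current repo in the output panel."""

    def run(self):
        status = self.git("status", "--porcelain")
        util.log.panel(status)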
|
|
#!/usr/bin/env python
from __future__ import print_function
import requests
import hashlib
import re
import os
import sys
API_KEY = "0d9c21f4af919c86946abbe45e226713"
SHARED_SECRET = "d59f49d7997ac6254a05355e60a8ec45"
APP_NAME = "cmus-scrobble"
REGISTERED_TO = "drooool"
URL_API_ROOT = "http://ws.audioscrobbler.com/2.0/"
URL_AUTHORIZATION = "http://www.last.fm/api/auth/"
CONFIG_DIR = os.path.expanduser("~/.cmus-scrobble/")
SESSION_KEY_PATH = os.path.join(CONFIG_DIR, "session")
TOKEN_KEY_PATH = os.path.join(CONFIG_DIR, "token")
class TokenRequestException(Exception):
pass
class NotAuthenticatedException(Exception):
pass
class AuthenticationException(Exception):
pass
class ScrobbleException(Exception):
pass
class NowPlayingException(Exception):
pass
class LastFMInstance:
def __init__(self):
self.sessionKey = None
self.checkSession()
def checkSession(self):
if self.sessionExists: # User authenticated and session key obtained
self.sessionKey = open(SESSION_KEY_PATH).read()
elif self.tokenExists: # User possibly has authenticated and session key hasn't been obtained yet
self.fetchSession()
else: # User has definitively not authenticated. Ask and quit
self.requestAuthorization()
print("Starting cmus-scrobble")
def scrobble(self, artist=None, album=None, title=None, started=None):
if self.sessionExists:
self.postScrobble(artist, album, title, started)
else:
self.checkSession()
def postScrobble(self, artist, album, title, started):
args = {
'method': 'track.scrobble',
'artist': artist,
'album': album,
'track': title,
'timestamp': started,
'api_key': API_KEY,
'sk': self.sessionKey
}
self.addSignature(args)
try:
scrobbleResponse = requests.post(URL_API_ROOT, args)
if scrobbleResponse.status_code == 200:
print("Scrobbled:", artist, "-", title, "(" + album + ")")
except requests.RequestException:
raise ScrobbleException
def updateNowPlaying(self, artist=None, album=None, title=None):
if self.sessionExists:
self.postNowPlaying(artist, album, title)
else:
self.checkSession()
def postNowPlaying(self, artist, album, title):
args = {
'method': 'track.updateNowPlaying',
'artist': artist,
'album': album,
'track': title,
'api_key': API_KEY,
'sk': self.sessionKey
}
self.addSignature(args)
try:
nowPlayingResponse = requests.post(URL_API_ROOT, args)
except requests.RequestException:
raise NowPlayingException
## Authentication methods
##
def requestToken(self):
try:
args = {
'method': 'auth.gettoken',
'api_key': API_KEY,
'format': 'json'
}
self.addSignature(args)
tokenResponse = requests.get(URL_API_ROOT, args)
if tokenResponse.status_code != 200:
raise TokenRequestException
token = tokenResponse.json()["token"]
if not self.configDirExists:
os.makedirs(CONFIG_DIR)
tokenFile = open(TOKEN_KEY_PATH, "w+")
tokenFile.write(token)
tokenFile.close()
except requests.RequestException:
raise TokenRequestException
def requestAuthorization(self): # Exits the program
print("Please allow cmus-scrobble to access your Last.fm account")
print(self.authorizationURL)
print("Exiting program..")
exit(0)
def fetchSession(self):
args = {
'method': 'auth.getSession',
'api_key': API_KEY,
'token': self.token,
}
self.addSignature(args)
try:
sessionResponse = requests.get(URL_API_ROOT, args)
sessionResponse = sessionResponse.content.decode()
except requests.RequestException:
raise AuthenticationException
if "<lfm status=\"ok\">" in sessionResponse:
pattern = re.compile("<key>([0-9a-f]+)</key>")
sessionKey = pattern.search(sessionResponse).group(1)
if not self.configDirExists:
os.makedirs(CONFIG_DIR)
sessionFile = open(SESSION_KEY_PATH, "w+")
sessionFile.write(sessionKey)
sessionFile.close()
if self.tokenExists:
os.remove(TOKEN_KEY_PATH)
else:
pattern = re.compile("<error code=\"([0-9]+)\">")
errorCode = pattern.search(sessionResponse).group(1)
if errorCode == "14": # Not authorized yet by user
self.requestAuthorization()
elif errorCode == "15" or errorCode == "4": # Token has expired or is invalid
if self.tokenExists:
os.remove(TOKEN_KEY_PATH)
self.checkSession()
else:
raise AuthenticationException
## Helper methods
##
@staticmethod
def addSignature(args):
signatureStr = ""
for key in sorted(args.keys()):
signatureStr += key
signatureStr += args[key]
signatureStr += SHARED_SECRET
args['api_sig'] = hashlib.md5(signatureStr.encode("utf-8")).hexdigest()
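# Example of the signing scheme implemented above: for
# args = {'method': 'auth.gettoken', 'api_key': API_KEY}, the keys are
# sorted, giving the string "api_key" + API_KEY + "method" + "auth.gettoken"
# + SHARED_SECRET, whose md5 hex digest is sent as the 'api_sig' parameter.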
@property
def token(self):
token = ""
if self.tokenExists:
tokenFile = open(TOKEN_KEY_PATH, "r+")
token = tokenFile.read()
return token if len(token) > 1 else None
@property
def authorizationURL(self):
if not self.tokenExists:
self.requestToken()
return "{0}?api_key={1}&token={2}".format(URL_AUTHORIZATION, API_KEY, self.token) \
if self.token is not None else None
@property
def tokenExists(self):
return os.path.exists(TOKEN_KEY_PATH)
@property
def sessionExists(self):
return os.path.exists(SESSION_KEY_PATH)
@property
def configDirExists(self):
return os.path.exists(CONFIG_DIR)
|
|
"""Test the tplink config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries, setup
from homeassistant.components import dhcp
from homeassistant.components.tplink import DOMAIN
from homeassistant.const import CONF_DEVICE, CONF_HOST, CONF_MAC, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT, RESULT_TYPE_FORM
from . import (
ALIAS,
DEFAULT_ENTRY_TITLE,
IP_ADDRESS,
MAC_ADDRESS,
MODULE,
_patch_discovery,
_patch_single_discovery,
)
from tests.common import MockConfigEntry
async def test_discovery(hass: HomeAssistant):
"""Test setting up discovery."""
with _patch_discovery(), _patch_single_discovery():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
# test we can try again
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
with _patch_discovery(), _patch_single_discovery(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_DEVICE: MAC_ADDRESS},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == DEFAULT_ENTRY_TITLE
assert result3["data"] == {CONF_HOST: IP_ADDRESS}
mock_setup.assert_called_once()
mock_setup_entry.assert_called_once()
# ignore configured devices
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_single_discovery():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_discovery_with_existing_device_present(hass: HomeAssistant):
"""Test setting up discovery."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: "127.0.0.2"}, unique_id="dd:dd:dd:dd:dd:dd"
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_single_discovery(no_device=True):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_single_discovery():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
# Now abort and make sure we can start over
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_single_discovery():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
with _patch_discovery(), _patch_single_discovery(), patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_DEVICE: MAC_ADDRESS}
)
assert result3["type"] == "create_entry"
assert result3["title"] == DEFAULT_ENTRY_TITLE
assert result3["data"] == {
CONF_HOST: IP_ADDRESS,
}
await hass.async_block_till_done()
mock_setup_entry.assert_called_once()
# ignore configured devices
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(), _patch_single_discovery():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_discovery_no_device(hass: HomeAssistant):
"""Test discovery without device."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with _patch_discovery(no_device=True), _patch_single_discovery():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
async def test_import(hass: HomeAssistant):
"""Test import from yaml."""
config = {
CONF_HOST: IP_ADDRESS,
}
# Cannot connect
with _patch_discovery(no_device=True), _patch_single_discovery(no_device=True):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
# Success
with _patch_discovery(), _patch_single_discovery(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_ENTRY_TITLE
assert result["data"] == {
CONF_HOST: IP_ADDRESS,
}
mock_setup.assert_called_once()
mock_setup_entry.assert_called_once()
# Duplicate
with _patch_discovery(), _patch_single_discovery():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_manual(hass: HomeAssistant):
"""Test manually setup."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
# Cannot connect (timeout)
with _patch_discovery(no_device=True), _patch_single_discovery(no_device=True):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
# Success
with _patch_discovery(), _patch_single_discovery(), patch(
f"{MODULE}.async_setup", return_value=True
), patch(f"{MODULE}.async_setup_entry", return_value=True):
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
await hass.async_block_till_done()
assert result4["type"] == "create_entry"
assert result4["title"] == DEFAULT_ENTRY_TITLE
assert result4["data"] == {
CONF_HOST: IP_ADDRESS,
}
# Duplicate
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with _patch_discovery(no_device=True), _patch_single_discovery(no_device=True):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_manual_no_capabilities(hass: HomeAssistant):
"""Test manually setup without successful get_capabilities."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery(no_device=True), _patch_single_discovery(), patch(
f"{MODULE}.async_setup", return_value=True
), patch(f"{MODULE}.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_HOST: IP_ADDRESS}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_HOST: IP_ADDRESS,
}
async def test_discovered_by_discovery_and_dhcp(hass):
"""Test we get the form with discovery and abort for dhcp source when we get both."""
with _patch_discovery(), _patch_single_discovery():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DISCOVERY},
data={CONF_HOST: IP_ADDRESS, CONF_MAC: MAC_ADDRESS, CONF_NAME: ALIAS},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with _patch_discovery(), _patch_single_discovery():
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=IP_ADDRESS, macaddress=MAC_ADDRESS, hostname=ALIAS
),
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "already_in_progress"
with _patch_discovery(), _patch_single_discovery():
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=IP_ADDRESS, macaddress="00:00:00:00:00:00", hostname="mock_hostname"
),
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_ABORT
assert result3["reason"] == "already_in_progress"
with _patch_discovery(no_device=True), _patch_single_discovery(no_device=True):
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip="1.2.3.5", macaddress="00:00:00:00:00:01", hostname="mock_hostname"
),
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_ABORT
assert result3["reason"] == "cannot_connect"
@pytest.mark.parametrize(
"source, data",
[
(
config_entries.SOURCE_DHCP,
dhcp.DhcpServiceInfo(ip=IP_ADDRESS, macaddress=MAC_ADDRESS, hostname=ALIAS),
),
(
config_entries.SOURCE_DISCOVERY,
{CONF_HOST: IP_ADDRESS, CONF_MAC: MAC_ADDRESS, CONF_NAME: ALIAS},
),
],
)
async def test_discovered_by_dhcp_or_discovery(hass, source, data):
"""Test we can setup when discovered from dhcp or discovery."""
with _patch_discovery(), _patch_single_discovery():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with _patch_discovery(), _patch_single_discovery(), patch(
f"{MODULE}.async_setup", return_value=True
) as mock_async_setup, patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_async_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["data"] == {
CONF_HOST: IP_ADDRESS,
}
assert mock_async_setup.called
assert mock_async_setup_entry.called
@pytest.mark.parametrize(
"source, data",
[
(
config_entries.SOURCE_DHCP,
dhcp.DhcpServiceInfo(ip=IP_ADDRESS, macaddress=MAC_ADDRESS, hostname=ALIAS),
),
(
config_entries.SOURCE_DISCOVERY,
{CONF_HOST: IP_ADDRESS, CONF_MAC: MAC_ADDRESS, CONF_NAME: ALIAS},
),
],
)
async def test_discovered_by_dhcp_or_discovery_failed_to_get_device(hass, source, data):
"""Test we abort if we cannot get the unique id when discovered from dhcp."""
with _patch_discovery(no_device=True), _patch_single_discovery(no_device=True):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_migration_device_online(hass: HomeAssistant):
"""Test migration from single config entry."""
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
config = {CONF_MAC: MAC_ADDRESS, CONF_NAME: ALIAS, CONF_HOST: IP_ADDRESS}
with _patch_discovery(), _patch_single_discovery(), patch(
f"{MODULE}.async_setup_entry", return_value=True
) as mock_setup_entry:
await setup.async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "migration"}, data=config
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == ALIAS
assert result["data"] == {
CONF_HOST: IP_ADDRESS,
}
assert len(mock_setup_entry.mock_calls) == 2
# Duplicate
with _patch_discovery(), _patch_single_discovery():
await setup.async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "migration"}, data=config
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_migration_device_offline(hass: HomeAssistant):
"""Test migration from single config entry."""
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
config = {CONF_MAC: MAC_ADDRESS, CONF_NAME: ALIAS, CONF_HOST: None}
with _patch_discovery(no_device=True), _patch_single_discovery(
no_device=True
), patch(f"{MODULE}.async_setup_entry", return_value=True) as mock_setup_entry:
await setup.async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "migration"}, data=config
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == ALIAS
new_entry = result["result"]
assert result["data"] == {
CONF_HOST: None,
}
assert len(mock_setup_entry.mock_calls) == 2
# Ensure a manual import updates the missing host
config = {CONF_HOST: IP_ADDRESS}
with _patch_discovery(no_device=True), _patch_single_discovery():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert new_entry.data[CONF_HOST] == IP_ADDRESS
|
|
from pathlib import Path
from pkg_resources import resource_filename
import numpy
from numpy import nan
import geopandas
import pytest
import numpy.testing as nptest
from pygridtools import misc
from pygridgen.tests import raises
from . import utils
numpy.set_printoptions(linewidth=150, nanstr='-')
try:
import pygridgen
HASPGG = True
except ImportError:
HASPGG = False
@pytest.mark.parametrize(('masked', 'z', 'triangles'), [
(None, None, False),
(False, None, False),
(None, 5, False),
(None, None, True),
])
def test_make_poly_coords_base(masked, z, triangles):
xarr = numpy.array([[1, 2], [1, 2]], dtype=float)
yarr = numpy.array([[3, 3], [4, 4]], dtype=float)
if masked is False:
xarr = numpy.ma.masked_array(xarr, mask=False)
yarr = numpy.ma.masked_array(yarr, mask=False)
if z:
expected = numpy.array([[1, 3, z], [2, 3, z], [2, 4, z], [1, 4, z]], dtype=float)
elif triangles:
expected = numpy.array([[1, 3], [2, 4], [1, 4]], dtype=float)
xarr[0, -1] = nan
yarr[0, -1] = nan
else:
expected = numpy.array([[1, 3], [2, 3], [2, 4], [1, 4]], dtype=float)
coords = misc.make_poly_coords(xarr, yarr, zpnt=z, triangles=triangles)
nptest.assert_array_equal(coords, expected)
@pytest.mark.parametrize('as_array', [True, False])
@pytest.mark.parametrize(('geom', 'geomtype', 'error'), [
([1, 2], 'Point', None),
([[1, 2], [5, 6], [5, 2]], 'LineString', None),
([[1, 2], [5, 6], [5, 2]], 'Polygon', None),
([[1, 2], [5, 6], [5, 2]], 'Circle', ValueError),
])
def test_make_record(geom, geomtype, error, as_array):
props = {'prop1': 'this string', 'prop2': 3.1415}
expected_geoms = {
'point': {
'geometry': {
'type': 'Point',
'coordinates': [1, 2]
},
'id': 1,
'properties': props
},
'linestring': {
'geometry': {
'type': 'LineString',
'coordinates': [[[1, 2], [5, 6], [5, 2]]]
},
'id': 1,
'properties': props
},
'polygon': {
'geometry': {
'type': 'Polygon',
'coordinates': [[[1, 2], [5, 6], [5, 2]]]
},
'id': 1,
'properties': props
}
}
if as_array:
geom = numpy.array(geom)
with raises(error):
record = misc.make_record(1, geom, geomtype, props)
assert record == expected_geoms[geomtype.lower()]
@pytest.mark.skipif(not HASPGG, reason='pygridgen unavailable')
def test_interpolate_bathymetry(simple_bathy, simple_grid):
elev1 = misc.interpolate_bathymetry(None, simple_grid.x_rho, simple_grid.y_rho)
elev2 = misc.interpolate_bathymetry(simple_bathy, simple_grid.x_rho, simple_grid.y_rho)
fake_elev = numpy.ma.MaskedArray(data=numpy.zeros(simple_grid.x_rho.shape), mask=simple_grid.x_rho.mask)
real_elev = numpy.ma.masked_invalid(numpy.array([
[100.15, 100.20, nan, nan, nan, nan],
[100.20, 100.25, 100.65, 100.74, 100.83, 100.95],
[100.25, 100.30, 100.35, 100.40, 100.45, 100.50],
[100.30, 100.35, 100.40, 100.45, 100.50, 100.55],
[100.35, 100.40, nan, nan, nan, nan],
[100.40, 100.45, nan, nan, nan, nan],
[100.45, 100.50, nan, nan, nan, nan],
[100.50, 100.55, nan, nan, nan, nan]
]))
nptest.assert_array_equal(elev1, fake_elev)
assert (elev1.shape == simple_grid.x_rho.shape)
nptest.assert_array_almost_equal(elev2, real_elev, decimal=2)
@pytest.fixture
def stackgrids():
grids = {
'input': {
'g0': numpy.array([
[13.7, 13.8],
[14.7, 14.8],
[15.7, 15.8],
[16.7, 16.8],
[17.7, 17.8],
]),
'g1': numpy.array([
[6.6, 6.7, 6.8],
[7.6, 7.7, 7.8],
[8.6, 8.7, 8.8],
[9.6, 9.7, 9.8],
[10.6, 10.7, 10.8],
[11.6, 11.7, 11.8],
[12.6, 12.7, 12.8],
]),
'g2': numpy.array([
[7.9, 7.10, 7.11, 7.12, 7.13],
[8.9, 8.10, 8.11, 8.12, 8.13],
[9.9, 9.10, 9.11, 9.12, 9.13],
]),
'g3': numpy.array([
[1.4, 1.5, 1.6, 1.7, 1.8],
[2.4, 2.5, 2.6, 2.7, 2.8],
[3.4, 3.5, 3.6, 3.7, 3.8],
[4.4, 4.5, 4.6, 4.7, 4.8],
[5.4, 5.5, 5.6, 5.7, 5.8],
]),
'g4': numpy.array([
[0.0, 0.1, 0.2, 0.3],
[1.0, 1.1, 1.2, 1.3],
[2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3],
]),
'g5': numpy.array([
[7.14, 7.15, 7.16],
[8.14, 8.15, 8.16],
])
},
'output': {
'g1-g2Left': numpy.array([
[nan, nan, nan, nan, nan, 6.6, 6.7, 6.8],
[7.9, 7.10, 7.11, 7.12, 7.13, 7.6, 7.7, 7.8],
[8.9, 8.10, 8.11, 8.12, 8.13, 8.6, 8.7, 8.8],
[9.9, 9.10, 9.11, 9.12, 9.13, 9.6, 9.7, 9.8],
[nan, nan, nan, nan, nan, 10.6, 10.7, 10.8],
[nan, nan, nan, nan, nan, 11.6, 11.7, 11.8],
[nan, nan, nan, nan, nan, 12.6, 12.7, 12.8],
]),
'g1-g2Right': numpy.array([
[6.6, 6.7, 6.8, nan, nan, nan, nan, nan],
[7.6, 7.7, 7.8, 7.9, 7.10, 7.11, 7.12, 7.13],
[8.6, 8.7, 8.8, 8.9, 8.10, 8.11, 8.12, 8.13],
[9.6, 9.7, 9.8, 9.9, 9.10, 9.11, 9.12, 9.13],
[10.6, 10.7, 10.8, nan, nan, nan, nan, nan],
[11.6, 11.7, 11.8, nan, nan, nan, nan, nan],
[12.6, 12.7, 12.8, nan, nan, nan, nan, nan],
]),
'g0-g1': numpy.array([
[nan, 6.6, 6.7, 6.8],
[nan, 7.6, 7.7, 7.8],
[nan, 8.6, 8.7, 8.8],
[nan, 9.6, 9.7, 9.8],
[nan, 10.6, 10.7, 10.8],
[nan, 11.6, 11.7, 11.8],
[nan, 12.6, 12.7, 12.8],
[13.7, 13.8, nan, nan],
[14.7, 14.8, nan, nan],
[15.7, 15.8, nan, nan],
[16.7, 16.8, nan, nan],
[17.7, 17.8, nan, nan],
]),
'g0-g1-g2': numpy.array([
[6.6, 6.7, 6.8, nan, nan, nan, nan, nan],
[7.6, 7.7, 7.8, 7.9, 7.10, 7.11, 7.12, 7.13],
[8.6, 8.7, 8.8, 8.9, 8.10, 8.11, 8.12, 8.13],
[9.6, 9.7, 9.8, 9.9, 9.10, 9.11, 9.12, 9.13],
[10.6, 10.7, 10.8, nan, nan, nan, nan, nan],
[11.6, 11.7, 11.8, nan, nan, nan, nan, nan],
[12.6, 12.7, 12.8, nan, nan, nan, nan, nan],
[nan, 13.7, 13.8, nan, nan, nan, nan, nan],
[nan, 14.7, 14.8, nan, nan, nan, nan, nan],
[nan, 15.7, 15.8, nan, nan, nan, nan, nan],
[nan, 16.7, 16.8, nan, nan, nan, nan, nan],
[nan, 17.7, 17.8, nan, nan, nan, nan, nan],
]),
'g1-g3': numpy.array([
[nan, nan, 1.4, 1.5, 1.6, 1.7, 1.8],
[nan, nan, 2.4, 2.5, 2.6, 2.7, 2.8],
[nan, nan, 3.4, 3.5, 3.6, 3.7, 3.8],
[nan, nan, 4.4, 4.5, 4.6, 4.7, 4.8],
[nan, nan, 5.4, 5.5, 5.6, 5.7, 5.8],
[6.6, 6.7, 6.8, nan, nan, nan, nan],
[7.6, 7.7, 7.8, nan, nan, nan, nan],
[8.6, 8.7, 8.8, nan, nan, nan, nan],
[9.6, 9.7, 9.8, nan, nan, nan, nan],
[10.6, 10.7, 10.8, nan, nan, nan, nan],
[11.6, 11.7, 11.8, nan, nan, nan, nan],
[12.6, 12.7, 12.8, nan, nan, nan, nan],
]),
'g3-g4': numpy.array([
[0.0, 0.1, 0.2, 0.3, nan, nan, nan, nan, nan],
[1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8],
[2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
[3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8],
[nan, nan, nan, nan, 4.4, 4.5, 4.6, 4.7, 4.8],
[nan, nan, nan, nan, 5.4, 5.5, 5.6, 5.7, 5.8],
]),
'g-all': numpy.array([
[0.0, 0.1, 0.2, 0.3, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
[1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, nan, nan, nan, nan, nan, nan, nan, nan],
[2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, nan, nan, nan, nan, nan, nan, nan, nan],
[3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, 4.4, 4.5, 4.6, 4.7, 4.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, 5.4, 5.5, 5.6, 5.7, 5.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, 6.6, 6.7, 6.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, 7.6, 7.7, 7.8, 7.9, 7.10, 7.11, 7.12, 7.13, 7.14, 7.15, 7.16],
[nan, nan, nan, nan, nan, nan, 8.6, 8.7, 8.8, 8.9, 8.10, 8.11, 8.12, 8.13, 8.14, 8.15, 8.16],
[nan, nan, nan, nan, nan, nan, 9.6, 9.7, 9.8, 9.9, 9.10, 9.11, 9.12, 9.13, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, 10.6, 10.7, 10.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, 11.6, 11.7, 11.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, 12.6, 12.7, 12.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, nan, 13.7, 13.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, nan, 14.7, 14.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, nan, 15.7, 15.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, nan, 16.7, 16.8, nan, nan, nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan, nan, 17.7, 17.8, nan, nan, nan, nan, nan, nan, nan, nan],
])
}
}
return grids
@pytest.mark.parametrize('idx1, idx2, how, where, shift, expected', [
('g1', 'g3', 'v', '-', 2, 'g1-g3'),
('g3', 'g1', 'v', '+', -2, 'g1-g3'),
('g1', 'g2', 'h', '-', 1, 'g1-g2Left'),
('g1', 'g2', 'h', '+', 1, 'g1-g2Right'),
('g0', 'g1', 'v', '-', 1, 'g0-g1'),
], ids=['VA-', 'VB+', 'HL-', 'HR+', 'V-easy'])
def test_padded_stack_pairs(stackgrids, idx1, idx2, how, where, shift, expected):
result = misc.padded_stack(
stackgrids['input'][idx1],
stackgrids['input'][idx2],
how=how,
where=where,
shift=shift
)
nptest.assert_array_equal(result, stackgrids['output'][expected])
def test_padded_stack_three(stackgrids):
step1 = misc.padded_stack(stackgrids['input']['g0'], stackgrids['input']['g1'],
how='v', where='-', shift=-1)
step2 = misc.padded_stack(step1, stackgrids['input']['g2'],
how='h', where='+', shift=1)
nptest.assert_array_equal(step2, stackgrids['output']['g0-g1-g2'])
def test_padded_stack_a_bunch(stackgrids):
step1 = misc.padded_stack(stackgrids['input']['g0'], stackgrids['input']['g1'],
how='v', where='-', shift=-1)
step2 = misc.padded_stack(step1, stackgrids['input']['g2'],
how='h', where='+', shift=1)
step3 = misc.padded_stack(step2, stackgrids['input']['g3'],
how='v', where='-', shift=-2)
step4 = misc.padded_stack(step3, stackgrids['input']['g4'],
how='h', where='-', shift=-1)
step5 = misc.padded_stack(step4, stackgrids['input']['g5'],
how='h', where='+', shift=7)
nptest.assert_array_equal(step5, stackgrids['output']['g-all'])
@pytest.mark.parametrize(('how', 'where'), [('junk', '+'), ('h', 'junk')])
def test_padded_stack_errors(stackgrids, how, where):
with raises(ValueError):
misc.padded_stack(stackgrids['input']['g1'], stackgrids['input']['g3'],
how=how, where=where, shift=2)
def test_padded_sum():
mask = numpy.array([
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
])
result = misc.padded_sum(mask, window=1)
expected = numpy.array([
[0, 0, 0, 2, 4, 4, 4],
[0, 0, 0, 2, 4, 4, 4],
[0, 0, 0, 2, 4, 4, 4],
[0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 2, 4, 4, 4],
[0, 0, 0, 2, 4, 4, 4],
[0, 0, 0, 2, 4, 4, 4]
])
nptest.assert_array_equal(result, expected)
@pytest.mark.parametrize('size', [5, 10])
@pytest.mark.parametrize('inside', [True, False], ids=['inside', 'outside'])
def test_mask_with_polygon(size, inside):
expected_masks = {
5: numpy.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 1, 1]
], dtype=bool),
10: numpy.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
], dtype=bool)
}
expected = expected_masks[size]
if not inside:
expected = numpy.bitwise_not(expected)
y, x = numpy.mgrid[:size, :size]
polyverts = [
[(0.5, 2.5), (3.5, 2.5), (3.5, 0.5), (0.5, 0.5)],
[(2.5, 4.5), (5.5, 4.5), (5.5, 2.5), (2.5, 2.5)]
]
mask = misc.mask_with_polygon(x, y, *polyverts, inside=inside)
nptest.assert_array_equal(mask, expected)
@pytest.mark.parametrize(('usemasks', 'fname'), [
pytest.param(False, 'array_grid.shp', marks=pytest.mark.xfail),
pytest.param(True, 'mask_grid.shp', marks=pytest.mark.xfail),
])
def test_gdf_of_cells(usemasks, fname, simple_grid, example_crs):
if usemasks:
mask = numpy.array([
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
])
else:
mask = None
baselinedir = Path(resource_filename('pygridtools.tests', 'baseline_files'))
river = 'test'
expected = geopandas.read_file(str(baselinedir / fname))
result = misc.gdf_of_cells(simple_grid.x, simple_grid.y, mask, example_crs)
utils.assert_gdfs_equal(expected.drop(columns=['river', 'reach']), result)
@pytest.mark.parametrize(('usemasks', 'fname'), [
(False, 'array_point.shp'),
(True, 'mask_point.shp'),
])
def test_gdf_of_points(usemasks, fname, example_crs):
x = numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
y = numpy.array([[4, 4, 4], [5, 5, 5], [6, 6, 6], [7, 7, 7]])
mask = numpy.array([[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]], dtype=bool)
if usemasks:
x = numpy.ma.masked_array(x, mask)
y = numpy.ma.masked_array(y, mask)
baselinedir = Path(resource_filename('pygridtools.tests', 'baseline_files'))
river = 'test'
expected = geopandas.read_file(str(baselinedir / fname))
result = misc.gdf_of_points(x, y, example_crs)
utils.assert_gdfs_equal(expected.drop(columns=['river', 'reach']), result)
|
|
#!/usr/bin/python -Wall
# ================================================================
# Copyright (c) John Kerl 2007
# [email protected]
# ================================================================
from __future__ import division # 1/2 = 0.5, not 0.
from math import *
from sackmat_m import *
import copy
import sys
# ----------------------------------------------------------------
# Let
#   F: R^m -> R^n
# i.e.
#          [ F_1(x_1, ..., x_m) ]
#   F(x) = [          :         ]
#          [ F_n(x_1, ..., x_m) ].
# Then Dij = dFi/dxj, i=1..n, j=1..m (an n x m matrix).
# This is numerically approximated (forward-difference approximation) by
#   (F(x1,...,xj+h,...,xm) - F(x1,...,xj,...,xm)) / h
# or (centered-difference approximation) by
#   (F(x1,...,xj+h/2,...,xm) - F(x1,...,xj-h/2,...,xm)) / h.
def jac(F, q, h=1e-6):
m = len(q)
n = len(F(q))
DFq = make_zero_matrix(n, m)
# Centered-difference approximation
h2 = 0.5 * h
for j in range(0, m):
qb = copy.copy(q)
qf = copy.copy(q)
qb[j] -= h2
qf[j] += h2
Fqb = F(qb)
Fqf = F(qf)
for i in range(0, n):
DFq[i][j] = (Fqf[i] - Fqb[i]) / h
return DFq
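# ----------------------------------------------------------------
# Illustrative sanity check (an addition, not part of the original script):
# for F(x,y,z) = [xy, yz, zx] the exact Jacobian is
#   [ y  x  0 ]
#   [ 0  z  y ]
#   [ z  0  x ],
# so the centered-difference output of jac() should agree with it to within
# roughly O(h^2) plus floating-point round-off. F1 below computes this F.
def check_jac_example():
	q = [1.0, 2.0, 3.0]
	x, y, z = q
	exact = [[y,   x,   0.0],
	         [0.0, z,   y  ],
	         [z,   0.0, x  ]]
	approx = jac(F1, q)
	for i in range(3):
		for j in range(3):
			assert abs(approx[i][j] - exact[i][j]) < 1e-6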
# ----------------------------------------------------------------
def F1(q):
[x, y, z] = q
#f1 = x**2
#f2 = y**2
#f3 = z**2
#f1 = x**2 * y**2
#f2 = y**2 * z**2
#f3 = z**2 * x**2
f1 = x * y
f2 = y * z
f3 = z * x
#f1 = 1.0 * y * y
#f2 = 2.0 * x
#f3 = 3.0 * z
return [f1, f2, f3]
# ----------------------------------------------------------------
def F2(q):
[x, y, z] = q
return [x**2 + y**2 + z**2]
# ----------------------------------------------------------------
def do_point(F,q):
print "q =", q
DFq = jac(F, q)
print "DFq="
print DFq
#print "det(DFq) =", DFq.det()
# ----------------------------------------------------------------
def do_point_with_det(F,q):
print "-" * 40
print "q =", q
DFq = jac(F, q)
print "DFq="
print DFq
print "det(DFq) =", DFq.det()
# ----------------------------------------------------------------
def frufru():
F = F1
do_point_with_det(F, [0,0,0])
print
do_point_with_det(F, [0,0,1])
do_point_with_det(F, [0,1,0])
do_point_with_det(F, [1,0,0])
print
do_point_with_det(F, [1,1,0])
do_point_with_det(F, [1,0,1])
do_point_with_det(F, [0,1,1])
print
do_point_with_det(F, [1,1,1])
do_point_with_det(F, [1,2,3])
do_point_with_det(F, [sqrt(0.5),sqrt(0.5),0])
a=0.1
do_point_with_det(F, [cos(a),sin(a),0])
a = 0.2
b = 0.3
c = sqrt(1 - a**2 - b**2)
do_point_with_det(F, [a,b,c])
a = 0.8
b = 0.2
c = sqrt(1 - a**2 - b**2)
do_point_with_det(F, [a,b,c])
print
# ----------------------------------------------------------------
def F(q):
[x, y, z] = q
#f1 = x**2
#f2 = y**2
#f3 = z**2
#f1 = x**2 * y**2
#f2 = y**2 * z**2
#f3 = z**2 * x**2
f1 = x * y
f2 = y * z
f3 = z * x
#f1 = 1.0 * y * y
#f2 = 2.0 * x
#f3 = 3.0 * z
return [f1, f2, f3]
# ----------------------------------------------------------------
def G(q):
[x, y, z] = q
return [x**2 + y**2 + z**2]
# ----------------------------------------------------------------
def gt_something():
thetalo = 0
	thetahi = 2*pi  # pi is available via "from math import *"
philo = 0
	phihi = pi
nphi = 12
ntheta = 12
if (len(sys.argv) == 3):
nphi = int(sys.argv[1])
ntheta = int(sys.argv[2])
dtheta = (thetahi-thetalo)/ntheta
dphi = (phihi-philo)/nphi
phi = 0
for ii in range(0, nphi):
theta = 0
for jj in range(0, ntheta):
x = sin(phi) * cos(theta)
y = sin(phi) * sin(theta)
z = cos(phi)
q = [x,y,z]
DF = jac(F, q)
d = DF.det()
# Let G(x,y,z) = x^2 + y^2 + z^2. The unit sphere is the level set
# for G(x,y,z) = 1.
# Tangent plane at (u,v,w):
# dG/dx(x-u) + dG/dy(y-v) + dG/dz(z-w)
# where (u,v,w) are the coordinates of the point q and (x,y,z) are variable.
DG = jac(G, q)
# For DF restricted to this tangent plane:
# * DG (i.e. grad G) is the normal vector
# * This gives a point-normal form for the tangent plane
# * Project the standard basis for R3 onto the tangent plane
# * Row-reduce
DF = jac(F, q)
# * Form an orthonormal basis
# * Compute DF of the basis
# * Row-reduce that to get the rank of DF on TM|q
#print "q = ", q,
#print "det(DF) = ", d
#print "%7.4f %7.4f %7.4f %7.4f %7.4f,%7.4f %7.4f,%7.4f %7.4f,%7.4f" % (
# x,y,z, d, DG[0][0], -DG[0][0]*x, DG[0][1], -DG[0][1]*y, DG[0][2], -DG[0][2]*z)
nx = DG[0][0]
ny = DG[0][1]
nz = DG[0][2]
nml = [nx, ny, nz]
e0 = [1,0,0]
e1 = [0,1,0]
e2 = [0,0,1]
# Project the standard basis for R3 down to the tangent plane TM|q.
proj_e0 = projperp(e0, nml)
proj_e1 = projperp(e1, nml)
proj_e2 = projperp(e2, nml)
proj_e = sackmat([proj_e0, proj_e1, proj_e2])
# Row-reduce, compute rank, and trim
proj_e.row_echelon_form()
rank = proj_e.rank_rr()
proj_e.elements = proj_e.elements[0:rank]
# Orthonormalize
proj_e = gram_schmidt(proj_e)
#print "q=[%7.4f,%7.4f,%7.4f]" % (x, y, z),
#print "nml=[%7.4f,%7.4f,%7.4f]" % (nx, ny, nz),
#print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % (
#proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]),
# Take DF of the orthonormal basis.
proj_e = proj_e.transpose()
proj_e = DF * proj_e
proj_e = proj_e.transpose()
rank = proj_e.rank()
#print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % (
#proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]),
#print "rank=", proj_e.rank_rr(),
#print "d=%11.3e" % (d),
# xxx hack
if (rank == 1):
d = 0.7
#print "%11.3e" % (d),
print "%8.4f" % (d),
#print
theta += dtheta
print
phi += dphi
gt_something()
|
|
from python_terraform import * # NOQA
from .common import * # NOQA
RANCHER_AWS_AMI = os.environ.get("AWS_AMI", "")
RANCHER_AWS_USER = os.environ.get("AWS_USER", "ubuntu")
RANCHER_REGION = os.environ.get("AWS_REGION")
RANCHER_VPC_ID = os.environ.get("AWS_VPC")
RANCHER_SUBNETS = os.environ.get("AWS_SUBNET")
RANCHER_AWS_SG = os.environ.get("AWS_SECURITY_GROUPS")
RANCHER_AVAILABILITY_ZONE = os.environ.get("AWS_AVAILABILITY_ZONE")
RANCHER_QA_SPACE = os.environ.get("RANCHER_QA_SPACE", "qa.rancher.space.")
RANCHER_EC2_INSTANCE_CLASS = os.environ.get("AWS_INSTANCE_TYPE", "t3a.medium")
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "sa")
RANCHER_IAM_ROLE = os.environ.get("RANCHER_IAM_ROLE")
RKE2_CREATE_LB = os.environ.get("RKE2_CREATE_LB", False)
RANCHER_RKE2_VERSION = os.environ.get("RANCHER_RKE2_VERSION", "")
RANCHER_RKE2_CHANNEL = os.environ.get("RANCHER_RKE2_CHANNEL", "null")
RANCHER_RANCHERD_VERSION = os.environ.get("RANCHER_RANCHERD_VERSION", "")
RANCHER_RKE2_NO_OF_SERVER_NODES = \
os.environ.get("RANCHER_RKE2_NO_OF_SERVER_NODES", 3)
RANCHER_RKE2_NO_OF_WORKER_NODES = \
os.environ.get("RANCHER_RKE2_NO_OF_WORKER_NODES", 0)
RANCHER_RKE2_SERVER_FLAGS = os.environ.get("RANCHER_RKE2_SERVER_FLAGS", "server")
RANCHER_RKE2_WORKER_FLAGS = os.environ.get("RANCHER_RKE2_WORKER_FLAGS", "agent")
RANCHER_RKE2_OPERATING_SYSTEM = os.environ.get("RANCHER_RKE2_OPERATING_SYSTEM")
AWS_VOLUME_SIZE = os.environ.get("AWS_VOLUME_SIZE", "20")
RANCHER_RKE2_RHEL_USERNAME = os.environ.get("RANCHER_RKE2_RHEL_USERNAME", "")
RANCHER_RKE2_RHEL_PASSWORD = os.environ.get("RANCHER_RKE2_RHEL_PASSWORD", "")
RANCHER_RKE2_KUBECONFIG_PATH = DATA_SUBDIR + "/rke2_kubeconfig.yaml"
def test_create_rancherd_multiple_control_cluster():
cluster_version = RANCHER_RANCHERD_VERSION
cluster_type = "rancherd"
rke2_clusterfilepath = create_rke2_multiple_control_cluster(cluster_type, \
cluster_version)
fqdn_file = "/tmp/" + RANCHER_HOSTNAME_PREFIX + "_fixed_reg_addr"
with open(fqdn_file, 'r') as f:
fqdn = f.read()
fqdn = fqdn.strip()
print("RANCHERD URL\nhttps://{0}:8443\n".format(fqdn), flush=True)
ip_file = "/tmp/" + RANCHER_HOSTNAME_PREFIX + "_master_ip"
with open(ip_file, 'r') as f:
ip = f.read()
ip = ip.strip()
keyPath = os.path.abspath('.') + '/.ssh/' + AWS_SSH_KEY_NAME
os.chmod(keyPath, 0o400)
print("\n\nRANCHERD USERNAME AND PASSWORD\n", flush=True)
cmd = "ssh -o StrictHostKeyChecking=no -i " + keyPath + " " + RANCHER_AWS_USER + \
"@" + ip + " rancherd reset-admin"
result = run_command(cmd, True)
print(result)
def test_create_rke2_multiple_control_cluster():
cluster_version = RANCHER_RKE2_VERSION
cluster_type = "rke2"
create_rke2_multiple_control_cluster(cluster_type, cluster_version)
def test_import_rke2_multiple_control_cluster():
client = get_user_client()
cluster_version = RANCHER_RKE2_VERSION
cluster_type = "rke2"
rke2_clusterfilepath = create_rke2_multiple_control_cluster(
cluster_type, cluster_version)
cluster = create_rancher_cluster(client, rke2_clusterfilepath)
def create_rke2_multiple_control_cluster(cluster_type, cluster_version):
rke2_kubeconfig_file = "rke2_kubeconfig.yaml"
rke2_clusterfilepath = DATA_SUBDIR + "/" + rke2_kubeconfig_file
tf_dir = DATA_SUBDIR + "/" + "terraform/rke2/master"
keyPath = os.path.abspath('.') + '/.ssh/' + AWS_SSH_KEY_NAME
os.chmod(keyPath, 0o400)
no_of_servers = int(RANCHER_RKE2_NO_OF_SERVER_NODES) - 1
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'sg_id': RANCHER_AWS_SG,
'availability_zone': RANCHER_AVAILABILITY_ZONE,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'resource_name': RANCHER_HOSTNAME_PREFIX,
'access_key': keyPath,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'username': RANCHER_RKE2_RHEL_USERNAME,
'password': RANCHER_RKE2_RHEL_PASSWORD,
'rke2_version': cluster_version,
'rke2_channel': RANCHER_RKE2_CHANNEL,
'no_of_server_nodes': no_of_servers,
'server_flags': RANCHER_RKE2_SERVER_FLAGS,
'qa_space': RANCHER_QA_SPACE,
'node_os': RANCHER_RKE2_OPERATING_SYSTEM,
'cluster_type': cluster_type,
'iam_role': RANCHER_IAM_ROLE,
'volume_size': AWS_VOLUME_SIZE,
'create_lb': str(RKE2_CREATE_LB).lower()})
print("Creating cluster")
tf.init()
tf.plan(out="plan_server.out")
print(tf.apply("--auto-approve"))
print("\n\n")
if int(RANCHER_RKE2_NO_OF_WORKER_NODES) > 0:
tf_dir = DATA_SUBDIR + "/" + "terraform/rke2/worker"
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'sg_id': RANCHER_AWS_SG,
'availability_zone': RANCHER_AVAILABILITY_ZONE,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'resource_name': RANCHER_HOSTNAME_PREFIX,
'access_key': keyPath,
'rke2_version': cluster_version,
'rke2_channel': RANCHER_RKE2_CHANNEL,
'username': RANCHER_RKE2_RHEL_USERNAME,
'password': RANCHER_RKE2_RHEL_PASSWORD,
'node_os': RANCHER_RKE2_OPERATING_SYSTEM,
'cluster_type': cluster_type,
'no_of_worker_nodes': int(RANCHER_RKE2_NO_OF_WORKER_NODES),
'worker_flags': RANCHER_RKE2_WORKER_FLAGS,
'iam_role': RANCHER_IAM_ROLE,
'volume_size': AWS_VOLUME_SIZE})
print("Joining worker nodes")
tf.init()
tf.plan(out="plan_worker.out")
print(tf.apply("--auto-approve"))
print("\n\n")
cmd = "cp /tmp/" + RANCHER_HOSTNAME_PREFIX + "_kubeconfig " + \
rke2_clusterfilepath
os.system(cmd)
is_file = os.path.isfile(rke2_clusterfilepath)
assert is_file
print_kubeconfig(rke2_clusterfilepath)
check_cluster_status(rke2_clusterfilepath)
print("\n\nRKE2 Cluster Created\n")
cmd = "kubectl get nodes --kubeconfig=" + rke2_clusterfilepath
print(run_command(cmd))
cmd = "kubectl get pods -A --kubeconfig=" + rke2_clusterfilepath
print(run_command(cmd))
print("\n\n")
return rke2_clusterfilepath
def create_rancher_cluster(client, rke2_clusterfilepath):
if CLUSTER_NAME:
clustername = CLUSTER_NAME
else:
clustername = random_test_name("testcustom-rke2")
cluster = client.create_cluster(name=clustername)
cluster_token = create_custom_host_registration_token(client, cluster)
command = cluster_token.insecureCommand
finalimportcommand = command + " --kubeconfig " + rke2_clusterfilepath
print(finalimportcommand)
result = run_command(finalimportcommand)
clusters = client.list_cluster(name=clustername).data
assert len(clusters) > 0
print("Cluster is")
print(clusters[0])
# Validate the cluster
cluster = validate_cluster(client, clusters[0],
check_intermediate_state=False)
return cluster
def check_cluster_status(kubeconfig):
nodeNotReady = True
    retries = 0
try:
while nodeNotReady and (retries < 10):
cmd = "kubectl get nodes --no-headers -A --kubeconfig=" + kubeconfig
nodes = execute_command(cmd, False)
nodeNotReady = False
for node in nodes.strip().split("\n"):
state = node.split()[1]
if state != "Ready":
nodeNotReady = True
if not nodeNotReady:
break
time.sleep(60)
retries = retries + 1
if nodeNotReady:
            raise AssertionError("Nodes failed to be in Ready state after 10 minutes")
actual_count_of_nodes = len(nodes.strip().split("\n"))
expected_count_of_nodes = int(RANCHER_RKE2_NO_OF_SERVER_NODES) - 1 + \
int(RANCHER_RKE2_NO_OF_WORKER_NODES)
if actual_count_of_nodes < expected_count_of_nodes:
raise AssertionError("Nodes failed to join the cluster, \
Expected: {} Actual: {}".format(expected_count_of_nodes, actual_count_of_nodes))
podsNotReady = True
retries = 0
while podsNotReady and (retries < 10):
cmd = "kubectl get pods --no-headers -A --kubeconfig=" + kubeconfig
pods = execute_command(cmd, False)
podsNotReady = False
for pod in pods.strip().split("\n"):
status = pod.split()[3]
if status != "Running" and status != "Completed":
podsNotReady = True
if not podsNotReady:
break
time.sleep(60)
retries = retries + 1
if podsNotReady:
raise AssertionError("Pods are not in desired state")
except AssertionError as e:
print("FAIL: {}".format(str(e)))
def execute_command(command, log_out=True):
    if log_out:
        print("run cmd: \t{0}".format(command))
    res = None
    # Retry up to three times and stop as soon as the command succeeds;
    # res stays None if all attempts fail.
    for i in range(3):
        try:
            res = subprocess.check_output(command, shell=True, text=True)
            break
        except subprocess.CalledProcessError:
            print("Re-trying...")
            time.sleep(10)
    return res
|
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import mock
from django.core.urlresolvers import reverse
from django.db.models import ProtectedError
from rest_framework import status
from rest_framework.test import APITestCase
from pdc.apps.common.test_utils import TestCaseWithChangeSetMixin
from .models import RoleContact, Person
class ContactRoleRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = ['pdc/apps/contact/fixtures/tests/contact_role.json', ]
def test_create(self):
url = reverse('contactrole-list')
data = {'name': 'test_role'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get('name'), 'test_role')
self.assertNumChanges([1])
def test_create_with_wrong_field(self):
url = reverse('contactrole-list')
data = {'wrong_name': 'test_role'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'detail': 'Unknown fields: "wrong_name".'})
self.assertNumChanges([])
def test_create_with_missing_field(self):
url = reverse('contactrole-list')
response = self.client.post(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"name": ["This field is required."]})
self.assertNumChanges([])
def test_create_with_wrong_value(self):
url = reverse('contactrole-list')
data = {'name': None}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"name": ["This field may not be null."]})
self.assertNumChanges([])
def test_get(self):
url = reverse('contactrole-detail', args=['qe_ack'])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('name'), 'qe_ack')
def test_query(self):
url = reverse('contactrole-list')
response = self.client.get(url + '?name=qe_ack', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results')[0].get('name'), 'qe_ack')
def test_update(self):
url = reverse('contactrole-detail', args=['qe_ack'])
data = {'name': 'new_role'}
response = self.client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('name'), 'new_role')
self.assertNumChanges([1])
def test_delete(self):
url = reverse('contactrole-detail', args=['qe_ack'])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertNumChanges([1])
def test_delete_protect(self):
RoleContact.specific_objects.create(username='person1', email='[email protected]',
contact_role='qe_ack')
url = reverse('contactrole-detail', args=['qe_ack'])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("protected", response.content)
self.assertNumChanges([])
class PersonRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = ['pdc/apps/contact/fixtures/tests/person.json', ]
def test_create(self):
url = reverse('person-list')
data = {'username': 'test_person', 'email': '[email protected]'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get('username'), 'test_person')
self.assertEqual(response.data.get('email'), '[email protected]')
self.assertNumChanges([1])
def test_create_with_wrong_field(self):
url = reverse('person-list')
data = {'wrong_name': 'test'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'detail': 'Unknown fields: "wrong_name".'})
self.assertNumChanges([])
def test_create_with_missing_field(self):
url = reverse('person-list')
response = self.client.post(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"username": ["This field is required."],
"email": ["This field is required."]})
self.assertNumChanges([])
def test_create_with_wrong_value(self):
url = reverse('person-list')
data = {'username': None, 'email': None}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"username": ["This field may not be null."],
"email": ["This field may not be null."]})
self.assertNumChanges([])
def test_get(self):
url = reverse('person-detail', args=[3])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('username'), 'person1')
def test_get_second_page(self):
url = reverse('person-list')
for i in range(50):
self.client.post(url,
{'username': 'Dude %d' % i,
'email': 'dude%[email protected]' % i},
format='json')
response = self.client.get(url, {'page': 2}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIsNotNone(response.data.get('next'))
self.assertIsNotNone(response.data.get('previous'))
def test_query(self):
url = reverse('person-list')
response = self.client.get(url + '?username=person2', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results')[0].get('username'), 'person2')
def test_query_with_multiple_values(self):
url = reverse('person-list')
response = self.client.get(url + '?username=person2&username=person1', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 2)
self.assertEqual(response.data.get('results')[0].get('username'), 'person1')
self.assertEqual(response.data.get('results')[1].get('username'), 'person2')
def test_query_with_wrong_username(self):
url = reverse('person-list')
response = self.client.get(url + '?username=person3', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_combine_with_wrong_username(self):
url = reverse('person-list')
response = self.client.get(url + '?username=person1&username=person3', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data.get('results')[0].get('username'), 'person1')
def test_query_with_incorrect_combination(self):
url = reverse('person-list')
response = self.client.get(url + '?username=person1&[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_with_correct_combination(self):
url = reverse('person-list')
response = self.client.get(url + '?username=person1&[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data.get('results')[0].get('username'), 'person1')
def test_patch_update(self):
url = reverse('person-detail', args=[3])
data = {'username': 'new_name'}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('username'), 'new_name')
self.assertNumChanges([1])
def test_put_update(self):
url = reverse('person-detail', args=[3])
data = {'username': 'new_name', 'email': '[email protected]'}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('username'), 'new_name')
self.assertEqual(response.data.get('email'), '[email protected]')
self.assertNumChanges([1])
def test_partial_update_empty(self):
response = self.client.patch(reverse('person-detail', args=[1]), {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNumChanges([])
def test_delete(self):
url = reverse('person-detail', args=[3])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertNumChanges([1])
def test_delete_protect(self):
RoleContact.specific_objects.create(username='person1', email='[email protected]',
contact_role='qe_ack')
url = reverse('person-detail', args=[3])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("protected", response.content)
self.assertNumChanges([])
class MaillistRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = ['pdc/apps/contact/fixtures/tests/maillist.json', ]
def test_create(self):
url = reverse('maillist-list')
data = {'mail_name': 'test_person', 'email': '[email protected]'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get('mail_name'), 'test_person')
self.assertEqual(response.data.get('email'), '[email protected]')
self.assertNumChanges([1])
def test_create_with_wrong_field(self):
url = reverse('maillist-list')
data = {'wrong_name': 'test'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'detail': 'Unknown fields: "wrong_name".'})
self.assertNumChanges([])
def test_create_with_missing_field(self):
url = reverse('maillist-list')
response = self.client.post(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"mail_name": ["This field is required."],
"email": ["This field is required."]})
self.assertNumChanges([])
def test_create_with_extra_field(self):
url = reverse('maillist-list')
data = {'mail_name': 'test_person', 'email': '[email protected]', 'foo': 'bar'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Unknown fields: "foo".')
self.assertNumChanges([])
def test_create_with_wrong_value(self):
url = reverse('maillist-list')
data = {'mail_name': None, 'email': None}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"mail_name": ["This field may not be null."],
"email": ["This field may not be null."]})
self.assertNumChanges([])
def test_get(self):
url = reverse('maillist-detail', args=[1])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('mail_name'), 'maillist1')
def test_query(self):
url = reverse('maillist-list')
response = self.client.get(url + '?mail_name=maillist2', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results')[0].get('mail_name'), 'maillist2')
def test_patch_update(self):
url = reverse('maillist-detail', args=[1])
data = {'mail_name': 'new_name'}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('mail_name'), 'new_name')
self.assertNumChanges([1])
def test_put_update(self):
url = reverse('maillist-detail', args=[1])
data = {'mail_name': 'new_name', 'email': '[email protected]'}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('mail_name'), 'new_name')
self.assertEqual(response.data.get('email'), '[email protected]')
self.assertNumChanges([1])
def test_partial_update_empty(self):
response = self.client.patch(reverse('maillist-detail', args=[1]), {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNumChanges([])
def test_delete(self):
url = reverse('maillist-detail', args=[1])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertNumChanges([1])
def test_delete_protect(self):
RoleContact.specific_objects.create(mail_name='maillist1', email='[email protected]',
contact_role='qe_ack')
url = reverse('maillist-detail', args=[1])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn("protected", response.content)
self.assertNumChanges([])
def test_multi_delete_protect_no_change_set(self):
RoleContact.specific_objects.create(mail_name='maillist1', email='[email protected]',
contact_role='qe_ack')
url = reverse('maillist-detail', args=[1])
        # try to delete it multiple times and verify the change count
self.client.delete(url, format='json')
self.client.delete(url, format='json')
self.client.delete(url, format='json')
self.assertNumChanges([])
class RoleContactRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
def setUp(self):
super(RoleContactRESTTestCase, self).setUp()
RoleContact.specific_objects.create(username='person1', email='[email protected]', contact_role='qe_ack')
RoleContact.specific_objects.create(username='person2', email='[email protected]', contact_role='pm')
RoleContact.specific_objects.create(mail_name='maillist1', email='[email protected]', contact_role='qe_team')
RoleContact.specific_objects.create(mail_name='maillist2', email='[email protected]', contact_role='devel_team')
def test_create_changeset_with_new_type(self):
url = reverse('rolecontact-list')
data = {'contact': {'username': 'person1', 'email': '[email protected]'},
'contact_role': 'new_type'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get('contact_role'), 'new_type')
self.assertNumChanges([2])
def test_create_changeset_with_new_person(self):
url = reverse('rolecontact-list')
data = {'contact': {'username': 'new_person', 'email': '[email protected]'},
'contact_role': 'pm'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get('contact').get('username'), 'new_person')
self.assertEqual(response.data.get('contact').get('email'), '[email protected]')
self.assertEqual(response.data.get('contact_role'), 'pm')
self.assertNumChanges([2])
def test_create_with_person(self):
url = reverse('rolecontact-list')
data = {'contact': {'username': 'test_person', 'email': '[email protected]'},
'contact_role': 'test_type'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get('contact').get('username'), 'test_person')
self.assertEqual(response.data.get('contact').get('email'), '[email protected]')
self.assertEqual(response.data.get('contact_role'), 'test_type')
self.assertNumChanges([3])
def test_create_with_maillist(self):
url = reverse('rolecontact-list')
data = {'contact': {'mail_name': 'test_mail', 'email': '[email protected]'},
'contact_role': 'test_type'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get('contact').get('mail_name'), 'test_mail')
self.assertEqual(response.data.get('contact').get('email'), '[email protected]')
self.assertEqual(response.data.get('contact_role'), 'test_type')
self.assertNumChanges([3])
def test_create_with_wrong_field(self):
url = reverse('rolecontact-list')
data = {'wrong_name': 'test_type'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'detail': 'Unknown fields: "wrong_name".'})
self.assertNumChanges([])
def test_create_with_missing_field(self):
url = reverse('rolecontact-list')
response = self.client.post(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"contact_role": ["This field is required."],
"contact": ["This field is required."]})
self.assertNumChanges([])
def test_create_with_invalid_object_field(self):
url = reverse('rolecontact-list')
data = {'contact': {'username': 'person1', 'invalid_key': '[email protected]'},
'contact_role': 'qe_ack'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('invalid_key', response.content)
self.assertNumChanges([])
def test_create_with_exists_value(self):
url = reverse('rolecontact-list')
data = {'contact': {'username': 'person1', 'email': '[email protected]'},
'contact_role': 'qe_ack'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"non_field_errors": ["The fields (\'contact\', \'contact_role\') must make a unique set."]})
self.assertNumChanges([])
def test_create_with_bad_type(self):
url = reverse('rolecontact-list')
data = {'contact': {'person_name': 'person1', 'e-mail': '[email protected]'},
'contact_role': 'qe_ack'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"contact": ["Could not determine type of contact."]})
self.assertNumChanges([])
def test_get(self):
url = reverse('rolecontact-detail', args=[1])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('contact').get('username'), 'person1')
self.assertEqual(response.data.get('contact').get('email'), '[email protected]')
self.assertEqual(response.data.get('contact_role'), 'qe_ack')
def test_query_with_username(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?username=person2', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results')[0].get('contact').get('username'), 'person2')
def test_query_with_mail_name(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?mail_name=maillist2', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results')[0].get('contact').get('mail_name'), 'maillist2')
def test_query_with_email(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results')[0].get('contact').get('email'), '[email protected]')
def test_query_with_contact_role(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?contact_role=pm', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('results')[0].get('contact_role'), 'pm')
def test_query_with_username_list(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?username=person1&username=person2', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 2)
self.assertEqual(response.data.get('results')[0].get('contact').get('username'), 'person1')
self.assertEqual(response.data.get('results')[1].get('contact').get('username'), 'person2')
def test_query_with_mail_name_list(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?mail_name=maillist1&mail_name=maillist2', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 2)
self.assertEqual(response.data.get('results')[0].get('contact').get('mail_name'), 'maillist1')
self.assertEqual(response.data.get('results')[1].get('contact').get('mail_name'), 'maillist2')
def test_query_with_email_list(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '[email protected]&[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 2)
self.assertEqual(response.data.get('results')[0].get('contact').get('email'), '[email protected]')
self.assertEqual(response.data.get('results')[1].get('contact').get('email'), '[email protected]')
def test_query_with_contact_role_list(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?contact_role=qe_ack&contact_role=qe_team', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 2)
self.assertEqual(response.data.get('results')[0].get('contact_role'), 'qe_ack')
self.assertEqual(response.data.get('results')[1].get('contact_role'), 'qe_team')
def test_query_with_username_mail_name_mixup(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?username=person1&mail_name=maillist2', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 2)
self.assertEqual(response.data.get('results')[0].get('contact').get('username'), 'person1')
self.assertEqual(response.data.get('results')[1].get('contact').get('mail_name'), 'maillist2')
def test_query_with_username_email_mixup(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?username=person1&[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 0)
response = self.client.get(url + '?username=person1&[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 1)
self.assertEqual(response.data.get('results')[0].get('contact').get('username'), 'person1')
def test_query_with_username_contact_role_mixup(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?username=person1&contact_role=pm', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 0)
response = self.client.get(url + '?username=person1&contact_role=qe_ack', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 1)
self.assertEqual(response.data.get('results')[0].get('contact').get('username'), 'person1')
self.assertEqual(response.data.get('results')[0].get('contact_role'), 'qe_ack')
def test_query_with_mail_name_email_mixup(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?mail_name=maillist1&[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 0)
response = self.client.get(url + '?mail_name=maillist1&[email protected]', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 1)
self.assertEqual(response.data.get('results')[0].get('contact').get('mail_name'), 'maillist1')
self.assertEqual(response.data.get('results')[0].get('contact').get('email'), '[email protected]')
def test_query_with_mail_name_contact_role_mixup(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?mail_name=maillist1&contact_role=devel_team', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 0)
response = self.client.get(url + '?mail_name=maillist1&contact_role=qe_team', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 1)
self.assertEqual(response.data.get('results')[0].get('contact').get('mail_name'), 'maillist1')
self.assertEqual(response.data.get('results')[0].get('contact_role'), 'qe_team')
def test_query_with_email_contact_role_mixup(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '[email protected]&contact_role=pm', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 0)
response = self.client.get(url + '[email protected]&contact_role=qe_ack', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 1)
self.assertEqual(response.data.get('results')[0].get('contact').get('email'), '[email protected]')
self.assertEqual(response.data.get('results')[0].get('contact_role'), 'qe_ack')
def test_query_with_multi_key_list(self):
url = reverse('rolecontact-list')
response = self.client.get(url + '?username=person1&username=person2&contact_role=pm', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 1)
self.assertEqual(response.data.get('results')[0].get('contact').get('username'), 'person2')
response = self.client.get(url + '?username=person1&username=person2&contact_role=pm&contact_role=qe_ack', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 2)
self.assertEqual(response.data.get('results')[0].get('contact').get('username'), 'person1')
self.assertEqual(response.data.get('results')[1].get('contact').get('username'), 'person2')
def test_patch_update_with_contact_role(self):
url = reverse('rolecontact-detail', args=[1])
data = {'contact_role': 'pm'}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('contact_role'), 'pm')
self.assertNumChanges([1])
def test_patch_update_with_contact(self):
url = reverse('rolecontact-detail', args=[1])
data = {'contact': {'username': 'new_name', 'email': '[email protected]'}}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('contact').get('username'), 'new_name')
self.assertEqual(response.data.get('contact').get('email'), '[email protected]')
self.assertNumChanges([2])
def test_patch_update_with_bad_contact(self):
url = reverse('rolecontact-detail', args=[1])
data = {'contact': {'mali_list': 'new_name', 'email': '[email protected]'}}
response = self.client.patch(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"contact": ["Could not determine type of contact."]})
self.assertNumChanges([])
def test_partial_update_empty(self):
response = self.client.patch(reverse('rolecontact-detail', args=[1]), {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNumChanges([])
def test_put_update(self):
url = reverse('rolecontact-detail', args=[1])
data = {'contact': {'username': 'new_name', 'email': '[email protected]'},
'contact_role': 'pm'}
response = self.client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('contact').get('username'), 'new_name')
self.assertEqual(response.data.get('contact').get('email'), '[email protected]')
self.assertEqual(response.data.get('contact_role'), 'pm')
self.assertNumChanges([2])
def test_put_update_with_bad_data(self):
url = reverse('rolecontact-detail', args=[1])
data = {'contact': {'user_name': 'new_name', 'email': '[email protected]'},
'contact_role': 'pm'}
response = self.client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'contact': ['Could not determine type of contact.']})
self.assertNumChanges([])
def test_delete(self):
url = reverse('rolecontact-detail', args=[1])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertNumChanges([1])
@mock.patch('rest_framework.mixins.DestroyModelMixin.destroy')
    def test_delete_protect(self, mock_destroy):
        mock_destroy.side_effect = ProtectedError("fake PE", None)
url = reverse('rolecontact-detail', args=[1])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"detail": "fake PE None"})
self.assertNumChanges([])
class PersonBulkRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
def setUp(self):
self.eve = Person.objects.create(username='Eve', email='[email protected]').pk
self.mal = Person.objects.create(username='Mal', email='[email protected]').pk
self.non_exist_1 = self.mal + 1
self.non_exist_2 = self.mal + 2
self.eve = str(self.eve)
self.mal = str(self.mal)
self.persons = [{'username': 'Eve', 'email': '[email protected]'},
{'username': 'Mal', 'email': '[email protected]'}]
def test_create_successful(self):
args = [
{'username': 'Alice',
'email': '[email protected]'},
{'username': 'Bob',
'email': '[email protected]'}
]
ids = [self.non_exist_1, self.non_exist_2]
response = self.client.post(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
for data, id in zip(args, ids):
data['id'] = id
self.assertEqual(response.data, args)
self.assertNumChanges([2])
self.assertEqual(Person.objects.all().count(), 4)
def test_create_with_error(self):
args = [
{'username': 'Alice'},
{'username': 'Bob',
'email': '[email protected]'}
]
response = self.client.post(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data,
{'detail': {'email': ['This field is required.']},
'invalid_data': {'username': 'Alice'},
'invalid_data_id': 0})
self.assertNumChanges([])
self.assertEqual(Person.objects.all().count(), 2)
def test_destroy_successful(self):
response = self.client.delete(reverse('person-list'), [self.eve, self.mal], format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertNumChanges([2])
self.assertEqual(Person.objects.all().count(), 0)
def test_destroy_non_found(self):
response = self.client.delete(reverse('person-list'),
[self.eve, self.mal, self.non_exist_1],
format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertNumChanges()
self.assertEqual(Person.objects.all().count(), 2)
def test_update_successful(self):
args = {
self.eve: {'username': 'Alice',
'email': '[email protected]'},
self.mal: {'username': 'Bob',
'email': '[email protected]'}
}
expected = {
self.eve: {'username': 'Alice',
'email': '[email protected]',
'url': 'http://testserver/rest_api/v1/persons/%s/' % self.eve},
self.mal: {'username': 'Bob',
'email': '[email protected]',
'url': 'http://testserver/rest_api/v1/persons/%s/' % self.mal}
}
response = self.client.put(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual(response.data, expected)
self.assertNumChanges([2])
persons = Person.objects.all()
self.assertItemsEqual(args.values(), [person.export() for person in persons])
def test_update_error_bad_data(self):
args = {
self.eve: {'username': 'Alice',
'email': '[email protected]'},
self.mal: {'username': 'Bob'}
}
response = self.client.put(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data,
{'detail': {'email': ['This field is required.']},
'invalid_data': {'username': 'Bob'},
'invalid_data_id': self.mal})
self.assertNumChanges([])
persons = Person.objects.all()
self.assertItemsEqual(self.persons, [person.export() for person in persons])
def test_update_error_not_found(self):
args = {
self.eve: {'username': 'Alice',
'email': '[email protected]'},
self.non_exist_1: {'username': 'Jim',
'email': '[email protected]'}
}
response = self.client.put(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data,
{'detail': 'Not found.',
'invalid_data': {'username': 'Jim',
'email': '[email protected]'},
'invalid_data_id': str(self.non_exist_1)})
self.assertNumChanges([])
persons = Person.objects.all()
self.assertItemsEqual(self.persons, [person.export() for person in persons])
def test_partial_update_successful(self):
args = {self.eve: {'username': 'Alice'},
self.mal: {'username': 'Bob'}}
expected = {
self.eve: {'username': 'Alice',
'email': '[email protected]',
'url': 'http://testserver/rest_api/v1/persons/%s/' % self.eve},
self.mal: {'username': 'Bob',
'email': '[email protected]',
'url': 'http://testserver/rest_api/v1/persons/%s/' % self.mal}
}
response = self.client.patch(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual(response.data, expected)
self.assertNumChanges([2])
for ident in expected.keys():
expected[ident].pop('url')
persons = Person.objects.all()
self.assertItemsEqual(expected.values(), [person.export() for person in persons])
def test_partial_update_error_bad_data(self):
args = {self.eve: {'username': 'Alice'},
self.mal: {'email': 'not-an-email-address'}}
response = self.client.patch(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data,
{'detail': {'email': ['Enter a valid email address.']},
'invalid_data': {'email': 'not-an-email-address'},
'invalid_data_id': self.mal})
self.assertNumChanges([])
persons = Person.objects.all()
self.assertItemsEqual(self.persons, [person.export() for person in persons])
def test_partial_update_error_not_found(self):
args = {self.eve: {'username': 'Alice'},
self.non_exist_1: {'email': 'not-an-email-address'}}
response = self.client.patch(reverse('person-list'), args, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data,
{'detail': 'Not found.',
'invalid_data': {'email': 'not-an-email-address'},
'invalid_data_id': str(self.non_exist_1)})
self.assertNumChanges([])
persons = Person.objects.all()
self.assertItemsEqual(self.persons, [person.export() for person in persons])
def test_partial_update_empty(self):
response = self.client.patch(reverse('person-list'), {}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cloud Endpoints API for Package Repository service."""
import functools
import logging
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from components import auth
from components import utils
from . import acl
from . import client
from . import impl
# This is used by endpoints indirectly.
package = 'cipd'
################################################################################
## Messages used by other messages.
class Status(messages.Enum):
"""Response status code, shared by all responses."""
# Operation finished successfully (generic "success" response).
SUCCESS = 1
# The package instance was successfully registered.
REGISTERED = 2
  # The package instance was already registered (not an error).
ALREADY_REGISTERED = 3
# Some uncategorized non-transient error happened.
ERROR = 4
# No such package.
PACKAGE_NOT_FOUND = 5
# Package itself is known, but requested instance_id isn't registered.
INSTANCE_NOT_FOUND = 6
# Need to upload package data before registering the package.
UPLOAD_FIRST = 7
  # Client binary is not available yet; the call should be retried later.
NOT_EXTRACTED_YET = 8
# Some asynchronous package processing failed.
PROCESSING_FAILED = 9
# Asynchronous package processing is still running.
PROCESSING_NOT_FINISHED_YET = 10
# More than one instance matches criteria in resolveVersion.
AMBIGUOUS_VERSION = 11
class Package(messages.Message):
"""Information about some registered package."""
package_name = messages.StringField(1, required=True)
registered_by = messages.StringField(2, required=True)
registered_ts = messages.IntegerField(3, required=True)
def package_to_proto(entity):
"""Package entity -> Package proto message."""
return Package(
package_name=entity.package_name,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class PackageInstance(messages.Message):
"""Information about some registered package instance."""
package_name = messages.StringField(1, required=True)
instance_id = messages.StringField(2, required=True)
registered_by = messages.StringField(3, required=True)
registered_ts = messages.IntegerField(4, required=True)
def instance_to_proto(entity):
"""PackageInstance entity -> PackageInstance proto message."""
return PackageInstance(
package_name=entity.package_name,
instance_id=entity.instance_id,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class InstanceTag(messages.Message):
"""Some single package instance tag."""
tag = messages.StringField(1, required=True)
registered_by = messages.StringField(2, required=True)
registered_ts = messages.IntegerField(3, required=True)
def tag_to_proto(entity):
"""InstanceTag entity -> InstanceTag proto message."""
return InstanceTag(
tag=entity.tag,
registered_by=entity.registered_by.to_bytes(),
registered_ts=utils.datetime_to_timestamp(entity.registered_ts))
class PackageRef(messages.Message):
"""Information about some ref belonging to a package."""
instance_id = messages.StringField(1, required=True)
modified_by = messages.StringField(2, required=True)
modified_ts = messages.IntegerField(3, required=True)
def package_ref_to_proto(entity):
"""PackageRef entity -> PackageRef proto message."""
return PackageRef(
instance_id=entity.instance_id,
modified_by=entity.modified_by.to_bytes(),
modified_ts=utils.datetime_to_timestamp(entity.modified_ts))
class PackageACL(messages.Message):
"""Access control list for some package path and all parent paths."""
class ElementaryACL(messages.Message):
"""Single per role, per package path ACL."""
package_path = messages.StringField(1, required=True)
role = messages.StringField(2, required=True)
principals = messages.StringField(3, repeated=True)
modified_by = messages.StringField(4, required=True)
modified_ts = messages.IntegerField(5, required=True)
# List of ACLs split by package path and role. No ordering.
acls = messages.MessageField(ElementaryACL, 1, repeated=True)
def package_acls_to_proto(per_role_acls):
"""Dict {role -> list of PackageACL entities} -> PackageACL message."""
acls = []
for role, package_acl_entities in per_role_acls.iteritems():
for e in package_acl_entities:
principals = []
principals.extend(u.to_bytes() for u in e.users)
principals.extend('group:%s' % g for g in e.groups)
acls.append(PackageACL.ElementaryACL(
package_path=e.package_path,
role=role,
principals=principals,
modified_by=e.modified_by.to_bytes(),
modified_ts=utils.datetime_to_timestamp(e.modified_ts),
))
return PackageACL(acls=acls)
class RoleChange(messages.Message):
"""Describes a single modification to ACL."""
class Action(messages.Enum):
GRANT = 1
REVOKE = 2
# Action to perform.
action = messages.EnumField(Action, 1, required=True)
# Role to modify ('OWNER', 'WRITER', 'READER', ...).
role = messages.StringField(2, required=True)
# Principal ('user:...' or 'group:...') to grant or revoke a role for.
principal = messages.StringField(3, required=True)
def role_change_from_proto(proto, package_path):
"""RoleChange proto message -> acl.RoleChange object.
Raises ValueError on format errors.
"""
if not acl.is_valid_role(proto.role):
raise ValueError('Invalid role %s' % proto.role)
user = None
group = None
if proto.principal.startswith('group:'):
group = proto.principal[len('group:'):]
if not auth.is_valid_group_name(group):
raise ValueError('Invalid group name: "%s"' % group)
else:
    # Raises ValueError if proto.principal has an invalid format, e.g. not 'user:...'.
user = auth.Identity.from_bytes(proto.principal)
return acl.RoleChange(
package_path=package_path,
revoke=(proto.action != RoleChange.Action.GRANT),
role=proto.role,
user=user,
group=group)
class Processor(messages.Message):
"""Status of some package instance processor."""
class Status(messages.Enum):
PENDING = 1
SUCCESS = 2
FAILURE = 3
# Name of the processor, defines what it does.
name = messages.StringField(1, required=True)
# Status of the processing.
status = messages.EnumField(Status, 2, required=True)
def processors_protos(instance):
"""Given PackageInstance entity returns a list of Processor messages."""
def procs_to_msg(procs, status):
return [Processor(name=name, status=status) for name in procs]
processors = []
processors += procs_to_msg(
instance.processors_pending,
Processor.Status.PENDING)
processors += procs_to_msg(
instance.processors_success,
Processor.Status.SUCCESS)
processors += procs_to_msg(
instance.processors_failure,
Processor.Status.FAILURE)
return processors
################################################################################
class FetchPackageResponse(messages.Message):
"""Results of fetchPackage call."""
# TODO(vadimsh): Add more info (like a list of labels or instances).
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, information about the package.
package = messages.MessageField(Package, 3, required=False)
################################################################################
class ListPackagesResponse(messages.Message):
"""Results of listPackage call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, names of the packages and names of directories.
packages = messages.StringField(3, repeated=True)
directories = messages.StringField(4, repeated=True)
################################################################################
class FetchInstanceResponse(messages.Message):
"""Results of fetchInstance call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, information about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For SUCCESS, a signed url to fetch the package instance file from.
fetch_url = messages.StringField(4, required=False)
# For SUCCESS, list of processors applied to the instance.
processors = messages.MessageField(Processor, 5, repeated=True)
################################################################################
class RegisterInstanceResponse(messages.Message):
"""Results of registerInstance call.
upload_session_id and upload_url (if present) can be used with CAS service
(finishUpload call in particular).
Callers are expected to execute the following protocol:
1. Attempt to register a package instance by calling registerInstance(...).
2. On UPLOAD_FIRST response, upload package data and finalize the upload by
using upload_session_id and upload_url and calling cas.finishUpload.
3. Once upload is finalized, call registerInstance(...) again.
"""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For REGISTERED or ALREADY_REGISTERED, info about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For UPLOAD_FIRST status, a unique identifier of the upload operation.
upload_session_id = messages.StringField(4, required=False)
# For UPLOAD_FIRST status, URL to PUT file to via resumable upload protocol.
upload_url = messages.StringField(5, required=False)
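# A minimal client-side sketch of the protocol described in the docstring
# above (illustration only, not part of the service): 'repo' and 'cas' stand
# for hypothetical API client stubs, and 'upload_file' is an assumed helper
# that PUTs the package data to upload_url.
#
#   def ensure_instance_registered(repo, cas, package_name, instance_id):
#     resp = repo.registerInstance(package_name=package_name,
#                                  instance_id=instance_id)
#     if resp.status == Status.UPLOAD_FIRST:
#       upload_file(resp.upload_url)  # Step 2: upload the package data.
#       cas.finishUpload(upload_session_id=resp.upload_session_id)
#       resp = repo.registerInstance(package_name=package_name,
#                                    instance_id=instance_id)  # Step 3: retry.
#     assert resp.status in (Status.REGISTERED, Status.ALREADY_REGISTERED)
#     return resp.instance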
################################################################################
class SetRefRequest(messages.Message):
"""Body of setRef call."""
# ID of the package instance to point the ref to.
instance_id = messages.StringField(1, required=True)
class SetRefResponse(messages.Message):
"""Results of setRef call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about the ref.
ref = messages.MessageField(PackageRef, 3, required=False)
################################################################################
class FetchTagsResponse(messages.Message):
"""Results of fetchTags call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about found tags.
tags = messages.MessageField(InstanceTag, 3, repeated=True)
class AttachTagsRequest(messages.Message):
"""Body of attachTags call."""
tags = messages.StringField(1, repeated=True)
class AttachTagsResponse(messages.Message):
"""Results of attachTag call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, details about attached tags.
tags = messages.MessageField(InstanceTag, 3, repeated=True)
class DetachTagsResponse(messages.Message):
"""Results of detachTags call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class SearchResponse(messages.Message):
"""Results of searchInstances call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, list of instances found.
instances = messages.MessageField(PackageInstance, 3, repeated=True)
class ResolveVersionResponse(messages.Message):
"""Results of resolveVersion call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS, concrete existing instance ID.
instance_id = messages.StringField(3, required=False)
################################################################################
class FetchACLResponse(messages.Message):
"""Results of fetchACL call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS status, list of ACLs split by package path and role.
acls = messages.MessageField(PackageACL, 3, required=False)
################################################################################
class ModifyACLRequest(messages.Message):
"""Body of modifyACL call."""
changes = messages.MessageField(RoleChange, 1, repeated=True)
class ModifyACLResponse(messages.Message):
"""Results of modifyACL call."""
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
################################################################################
class FetchClientBinaryResponse(messages.Message):
"""Results of fetchClientBinary call."""
class ClientBinary(messages.Message):
# SHA1 hex digest of the extracted binary, for verification on the client.
sha1 = messages.StringField(1, required=True)
# Size of the binary file, just for information.
size = messages.IntegerField(2, required=True)
# A signed url to fetch the binary file from.
fetch_url = messages.StringField(3, required=True)
status = messages.EnumField(Status, 1, required=True)
error_message = messages.StringField(2, required=False)
# For SUCCESS or NOT_EXTRACTED_YET, information about the package instance.
instance = messages.MessageField(PackageInstance, 3, required=False)
# For SUCCESS, information about the client binary.
client_binary = messages.MessageField(ClientBinary, 4, required=False)
################################################################################
class Error(Exception):
status = Status.ERROR
class PackageNotFoundError(Error):
status = Status.PACKAGE_NOT_FOUND
class InstanceNotFoundError(Error):
status = Status.INSTANCE_NOT_FOUND
class ProcessingFailedError(Error):
status = Status.PROCESSING_FAILED
class ProcessingNotFinishedYetError(Error):
status = Status.PROCESSING_NOT_FINISHED_YET
class ValidationError(Error):
# TODO(vadimsh): Use VALIDATION_ERROR. It changes JSON protocol.
status = Status.ERROR
def validate_package_name(package_name):
if not impl.is_valid_package_path(package_name):
raise ValidationError('Invalid package name')
return package_name
def validate_package_path(package_path):
if not impl.is_valid_package_path(package_path):
raise ValidationError('Invalid package path')
return package_path
def validate_package_ref(ref):
if not impl.is_valid_package_ref(ref):
raise ValidationError('Invalid package ref name')
return ref
def validate_instance_id(instance_id):
if not impl.is_valid_instance_id(instance_id):
raise ValidationError('Invalid package instance ID')
return instance_id
def validate_instance_tag(tag):
if not impl.is_valid_instance_tag(tag):
raise ValidationError('Invalid tag "%s"' % tag)
return tag
def validate_instance_tag_list(tags):
if not tags:
raise ValidationError('Tag list is empty')
return [validate_instance_tag(tag) for tag in tags]
def validate_instance_version(version):
if not impl.is_valid_instance_version(version):
raise ValidationError('Not a valid instance ID or tag: "%s"' % version)
return version
def endpoints_method(request_message, response_message, **kwargs):
"""Wrapper around Endpoint methods to simplify error handling.
Catches Error exceptions and converts them to error responses. Assumes
response_message has fields 'status' and 'error_message'.
"""
assert hasattr(response_message, 'status')
assert hasattr(response_message, 'error_message')
def decorator(f):
@auth.endpoints_method(request_message, response_message, **kwargs)
@functools.wraps(f)
def wrapper(*args):
try:
response = f(*args)
if response.status is None:
response.status = Status.SUCCESS
return response
except Error as e:
return response_message(
status=e.status,
error_message=e.message if e.message else None)
except auth.Error as e:
caller = auth.get_current_identity().to_bytes()
logging.warning('%s (%s): %s', e.__class__.__name__, caller, e)
raise
return wrapper
return decorator
################################################################################
@auth.endpoints_api(
name='repo',
version='v1',
title='Package Repository API')
class PackageRepositoryApi(remote.Service):
"""Package Repository API."""
# Cached value of 'service' property.
_service = None
@property
def service(self):
"""Returns configured impl.RepoService."""
if self._service is None:
self._service = impl.get_repo_service()
if self._service is None or not self._service.is_fetch_configured():
raise endpoints.InternalServerErrorException(
'Service is not configured')
return self._service
def get_instance(self, package_name, instance_id):
"""Grabs PackageInstance or raises appropriate *NotFoundError."""
instance = self.service.get_instance(package_name, instance_id)
if instance is None:
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
raise InstanceNotFoundError()
return instance
def verify_instance_exists(self, package_name, instance_id):
"""Raises appropriate *NotFoundError if instance is missing."""
self.get_instance(package_name, instance_id)
def verify_instance_is_ready(self, package_name, instance_id):
"""Raises appropriate error if instance doesn't exist or not ready yet.
Instance is ready when all processors successfully finished.
"""
instance = self.get_instance(package_name, instance_id)
if instance.processors_failure:
raise ProcessingFailedError(
'Failed processors: %s' % ', '.join(instance.processors_failure))
if instance.processors_pending:
raise ProcessingNotFinishedYetError(
'Pending processors: %s' % ', '.join(instance.processors_pending))
### Package methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True)),
FetchPackageResponse,
http_method='GET',
path='package',
name='fetchPackage')
def fetch_package(self, request):
"""Returns information about a package."""
package_name = validate_package_name(request.package_name)
caller = auth.get_current_identity()
if not acl.can_fetch_package(package_name, caller):
raise auth.AuthorizationError()
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
return FetchPackageResponse(package=package_to_proto(pkg))
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
path=messages.StringField(1, required=False),
recursive=messages.BooleanField(2, required=False)),
ListPackagesResponse,
http_method='GET',
path='package/search',
name='listPackages')
def list_packages(self, request):
"""Returns packages in the given directory and possibly subdirectories."""
path = request.path or ''
recursive = request.recursive or False
pkgs, dirs = self.service.list_packages(path, recursive)
caller = auth.get_current_identity()
visible_pkgs = [p for p in pkgs if acl.can_fetch_package(p, caller)]
visible_dirs = [d for d in dirs if acl.can_fetch_package(d, caller)]
return ListPackagesResponse(packages=visible_pkgs, directories=visible_dirs)
### PackageInstance methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
FetchInstanceResponse,
http_method='GET',
path='instance',
name='fetchInstance')
def fetch_instance(self, request):
"""Returns signed URL that can be used to fetch a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
instance = self.get_instance(package_name, instance_id)
return FetchInstanceResponse(
instance=instance_to_proto(instance),
fetch_url=self.service.generate_fetch_url(instance),
processors=processors_protos(instance))
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
RegisterInstanceResponse,
path='instance',
http_method='POST',
name='registerInstance')
def register_instance(self, request):
"""Registers a new package instance in the repository."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_register_instance(package_name, caller):
raise auth.AuthorizationError()
instance = self.service.get_instance(package_name, instance_id)
if instance is not None:
return RegisterInstanceResponse(
status=Status.ALREADY_REGISTERED,
instance=instance_to_proto(instance))
# Need to upload to CAS first? Open an upload session. Caller must use
# CASServiceApi to finish the upload and then call registerInstance again.
if not self.service.is_instance_file_uploaded(package_name, instance_id):
upload_url, upload_session_id = self.service.create_upload_session(
package_name, instance_id, caller)
return RegisterInstanceResponse(
status=Status.UPLOAD_FIRST,
upload_session_id=upload_session_id,
upload_url=upload_url)
# Package data is in the store. Make an entity.
instance, registered = self.service.register_instance(
package_name=package_name,
instance_id=instance_id,
caller=caller,
now=utils.utcnow())
return RegisterInstanceResponse(
status=Status.REGISTERED if registered else Status.ALREADY_REGISTERED,
instance=instance_to_proto(instance))
### Refs methods.
@endpoints_method(
endpoints.ResourceContainer(
SetRefRequest,
package_name=messages.StringField(1, required=True),
ref=messages.StringField(2, required=True)),
SetRefResponse,
path='ref',
http_method='POST',
name='setRef')
def set_ref(self, request):
"""Creates a ref or moves an existing one."""
package_name = validate_package_name(request.package_name)
ref = validate_package_ref(request.ref)
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_move_ref(package_name, ref, caller):
raise auth.AuthorizationError('Not authorized to move "%s"' % ref)
self.verify_instance_is_ready(package_name, instance_id)
ref_entity = self.service.set_package_ref(
package_name=package_name,
ref=ref,
instance_id=instance_id,
caller=caller,
now=utils.utcnow())
return SetRefResponse(ref=package_ref_to_proto(ref_entity))
### Tags methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
tag=messages.StringField(3, repeated=True)),
FetchTagsResponse,
path='tags',
http_method='GET',
name='fetchTags')
def fetch_tags(self, request):
"""Lists package instance tags (in no particular order)."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tag) if request.tag else None
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
self.verify_instance_exists(package_name, instance_id)
if not tags:
# Fetch all.
attached = self.service.query_tags(package_name, instance_id)
else:
# Fetch selected only. "Is tagged by?" check essentially.
found = self.service.get_tags(package_name, instance_id, tags)
attached = [found[tag] for tag in tags if found[tag]]
return FetchTagsResponse(tags=[tag_to_proto(tag) for tag in attached])
@endpoints_method(
endpoints.ResourceContainer(
AttachTagsRequest,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
AttachTagsResponse,
path='tags',
http_method='POST',
name='attachTags')
def attach_tags(self, request):
"""Attaches a set of tags to a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tags)
caller = auth.get_current_identity()
for tag in tags:
if not acl.can_attach_tag(package_name, tag, caller):
raise auth.AuthorizationError('Not authorized to attach "%s"' % tag)
self.verify_instance_is_ready(package_name, instance_id)
attached = self.service.attach_tags(
package_name=package_name,
instance_id=instance_id,
tags=tags,
caller=caller,
now=utils.utcnow())
return AttachTagsResponse(tags=[tag_to_proto(attached[t]) for t in tags])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True),
tag=messages.StringField(3, repeated=True)),
DetachTagsResponse,
path='tags',
http_method='DELETE',
name='detachTags')
def detach_tags(self, request):
"""Removes given tags from a package instance."""
package_name = validate_package_name(request.package_name)
instance_id = validate_instance_id(request.instance_id)
tags = validate_instance_tag_list(request.tag)
caller = auth.get_current_identity()
for tag in tags:
if not acl.can_detach_tag(package_name, tag, caller):
raise auth.AuthorizationError('Not authorized to detach "%s"' % tag)
self.verify_instance_exists(package_name, instance_id)
self.service.detach_tags(
package_name=package_name,
instance_id=instance_id,
tags=tags)
return DetachTagsResponse()
### Search methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
tag=messages.StringField(1, required=True),
package_name=messages.StringField(2, required=False)),
SearchResponse,
path='instance/search',
http_method='GET',
name='searchInstances')
def search_instances(self, request):
"""Returns package instances with given tag (in no particular order)."""
tag = validate_instance_tag(request.tag)
if request.package_name:
package_name = validate_package_name(request.package_name)
else:
package_name = None
caller = auth.get_current_identity()
callback = None
if package_name:
# If search is limited to one package, check its ACL only once.
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
else:
# Filter out packages not allowed by ACL.
acl_cache = {}
def check_readable(package_name, _instance_id):
if package_name not in acl_cache:
acl_cache[package_name] = acl.can_fetch_instance(package_name, caller)
return acl_cache[package_name]
callback = check_readable
found = self.service.search_by_tag(tag, package_name, callback)
return SearchResponse(instances=[instance_to_proto(i) for i in found])
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
version=messages.StringField(2, required=True)),
ResolveVersionResponse,
path='instance/resolve',
http_method='GET',
name='resolveVersion')
def resolve_version(self, request):
"""Returns instance ID of an existing instance given a ref or a tag."""
package_name = validate_package_name(request.package_name)
version = validate_instance_version(request.version)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
pkg = self.service.get_package(package_name)
if pkg is None:
raise PackageNotFoundError()
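# Ask for up to two matches: a second result is enough to prove that the
# version (when it is a tag) is ambiguous.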
ids = self.service.resolve_version(package_name, version, limit=2)
if not ids:
raise InstanceNotFoundError()
if len(ids) > 1:
return ResolveVersionResponse(
status=Status.AMBIGUOUS_VERSION,
error_message='More than one instance has tag "%s" set' % version)
return ResolveVersionResponse(instance_id=ids[0])
### ACL methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_path=messages.StringField(1, required=True)),
FetchACLResponse,
http_method='GET',
path='acl',
name='fetchACL')
def fetch_acl(self, request):
"""Returns access control list for a given package path."""
package_path = validate_package_path(request.package_path)
caller = auth.get_current_identity()
if not acl.can_fetch_acl(package_path, caller):
raise auth.AuthorizationError()
return FetchACLResponse(
acls=package_acls_to_proto({
role: acl.get_package_acls(package_path, role)
for role in acl.ROLES
}))
@endpoints_method(
endpoints.ResourceContainer(
ModifyACLRequest,
package_path=messages.StringField(1, required=True)),
ModifyACLResponse,
http_method='POST',
path='acl',
name='modifyACL')
def modify_acl(self, request):
"""Changes access control list for a given package path."""
package_path = validate_package_path(request.package_path)
try:
changes = [
role_change_from_proto(msg, package_path)
for msg in request.changes
]
except ValueError as exc:
raise ValidationError('Invalid role change request: %s' % exc)
caller = auth.get_current_identity()
if not acl.can_modify_acl(package_path, caller):
raise auth.AuthorizationError()
# Apply changes. Do not catch ValueError. Validation above should be
# sufficient. If it is not, HTTP 500 and an uncaught exception in logs is
# exactly what is needed.
acl.modify_roles(changes, caller, utils.utcnow())
return ModifyACLResponse()
### ClientBinary methods.
@endpoints_method(
endpoints.ResourceContainer(
message_types.VoidMessage,
package_name=messages.StringField(1, required=True),
instance_id=messages.StringField(2, required=True)),
FetchClientBinaryResponse,
http_method='GET',
path='client',
name='fetchClientBinary')
def fetch_client_binary(self, request):
"""Returns signed URL that can be used to fetch CIPD client binary."""
package_name = validate_package_name(request.package_name)
if not client.is_cipd_client_package(package_name):
raise ValidationError('Not a CIPD client package')
instance_id = validate_instance_id(request.instance_id)
caller = auth.get_current_identity()
if not acl.can_fetch_instance(package_name, caller):
raise auth.AuthorizationError()
# Grab the location of the extracted binary.
instance = self.get_instance(package_name, instance_id)
client_info, error_message = self.service.get_client_binary_info(instance)
if error_message:
raise Error(error_message)
if client_info is None:
return FetchClientBinaryResponse(
status=Status.NOT_EXTRACTED_YET,
instance=instance_to_proto(instance))
return FetchClientBinaryResponse(
instance=instance_to_proto(instance),
client_binary=FetchClientBinaryResponse.ClientBinary(
sha1=client_info.sha1,
size=client_info.size,
fetch_url=client_info.fetch_url))
|
|
# main.py that controls the whole app
# to run: just run bokeh serve --show crossfilter_app in the benchmark-view repo
from random import random
import os
from bokeh.layouts import column
from bokeh.models import Button
from bokeh.models.widgets import Select, MultiSelect, Slider
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
#### CROSSFILTER PART ##### >>> module load errors are being thrown; how should the relative import be done?
from crossview.crossfilter.models import CrossFilter
#from benchmark.loader import load
#### DATA INPUT FROM REST API ######
#from benchmark.loader import load
#### DATA INPUT STRAIGHT FROM PANDAS for test purposes ####
import pandas as pd
import numpy as np
##### PLOTTING PART -- GLOBAL FIGURE CREATION ########
# create a plot and style its properties
## global data interface to come from REST API
vasp_data = pd.read_csv('../benchmark/data/francesca_data_head.csv')
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location='below')
#p.border_fill_color = 'black'
#p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
#### FORMAT OF DATA SENT TO WIDGET #######
# add a text renderer to our plot (no data yet)
r = p.text(x=[], y=[], text=[], text_color=[], text_font_size="20pt",
text_baseline="middle", text_align="center")
r2 = p.circle(x=[], y=[])
i = 0
ds = r.data_source
ds2 = r2.data_source
##### WIDGET RESPONSES IN THE FORM OF CALLBACKS ######
# create a callback that will add a number in a random location
def callback():
global i
# BEST PRACTICE --- update .data in one step with a new dict
new_data = dict()
new_data['x'] = ds.data['x'] + [random()*70 + 15]
new_data['y'] = ds.data['y'] + [random()*70 + 15]
new_data['text_color'] = ds.data['text_color'] + [RdYlBu3[i%3]]
new_data['text'] = ds.data['text'] + [str(i)]
ds.data = new_data
i = i + 1
#### The make crossfilter callback
#### make data loading as easy as possible for now: straight from
#### the benchmark data csv file, not from the API with the decorators
#### TO DO (after we see that the crossfilter and new bokeh play nicely):
#### integrate with the API and uncomment the decorators and data loader
#@bokeh_app.route("/bokeh/benchmark/")
#@object_page("benchmark")
#### RENDERERS OF WIDGETS #####
def make_bokeh_crossfilter(axis='k-point'):
"""The root crossfilter controller"""
# Loading the dft data head as a
# pandas dataframe
new_data = dict()
# new_data = load("./benchmark/data/francesca_data_head")
# use a straight pandas dataframe for now instead and follow the
# BEST PRACTICE described above: clean up the data object on each callback.
# data that will be given back on the callback
new_data = vasp_data # our data that will be replaced by the API
global p
p = CrossFilter.create(df=new_data)
print (type(p))
# don't know what the CrossFilter class really returns in terms of data, but for testing purposes let's
# return something that is compatible with the new_data dictionary returned in the
# vanilla example through the global object ds.data
# for example the x - y coordinates on the plots correspond to mins on the data set in k-point and value fields
# new_data['x'] = ds2.data['x'] + list(data[axis])
# new_data['y'] = ds2.data['y'] + list(data['value'])
# other stuff default as in vanilla callback()
# for test purposes to see actually what coordinate is getting plotted
# it is always going to be the same because only one min exists in the dataset
# it's at x = 6, y = -12
# SUCCESS: learnt how to create a custom callback that loads a CSV file and does something with it
# print ("New data from crossfilter", new_data)
# finally assign to ds.data
# ds2.data = new_data
def make_wflow_crossfilter(tags={'element_widget':['Cu', 'Pd', 'Mo'], 'code_widget':['VASP'], 'ExchCorr':['PBE']}):
"""
demo crossfilter based on pure pandas dataframes that serves a data processing
workflow that selects inputs from widgets
args:
tags: dict of selections from up to 3 widgets
returns:
dictionary of crossfiltered dataframes that can be processed further down the workflow
"""
## Actual widget controlled inputs ##
# elements = tags['element']
# exchanges = tags['ExchCorr']
# propys = tags['code_widget']
## Demo user inputs for testing: selects everything in the test csv (max data load) ##
elements = np.unique(vasp_data['element'])
exchanges = np.unique(vasp_data['exchange'])
propys = ['B','dB','a0']
# final dictionary of crossfiltered dataframes
crossfilts = {}
# crossfiltering part - playing the role of the "Crossfilter class in bokeh.models"
for pr in propys:
for el in elements:
for ex in exchanges:
# crossfilter down to exchange and element
elems = vasp_data[vasp_data['element']==el]
exchs = elems[elems['exchange']==ex]
# separate into properties, energy, kpoints
p = exchs[exchs['property']==pr]
e = exchs[exchs['property']=='e0']
##### *** Accuracy calculation based on default standards *** #####
# choose reference from dict
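# NOTE: expt_ref_prb and wien_ref are reference-value dicts assumed to be
# defined or imported elsewhere (e.g. alongside the benchmark data); they are
# not created in this file.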
ref_e = expt_ref_prb[el][pr]
ref_w = wien_ref[el][pr]
# calculate percent errors on property - ACCURACY CALCULATION based on default standards
props = [v for v in p['value'] ]
percs_wien = [ (v - ref_w) / ref_w * 100 for v in p['value']]
percs_prb = [ (v - ref_e) / ref_e * 100 for v in p['value']]
kpts = [ k for k in p['k-point']]
kpts_atom = [ k**3 for k in p['k-point'] ]
##### *** Accuracy calculation based on default standards *** #####
##### *** Calculate prec_sigma of energy *** #####
energy = [ v for v in e['value']]
end= len(energy) - 1
prec_sigma = [ v - energy[end] for v in energy]
# make data frame of kpoints, energy, percent errors on property
if kpts and energy and props:
NAME = '_'.join([el,ex,pr])
Rdata =\
pd.DataFrame({'Kpoints_size':kpts, 'Kpoints_atom_density':kpts_atom, 'Energy':energy, 'Prec_Sigma':prec_sigma , pr:props, 'percent_error_wien':percs_wien, 'percent_error_expt':percs_prb })
crossfilts[NAME] = Rdata
return crossfilts
def calculate_prec(cross_df, automate= False):
"""
function that calculates the prec_inf using R
and returns a fully constructed, plottable dataframe
Args:
cross_df: pandas dataframe containing the data
automate: bool, a To do feature to automatically calculate the best fit
Returns:
dataframe containing the R-added precision values, to be
consumed (almost always) by the plotting code.
"""
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
import rpy2.rinterface as rin
stats = importr('stats')
base = importr('base')
# activate the R environment in python
rpy2.robjects.numpy2ri.activate()
pandas2ri.activate()
# read in necessary elements of the crossfiltered dataframe
df = pd.DataFrame({'x': cross_df['Kpoints_atom_density'],
'y': cross_df['Energy']})
ro.globalenv['dataframe']=df
### *** R used to obtain the fit on the data to calculate prec_inf *** ###
# perform regression - bokeh widgets can be used here to provide the inputs to the nls regression
# some python to R translation of object names via the pandas - R dataframes
y = df['y']
x = df['x']
l = len(y) - 1 # needed because R indexes list from 1 to len(list)
# ***WIDGET inputs*** # OR AUTOMATE
# the slider inputs on starting point or can be automated also
l1 = 3
l2 = 0
fitover = rin.SexpVector(list(range(l1,l-l2)), rin.INTSXP)
# numeric entry widget for 'b' is plausible for user to choose best starting guess
start_guess = {'a': y[l], 'b': 5}
start=pandas2ri.py2ri(pd.DataFrame(start_guess,index=start_guess))
# drop down list selection of model
model = 'y~a*x/(b+x)'
# Minimize function with weights and selection
m = \
stats.nls(model, start = start, algorithm = "port", subset = fitover, weights = x**2, data=base.as_symbol('dataframe'))
# Estimation of goodness of fit
g = stats.cor(y[l1:l-l2],stats.predict(m))
# Report summary of fit, values and error bars
print( base.summary(m).rx2('coefficients') )
# Extrapolation value is given by a
a = stats.coef(m)[1]
# Calculation of precision
prec = abs(y-a)
# test print-outs of the data? how to render onto html like Shiny if necessary?
print("We learn that the converged value is: {0} and best precision achieved in the measurement is {1}".format(a, min(abs(prec))))
cross_df['Energy_Prec_Inf'] = prec
# close the R environments
rpy2.robjects.numpy2ri.deactivate()
pandas2ri.deactivate()
return (cross_df)
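# A hedged usage sketch (assumes make_wflow_crossfilter returns the crossfilts
# dict it builds, and that rpy2 plus its R dependencies are installed):
#
#   crossfilts = make_wflow_crossfilter()
#   for name, frame in crossfilts.items():
#     with_prec = calculate_prec(frame)
#     print(name, min(abs(with_prec['Energy_Prec_Inf'])))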
def make_widgets():
"""
main function that will control the rendering of UI widgets
"""
pass
#### WIDGET CREATIONS ####
# OLD VANILLA
# add a button widget and configure with the call back
# button_basic = Button(label="Press Me")
# button_basic.on_click(callback)
#make_bokeh_crossfilter()
# create a button for Select button for input
#menu = [("Bulk Modulus", "B"), ("B'", "dB"), ("Lattice Constant", "a0")]
#select_property = Select(name="Selection", options=menu, value="B")
#select_property.on_click(make_bokeh_crossfilter(axis=value))
# create a button for make crossfilter app
button_crossfilter = Button(label="Make Crossfilter")
button_crossfilter.on_click(make_bokeh_crossfilter)
# create a button for the crossfilter workflow
button_w_crossfilter = Button(label="Make Crossfilter Workflow")
button_w_crossfilter.on_click(make_wflow_crossfilter)
# put the button and plot in a layout and add to the document
curdoc().add_root(column(button_crossfilter, button_w_crossfilter, p))
|
|
from babel import Locale
from ..allspeak import pluralize
def test_pluralize_numbers():
d = {
0: u'No apples',
1: u'One apple',
3: u'Few apples',
'other': u'{count} apples',
}
assert pluralize(d, 0) == u'No apples'
assert pluralize(d, 1) == u'One apple'
assert pluralize(d, 3) == u'Few apples'
assert pluralize(d, 10) == u'{count} apples'
def test_pluralize_literal():
d = {
'zero': u'No apples',
'one': u'One apple',
'few': u'Few apples',
'many': u'{count} apples',
}
assert pluralize(d, 0) == u'No apples'
assert pluralize(d, 1) == u'One apple'
assert pluralize(d, 3) == u'{count} apples'
assert pluralize(d, 10) == u'{count} apples'
def test_pluralize_mixed():
d = {
'one': u'One apple',
2: u'Two apples',
'other': u'{count} apples',
}
assert pluralize(d, 1) == u'One apple'
assert pluralize(d, 2) == u'Two apples'
assert pluralize(d, 10) == u'{count} apples'
def test_pluralize_zero_or_many():
d = {
'zero': u'off',
'many': u'on'
}
assert pluralize(d, 3) == u'on'
d = {
'zero': u'off',
'many': u'on'
}
assert pluralize(d, 0) == u'off'
assert pluralize(d, None) == u'off'
assert pluralize({}, 3) == u''
def test_pluralize_other():
d = {
'one': u'One apple',
'other': u'meh',
}
assert pluralize(d, 0) == u'meh'
assert pluralize(d, 1) == u'One apple'
assert pluralize(d, 2) == u'meh'
assert pluralize(d, 3) == u'meh'
assert pluralize(d, 10) == u'meh'
def test_two_plural_mode():
d = {
'zero': u'zero',
'one': u'one',
'two': u'two',
'few': u'few',
'other': u'other',
}
locale = Locale('en')
assert pluralize(d, 0, locale) == u'zero'
assert pluralize(d, 1, locale) == u'one'
assert pluralize(d, 2, locale) == u'other'
assert pluralize(d, 3, locale) == u'other'
assert pluralize(d, 4, locale) == u'other'
assert pluralize(d, 5, locale) == u'other'
assert pluralize(d, 6, locale) == u'other'
assert pluralize(d, 7, locale) == u'other'
assert pluralize(d, 10, locale) == u'other'
assert pluralize(d, 11, locale) == u'other'
assert pluralize(d, 50, locale) == u'other'
assert pluralize(d, 99, locale) == u'other'
assert pluralize(d, 101, locale) == u'other'
assert pluralize(d, 102, locale) == u'other'
assert pluralize(d, 105, locale) == u'other'
def test_one_plural_mode():
d = {
'one': u'one',
'two': u'two',
'few': u'few',
'many': u'many',
'other': u'other',
}
locale = Locale('zh')
assert pluralize(d, 0, locale) == u'other'
assert pluralize(d, 1, locale) == u'other'
assert pluralize(d, 2, locale) == u'other'
assert pluralize(d, 3, locale) == u'other'
assert pluralize(d, 4, locale) == u'other'
assert pluralize(d, 5, locale) == u'other'
assert pluralize(d, 6, locale) == u'other'
assert pluralize(d, 7, locale) == u'other'
assert pluralize(d, 10, locale) == u'other'
assert pluralize(d, 11, locale) == u'other'
assert pluralize(d, 50, locale) == u'other'
assert pluralize(d, 99, locale) == u'other'
assert pluralize(d, 101, locale) == u'other'
assert pluralize(d, 102, locale) == u'other'
assert pluralize(d, 105, locale) == u'other'
d = {
'zero': u'zero',
'one': u'one',
'two': u'two',
'few': u'few',
'many': u'many',
'other': u'other',
}
locale = Locale('zh')
assert pluralize(d, 0, locale) == u'zero'
def test_pluralize_arabic():
d = {
'zero': u'zero',
'one': u'one',
'two': u'two',
'few': u'few',
'many': u'many',
'other': u'other',
}
locale = Locale('ar')
assert pluralize(d, 0, locale) == u'zero'
assert pluralize(d, 1, locale) == u'one'
assert pluralize(d, 2, locale) == u'two'
assert pluralize(d, 3, locale) == u'few'
assert pluralize(d, 4, locale) == u'few'
assert pluralize(d, 5, locale) == u'few'
assert pluralize(d, 6, locale) == u'few'
assert pluralize(d, 7, locale) == u'few'
assert pluralize(d, 10, locale) == u'few'
assert pluralize(d, 11, locale) == u'many'
assert pluralize(d, 50, locale) == u'many'
assert pluralize(d, 99, locale) == u'many'
assert pluralize(d, 101, locale) == u'other'
assert pluralize(d, 102, locale) == u'other'
assert pluralize(d, 105, locale) == u'few'
def test_pluralize_russian():
d = {
'zero': u'zero',
'one': u'one',
'two': u'two',
'few': u'few',
'many': u'many',
'other': u'other',
}
locale = Locale('ru')
assert pluralize(d, 0, locale) == u'zero'
assert pluralize(d, 1, locale) == u'one'
assert pluralize(d, 2, locale) == u'few'
assert pluralize(d, 3, locale) == u'few'
assert pluralize(d, 4, locale) == u'few'
assert pluralize(d, 5, locale) == u'many'
assert pluralize(d, 6, locale) == u'many'
assert pluralize(d, 7, locale) == u'many'
assert pluralize(d, 10, locale) == u'many'
assert pluralize(d, 11, locale) == u'many'
assert pluralize(d, 21, locale) == u'one'
assert pluralize(d, 22, locale) == u'few'
assert pluralize(d, 23, locale) == u'few'
assert pluralize(d, 24, locale) == u'few'
assert pluralize(d, 25, locale) == u'many'
assert pluralize(d, 50, locale) == u'many'
assert pluralize(d, 99, locale) == u'many'
assert pluralize(d, 101, locale) == u'one'
assert pluralize(d, 102, locale) == u'few'
assert pluralize(d, 105, locale) == u'many'
assert pluralize(d, 111, locale) == u'many'
assert pluralize(d, 112, locale) == u'many'
assert pluralize(d, 113, locale) == u'many'
assert pluralize(d, 114, locale) == u'many'
assert pluralize(d, 119, locale) == u'many'
assert pluralize(d, 121, locale) == u'one'
assert pluralize(d, 122, locale) == u'few'
assert pluralize(d, 125, locale) == u'many'
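def test_pluralize_format_usage():
    # A hedged illustration (not part of the original suite): pluralize returns
    # the raw template string, so the caller is assumed to substitute the count
    # afterwards, e.g. with str.format.
    d = {
        'one': u'One apple',
        'other': u'{count} apples',
    }
    assert pluralize(d, 1).format(count=1) == u'One apple'
    assert pluralize(d, 10).format(count=10) == u'10 apples'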
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_objfile
----------------------------------
Tests for GCSObjFile class and auxiliary classes.
"""
import os
import six
import unittest
import mock
from gcs_client import errors
from gcs_client import gcs_object
class TestBuffer(unittest.TestCase):
"""Tests for _Buffer class."""
def setUp(self):
self.buf = gcs_object._Buffer()
def test_init(self):
"""Test buffer initialization."""
self.assertEqual(0, len(self.buf))
def test_write(self):
"""Test basic write method."""
data = b'0' * 50 + b'1' * 50
self.buf.write(data)
self.assertEqual(len(data), len(self.buf))
self.assertEqual(1, len(self.buf._queue))
self.assertEqual(data, self.buf._queue[0])
def test_multiple_writes(self):
"""Test multiple writes."""
data = b'0' * 50
self.buf.write(data)
data2 = data + b'1' * 50
self.buf.write(data2)
self.assertEqual(len(data) + len(data2), len(self.buf))
self.assertEqual(2, len(self.buf._queue))
self.assertEqual(data, self.buf._queue[0])
self.assertEqual(data2, self.buf._queue[1])
def test_read(self):
"""Test basic read all method."""
data = b'0' * 50
self.buf.write(data)
data2 = b'1' * 50
self.buf.write(data2)
read = self.buf.read()
self.assertEqual(0, len(self.buf))
self.assertEqual(data + data2, read)
self.assertEqual(0, len(self.buf._queue))
def test_read_partial(self):
"""Test complex read overlapping reads from different 'chunks'."""
data = b'0' * 20 + b'1' * 20
self.buf.write(data)
data2 = b'2' * 50
self.buf.write(data2)
read = self.buf.read(20)
self.assertEqual(70, len(self.buf))
self.assertEqual(data[:20], read)
read = self.buf.read(10)
self.assertEqual(60, len(self.buf))
self.assertEqual(data[20:30], read)
read = self.buf.read(30)
self.assertEqual(30, len(self.buf))
self.assertEqual(data[30:] + data2[:20], read)
read = self.buf.read(40)
self.assertEqual(0, len(self.buf))
self.assertEqual(data2[20:], read)
def test_clear(self):
"""Test clear method."""
data = b'0' * 50
self.buf.write(data)
data2 = b'1' * 50
self.buf.write(data2)
self.assertEqual(len(data) + len(data2), len(self.buf))
self.buf.clear()
self.assertEqual(0, len(self.buf))
self.assertEqual(0, len(self.buf._queue))
class TestObjFile(unittest.TestCase):
"""Test Object File class."""
def setUp(self):
self.bucket = 'sentinel.bucket'
self.name = 'sentinel.name'
def test_init_wrong_mode(self):
"""Test 'rw' mode is not supported."""
self.assertRaises(IOError, gcs_object.GCSObjFile, self.bucket,
self.name, mock.sentinel.credentials, 'rw')
def test_init_wrong_chunk(self):
"""Test chunksize must be 'rw' mode is not supported."""
self.assertRaises(AssertionError, gcs_object.GCSObjFile, self.bucket,
self.name, mock.sentinel.credentials, 'r',
gcs_object.BLOCK_MULTIPLE + 1)
@mock.patch('requests.get', **{'return_value.status_code': 404})
def test_init_read_not_found(self, get_mock):
access_token = 'access_token'
creds = mock.Mock()
creds.get_access_token.return_value.access_token = access_token
self.assertRaises(IOError, gcs_object.GCSObjFile, self.bucket,
self.name, creds, 'r')
@mock.patch('requests.get', **{'return_value.status_code': 200})
def test_init_read_non_json(self, get_mock):
get_mock.return_value.content = 'non_json'
access_token = 'access_token'
creds = mock.Mock()
creds.get_access_token.return_value.access_token = access_token
self.assertRaises(errors.Error, gcs_object.GCSObjFile, self.bucket,
self.name, creds, 'r')
@mock.patch('requests.get', **{'return_value.status_code': 404})
def test_init_read_quote_data(self, get_mock):
access_token = 'access_token'
creds = mock.Mock()
creds.get_access_token.return_value.access_token = access_token
name = 'var/log/message.log'
bucket = '?mybucket'
expected_url = gcs_object.GCSObjFile._URL % ('%3Fmybucket',
'var%2Flog%2Fmessage.log')
self.assertRaises(IOError, gcs_object.GCSObjFile, bucket, name, creds,
'r')
get_mock.assert_called_once_with(expected_url, headers=mock.ANY,
params={'fields': 'size',
'generation': None})
@mock.patch('requests.get', **{'return_value.status_code': 200})
def test_init_read(self, get_mock):
size = 123
get_mock.return_value.content = '{"size": "%s"}' % size
access_token = 'access_token'
chunk = gcs_object.DEFAULT_BLOCK_SIZE * 2
creds = mock.Mock()
creds.authorization = 'Bearer ' + access_token
f = gcs_object.GCSObjFile(self.bucket, self.name, creds, 'r', chunk,
mock.sentinel.retry_params)
self.assertEqual(self.bucket, f.bucket)
self.assertEqual(self.name, f.name)
self.assertEqual(size, f.size)
self.assertEqual(creds, f._credentials)
self.assertEqual(mock.sentinel.retry_params, f._retry_params)
self.assertEqual(chunk, f._chunksize)
self.assertEqual(0, len(f._buffer))
self.assertTrue(f._is_readable())
self.assertFalse(f._is_writable())
self.assertFalse(f.closed)
self.assertEqual(0, f.tell())
self.assertEqual(1, get_mock.call_count)
location = get_mock.call_args[0][0]
self.assertIn(self.bucket, location)
self.assertIn(self.name, location)
headers = get_mock.call_args[1]['headers']
self.assertEqual('Bearer ' + access_token, headers['Authorization'])
def _open(self, mode):
if mode == 'r':
method = 'requests.get'
else:
method = 'requests.post'
self.access_token = 'access_token'
creds = mock.Mock()
creds.authorization = 'Bearer ' + self.access_token
ret_val = mock.Mock(status_code=200, content='{"size": "123"}',
headers={'Location': mock.sentinel.location})
with mock.patch(method, return_value=ret_val):
f = gcs_object.GCSObjFile(self.bucket, self.name, creds, mode)
return f
def test_write_on_read_file(self):
f = self._open('r')
self.assertRaises(IOError, f.write, '')
def test_close_read_file(self):
f = self._open('r')
f.close()
self.assertTrue(f.closed)
# A second close call will do nothing
f.close()
self.assertTrue(f.closed)
def test_operations_on_closed_read_file(self):
f = self._open('r')
f.close()
self.assertRaises(IOError, f.read, '')
self.assertRaises(IOError, f.write, '')
self.assertRaises(IOError, f.tell)
self.assertRaises(IOError, f.seek, 0)
def test_context_manager(self):
with self._open('r') as f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
@mock.patch('requests.post', **{'return_value.status_code': 404})
def test_init_write_not_found(self, head_mock):
access_token = 'access_token'
creds = mock.Mock()
creds.get_access_token.return_value.access_token = access_token
self.assertRaises(IOError, gcs_object.GCSObjFile, self.bucket,
self.name, creds, 'w')
@mock.patch('requests.post', **{'return_value.status_code': 200})
def test_init_write(self, post_mock):
access_token = 'access_token'
creds = mock.Mock()
creds.authorization = 'Bearer ' + access_token
f = gcs_object.GCSObjFile(self.bucket, self.name, creds, 'w',
gcs_object.DEFAULT_BLOCK_SIZE * 2,
mock.sentinel.retry_params)
self.assertEqual(self.bucket, f.bucket)
self.assertEqual(self.name, f.name)
self.assertEqual(0, f.size)
self.assertEqual(creds, f._credentials)
self.assertEqual(mock.sentinel.retry_params, f._retry_params)
self.assertEqual(0, len(f._buffer))
self.assertFalse(f._is_readable())
self.assertTrue(f._is_writable())
self.assertFalse(f.closed)
self.assertEqual(0, f.tell())
self.assertEqual(1, post_mock.call_count)
location = post_mock.call_args[0][0]
self.assertIn(str(self.bucket), location)
params = post_mock.call_args[1]['params']
self.assertIn(self.name, params.values())
headers = post_mock.call_args[1]['headers']
self.assertEqual('Bearer ' + access_token, headers['Authorization'])
def test_read_on_write_file(self):
f = self._open('w')
self.assertRaises(IOError, f.read)
@mock.patch('gcs_client.gcs_object.GCSObjFile._send_data')
def test_close_write_file(self, send_mock):
f = self._open('w')
f.close()
send_mock.assert_called_once_with(b'', 0, finalize=True)
send_mock.reset_mock()
self.assertTrue(f.closed)
# A second close call will do nothing
f.close()
self.assertFalse(send_mock.called)
self.assertTrue(f.closed)
@mock.patch('gcs_client.gcs_object.GCSObjFile._send_data')
def test_operations_on_closed_write_file(self, send_mock):
f = self._open('w')
f.close()
self.assertRaises(IOError, f.read, '')
self.assertRaises(IOError, f.write, '')
self.assertRaises(IOError, f.tell)
self.assertRaises(IOError, f.seek, 0)
def _check_get_call(self, get_mock, index, begin, end):
call_args = get_mock.call_args_list[index]
location = call_args[0][0]
self.assertIn(str(mock.sentinel.bucket), location)
self.assertIn(str(mock.sentinel.name), location)
params = call_args[1]['params']
self.assertEqual('media', params['alt'])
headers = call_args[1]['headers']
self.assertEqual('Bearer ' + self.access_token,
headers['Authorization'])
self.assertEqual('bytes=%s-%s' % (begin, end - 1), headers['Range'])
@mock.patch('requests.get')
def test_read_all_fits_in_1_chunk(self, get_mock):
f = self._open('r')
expected_data = b'0' * (f._chunksize - 1)
get_mock.side_effect = [mock.Mock(status_code=200, headers={},
content=expected_data)]
data = f.read()
self.assertEqual(expected_data, data)
self.assertEqual(1, get_mock.call_count)
self._check_get_call(get_mock, 0, 0, f._chunksize)
# Next call to read will not need to call server
get_mock.reset_mock()
data = f.read()
self.assertEqual('', data)
self.assertFalse(get_mock.called)
f.close()
@mock.patch('requests.put', **{'return_value.status_code': 200})
def test_write_all_fits_in_1_chunk(self, put_mock):
f = self._open('w')
data = b'*' * (f._chunksize - 1)
f.write(data)
# Since we haven't written enough data we shouldn't have sent anything
self.assertFalse(put_mock.called)
# Closing the file will trigger sending the data
f.close()
headers = {'Authorization': 'Bearer ' + self.access_token,
'Content-Range': 'bytes 0-%s/%s' % (len(data) - 1,
len(data))}
put_mock.assert_called_once_with(mock.sentinel.location, data=data,
headers=headers)
@mock.patch('requests.put')
def test_write_all_multiple_chunks(self, put_mock):
put_mock.side_effect = [mock.Mock(status_code=308),
mock.Mock(status_code=200)]
f = self._open('w')
data1 = b'*' * (f._chunksize - 1)
f.write(data1)
# Since we haven't written enough data we shouldn't have sent anything
self.assertFalse(put_mock.called)
data2 = b'-' * f._chunksize
f.write(data2)
# This second write will trigger 1 data send
headers = {'Authorization': 'Bearer ' + self.access_token,
'Content-Range': 'bytes 0-%s/*' % (f._chunksize - 1)}
put_mock.assert_called_once_with(mock.sentinel.location,
data=data1 + data2[0:1],
headers=headers)
put_mock.reset_mock()
# Closing the file will trigger sending the rest of the data
f.close()
headers['Content-Range'] = 'bytes %s-%s/%s' % (f._chunksize,
(f._chunksize * 2) - 2,
f._chunksize * 2 - 1)
put_mock.assert_called_once_with(mock.sentinel.location,
data=data2[1:],
headers=headers)
@mock.patch('requests.put', **{'return_value.status_code': 200})
def test_write_exactly_1_chunk(self, put_mock):
put_mock.side_effect = [mock.Mock(status_code=308),
mock.Mock(status_code=200)]
f = self._open('w')
data = b'*' * f._chunksize
# This will trigger sending the data
f.write(data)
headers = {'Authorization': 'Bearer ' + self.access_token,
'Content-Range': 'bytes 0-%s/*' % (len(data) - 1)}
put_mock.assert_called_once_with(mock.sentinel.location, data=data,
headers=headers)
# Closing the file will trigger sending the finalization of the file
put_mock.reset_mock()
f.close()
headers['Content-Range'] = 'bytes */%s' % f._chunksize
put_mock.assert_called_once_with(mock.sentinel.location, data=b'',
headers=headers)
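# Note on the chunked write tests above: they exercise the resumable upload
# convention where each intermediate PUT carries
# 'Content-Range: bytes <start>-<end>/*' and is acknowledged with HTTP 308,
# while the final PUT declares the total object size
# ('bytes <start>-<end>/<total>', or 'bytes */<total>' when finalizing with no
# remaining data) and is acknowledged with HTTP 200.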
@mock.patch('requests.get')
def test_read_all_multiple_chunks(self, get_mock):
f = self._open('r')
expected_data = b'0' * ((f._chunksize - 1) * 2)
get_mock.side_effect = [
mock.Mock(status_code=206, content=expected_data[:f._chunksize]),
mock.Mock(status_code=200, content=expected_data[f._chunksize:])]
data = f.read()
self.assertEqual(expected_data, data)
self.assertEqual(2, get_mock.call_count)
offsets = ((0, f._chunksize), (f._chunksize, 2 * f._chunksize))
for i in range(2):
self._check_get_call(get_mock, i, offsets[i][0], offsets[i][1])
# Next call to read will not need to call server
get_mock.reset_mock()
data = f.read()
self.assertEqual('', data)
self.assertFalse(get_mock.called)
f.close()
@mock.patch('requests.get')
def test_read_all_multiple_chunks_exact_size_no_header(self, get_mock):
f = self._open('r')
expected_data = b'0' * (f._chunksize * 2)
get_mock.side_effect = [
mock.Mock(status_code=206, content=expected_data[:f._chunksize]),
mock.Mock(status_code=206, content=expected_data[f._chunksize:]),
mock.Mock(status_code=416, content='Error blah, blah')]
data = f.read()
self.assertEqual(expected_data, data)
self.assertEqual(3, get_mock.call_count)
offsets = ((0, f._chunksize), (f._chunksize, 2 * f._chunksize),
(2 * f._chunksize, 3 * f._chunksize))
for i in range(3):
self._check_get_call(get_mock, i, offsets[i][0], offsets[i][1])
# Next call to read will not need to call server
get_mock.reset_mock()
data = f.read()
self.assertEqual('', data)
self.assertFalse(get_mock.called)
f.close()
@mock.patch('requests.get')
def test_read_all_multiple_chunks_exact_size_with_header(self, get_mock):
f = self._open('r')
offsets = ((0, f._chunksize), (f._chunksize, 2 * f._chunksize))
expected_data = b'0' * (f._chunksize * 2)
ranges = [{'Content-Range': 'bytes=%s-%s/%s' % (o[0], o[1] - 1,
offsets[-1][1])}
for o in offsets]
get_mock.side_effect = [
mock.Mock(status_code=206, content=expected_data[:f._chunksize],
headers=ranges[0]),
mock.Mock(status_code=206, content=expected_data[f._chunksize:],
headers=ranges[1])]
data = f.read()
self.assertEqual(expected_data, data)
self.assertEqual(2, get_mock.call_count)
for i in range(2):
self._check_get_call(get_mock, i, offsets[i][0], offsets[i][1])
# Next call to read will not need to call server
get_mock.reset_mock()
data = f.read()
self.assertEqual('', data)
self.assertFalse(get_mock.called)
f.close()
@mock.patch('requests.get')
def test_read_size_multiple_chunks(self, get_mock):
f = self._open('r')
offsets = ((0, f._chunksize), (f._chunksize, 2 * f._chunksize))
expected_data = b'0' * ((f._chunksize - 1) * 2)
get_mock.side_effect = [
mock.Mock(status_code=206, content=expected_data[:f._chunksize]),
mock.Mock(status_code=200, content=expected_data[f._chunksize:])]
size = int(f._chunksize / 4)
data = f.read(size)
self.assertEqual(expected_data[:size], data)
self.assertEqual(1, get_mock.call_count)
self._check_get_call(get_mock, 0, offsets[0][0], offsets[0][1])
get_mock.reset_mock()
data = f.read(size)
self.assertEqual(expected_data[size:2*size], data)
self.assertFalse(get_mock.called)
data = f.read(0)
self.assertEqual('', data)
self.assertFalse(get_mock.called)
data = f.read(2 * f._chunksize)
self.assertEqual(expected_data[2*size:], data)
self._check_get_call(get_mock, 0, offsets[1][0], offsets[1][1])
# Next call to read will not need to call server
get_mock.reset_mock()
data = f.read()
self.assertEqual('', data)
self.assertFalse(get_mock.called)
f.close()
@mock.patch('requests.get', **{'return_value.status_code': 404})
def test_read_error(self, get_mock):
with self._open('r') as f:
self.assertRaises(gcs_object.errors.NotFound, f.read)
@mock.patch('requests.get')
def test_get_data_size_0(self, get_mock):
get_mock.return_value = mock.Mock(status_code=200, content='data')
with self._open('r') as f:
data = f._get_data(0)
self.assertEqual('', data)
self.assertFalse(get_mock.called)
def _check_seek(self, offset, whence, expected_initial=None):
with mock.patch('requests.get') as get_mock:
block = gcs_object.DEFAULT_BLOCK_SIZE
f = self._open('r')
f.size = 4 * block
if expected_initial is None:
expected_initial = f.size
expected_data = b'0' * block
get_mock.return_value = mock.Mock(status_code=206,
content=expected_data)
f.read(2 * block)
f.seek(offset, whence)
self.assertEqual(0, len(f._buffer))
f.read(block)
offsets = ((0, block), (block, 2 * block),
(expected_initial, expected_initial + block))
for i in range(len(offsets)):
self._check_get_call(get_mock, i, offsets[i][0], offsets[i][1])
f.close()
def test_seek_read_set(self):
self._check_seek(10, os.SEEK_SET, 10)
def test_seek_read_set_beyond_bof(self):
self._check_seek(-10, os.SEEK_SET, 0)
def test_seek_read_set_beyond_eof(self):
self._check_seek(six.MAXSIZE, os.SEEK_SET)
def test_seek_read_cur(self):
self._check_seek(10, os.SEEK_CUR,
10 + (2 * gcs_object.DEFAULT_BLOCK_SIZE))
def test_seek_read_cur_negative(self):
self._check_seek(-10, os.SEEK_CUR,
-10 + (2 * gcs_object.DEFAULT_BLOCK_SIZE))
def test_seek_read_cur_beyond_bof(self):
self._check_seek(-3 * gcs_object.DEFAULT_BLOCK_SIZE, os.SEEK_CUR, 0)
def test_seek_read_cur_beyond_eof(self):
self._check_seek(six.MAXSIZE, os.SEEK_CUR,
4 * gcs_object.DEFAULT_BLOCK_SIZE)
def test_seek_read_end_negative(self):
self._check_seek(-10, os.SEEK_END,
-10 + (4 * gcs_object.DEFAULT_BLOCK_SIZE))
def test_seek_read_end_beyond_bof(self):
self._check_seek(-six.MAXSIZE, os.SEEK_END, 0)
def test_seek_read_end_beyond_eof(self):
self._check_seek(six.MAXSIZE, os.SEEK_END,
4 * gcs_object.DEFAULT_BLOCK_SIZE)
@mock.patch('requests.get')
def test_seek_read_wrong_whence(self, get_mock):
with self._open('r') as f:
self.assertRaises(ValueError, f.seek, 0, -1)
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests to the conductor service."""
from oslo.config import cfg
from nova.conductor import manager
from nova.conductor import rpcapi
from nova import exception as exc
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova import utils
conductor_opts = [
cfg.BoolOpt('use_local',
default=False,
help='Perform nova-conductor operations locally'),
cfg.StrOpt('topic',
default='conductor',
help='the topic conductor nodes listen on'),
cfg.StrOpt('manager',
default='nova.conductor.manager.ConductorManager',
help='full class name for the Manager for conductor'),
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
"""A local version of the conductor API that does database updates
locally instead of via RPC"""
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(manager.ConductorManager())
def wait_until_ready(self, context, *args, **kwargs):
# nothing to wait for in the local case.
pass
def ping(self, context, arg, timeout=None):
return self._manager.ping(context, arg)
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'compute')
def instance_get(self, context, instance_id):
return self._manager.instance_get(context, instance_id)
def instance_get_by_uuid(self, context, instance_uuid):
return self._manager.instance_get_by_uuid(context, instance_uuid)
def instance_destroy(self, context, instance):
return self._manager.instance_destroy(context, instance)
def instance_get_all(self, context):
return self._manager.instance_get_all(context)
def instance_get_all_by_host(self, context, host):
return self._manager.instance_get_all_by_host(context, host)
def instance_get_all_by_host_and_node(self, context, host, node):
return self._manager.instance_get_all_by_host(context, host, node)
def instance_get_all_by_filters(self, context, filters,
sort_key='created_at',
sort_dir='desc'):
return self._manager.instance_get_all_by_filters(context,
filters,
sort_key,
sort_dir)
def instance_get_all_hung_in_rebooting(self, context, timeout):
return self._manager.instance_get_all_hung_in_rebooting(context,
timeout)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self._manager.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
def instance_info_cache_update(self, context, instance, values):
return self._manager.instance_info_cache_update(context,
instance,
values)
def instance_info_cache_delete(self, context, instance):
return self._manager.instance_info_cache_delete(context, instance)
def instance_type_get(self, context, instance_type_id):
return self._manager.instance_type_get(context, instance_type_id)
def instance_fault_create(self, context, values):
return self._manager.instance_fault_create(context, values)
def migration_get(self, context, migration_id):
return self._manager.migration_get(context, migration_id)
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
return self._manager.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
def migration_get_in_progress_by_host_and_node(self, context, host, node):
return self._manager.migration_get_in_progress_by_host_and_node(
context, host, node)
def migration_create(self, context, instance, values):
return self._manager.migration_create(context, instance, values)
def migration_update(self, context, migration, status):
return self._manager.migration_update(context, migration, status)
def aggregate_host_add(self, context, aggregate, host):
return self._manager.aggregate_host_add(context, aggregate, host)
def aggregate_host_delete(self, context, aggregate, host):
return self._manager.aggregate_host_delete(context, aggregate, host)
def aggregate_get(self, context, aggregate_id):
return self._manager.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(self, context, host, key=None):
return self._manager.aggregate_get_by_host(context, host, key)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
return self._manager.aggregate_metadata_add(context, aggregate,
metadata,
set_delete)
def aggregate_metadata_delete(self, context, aggregate, key):
return self._manager.aggregate_metadata_delete(context,
aggregate,
key)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
return self._manager.aggregate_metadata_get_by_host(context,
host,
key)
def bw_usage_get(self, context, uuid, start_period, mac):
return self._manager.bw_usage_update(context, uuid, mac, start_period)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed=None):
return self._manager.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out,
last_ctr_in, last_ctr_out,
last_refreshed)
def get_backdoor_port(self, context, host):
raise exc.InvalidRequest
def security_group_get_by_instance(self, context, instance):
return self._manager.security_group_get_by_instance(context, instance)
def security_group_rule_get_by_security_group(self, context, secgroup):
return self._manager.security_group_rule_get_by_security_group(
context, secgroup)
def provider_fw_rule_get_all(self, context):
return self._manager.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return self._manager.agent_build_get_by_triple(context, hypervisor,
os, architecture)
def block_device_mapping_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values,
create=True)
def block_device_mapping_update(self, context, bdm_id, values):
values = dict(values)
values['id'] = bdm_id
return self._manager.block_device_mapping_update_or_create(
context, values, create=False)
def block_device_mapping_update_or_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values)
def block_device_mapping_get_all_by_instance(self, context, instance):
return self._manager.block_device_mapping_get_all_by_instance(
context, instance)
def block_device_mapping_destroy(self, context, bdms):
return self._manager.block_device_mapping_destroy(context, bdms=bdms)
def block_device_mapping_destroy_by_instance_and_device(self, context,
instance,
device_name):
return self._manager.block_device_mapping_destroy(
context, instance=instance, device_name=device_name)
def block_device_mapping_destroy_by_instance_and_volume(self, context,
instance,
volume_id):
return self._manager.block_device_mapping_destroy(
context, instance=instance, volume_id=volume_id)
def vol_get_usage_by_time(self, context, start_time):
return self._manager.vol_get_usage_by_time(context, start_time)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
return self._manager.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance, last_refreshed,
update_totals)
def service_get_all(self, context):
return self._manager.service_get_all_by(context)
def service_get_all_by_topic(self, context, topic):
return self._manager.service_get_all_by(context, topic=topic)
def service_get_all_by_host(self, context, host):
return self._manager.service_get_all_by(context, host=host)
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host)
def service_get_by_compute_host(self, context, host):
result = self._manager.service_get_all_by(context, 'compute', host)
# FIXME(comstud): A major revision bump to 2.0 should return a
# single entry, so we should just return 'result' at that point.
return result[0]
def service_get_by_args(self, context, host, binary):
return self._manager.service_get_all_by(context, host=host,
binary=binary)
def action_event_start(self, context, values):
return self._manager.action_event_start(context, values)
def action_event_finish(self, context, values):
return self._manager.action_event_finish(context, values)
def service_create(self, context, values):
return self._manager.service_create(context, values)
def service_destroy(self, context, service_id):
return self._manager.service_destroy(context, service_id)
def compute_node_create(self, context, values):
return self._manager.compute_node_create(context, values)
def compute_node_update(self, context, node, values, prune_stats=False):
return self._manager.compute_node_update(context, node, values,
prune_stats)
def service_update(self, context, service, values):
return self._manager.service_update(context, service, values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
return self._manager.task_log_get(context, task_name, begin, end,
host, state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
return self._manager.task_log_begin_task(context, task_name,
begin, end, host,
task_items, message)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
return self._manager.task_log_end_task(context, task_name,
begin, end, host,
errors, message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
return self._manager.notify_usage_exists(
context, instance, current_period, ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, *args):
return self._manager.security_groups_trigger_handler(context,
event, args)
def security_groups_trigger_members_refresh(self, context, group_ids):
return self._manager.security_groups_trigger_members_refresh(context,
group_ids)
def network_migrate_instance_start(self, context, instance, migration):
return self._manager.network_migrate_instance_start(context,
instance,
migration)
def network_migrate_instance_finish(self, context, instance, migration):
return self._manager.network_migrate_instance_finish(context,
instance,
migration)
def quota_commit(self, context, reservations):
return self._manager.quota_commit(context, reservations)
def quota_rollback(self, context, reservations):
return self._manager.quota_rollback(context, reservations)
def get_ec2_ids(self, context, instance):
return self._manager.get_ec2_ids(context, instance)
def compute_stop(self, context, instance, do_cast=True):
return self._manager.compute_stop(context, instance, do_cast)
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
def __init__(self):
self.conductor_rpcapi = rpcapi.ConductorAPI()
def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
'''Wait until a conductor service is up and running.
This method calls the remote ping() method on the conductor topic until
it gets a response. It starts with a shorter timeout in the loop
(early_timeout) up to early_attempts number of tries. It then drops
back to the globally configured timeout for rpc calls for each retry.
'''
attempt = 0
timeout = early_timeout
while True:
# NOTE(danms): Try ten times with a short timeout, and then punt
# to the configured RPC timeout after that
if attempt == early_attempts:
timeout = None
attempt += 1
# NOTE(russellb): This is running during service startup. If we
# allow an exception to be raised, the service will shut down.
# This may fail the first time around if nova-conductor wasn't
# running when this service started.
try:
self.ping(context, '1.21 GigaWatts', timeout=timeout)
break
except rpc_common.Timeout as e:
LOG.warning(_('Timed out waiting for nova-conductor. '
'Is it running? Or did this service start '
'before nova-conductor?'))
def ping(self, context, arg, timeout=None):
return self.conductor_rpcapi.ping(context, arg, timeout)
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self.conductor_rpcapi.instance_update(context, instance_uuid,
updates, 'conductor')
def instance_destroy(self, context, instance):
return self.conductor_rpcapi.instance_destroy(context, instance)
def instance_get(self, context, instance_id):
return self.conductor_rpcapi.instance_get(context, instance_id)
def instance_get_by_uuid(self, context, instance_uuid):
return self.conductor_rpcapi.instance_get_by_uuid(context,
instance_uuid)
def instance_get_all(self, context):
return self.conductor_rpcapi.instance_get_all(context)
def instance_get_all_by_host(self, context, host):
return self.conductor_rpcapi.instance_get_all_by_host(context, host)
def instance_get_all_by_host_and_node(self, context, host, node):
return self.conductor_rpcapi.instance_get_all_by_host(context,
host, node)
def instance_get_all_by_filters(self, context, filters,
sort_key='created_at',
sort_dir='desc'):
return self.conductor_rpcapi.instance_get_all_by_filters(context,
filters,
sort_key,
sort_dir)
def instance_get_all_hung_in_rebooting(self, context, timeout):
return self.conductor_rpcapi.instance_get_all_hung_in_rebooting(
context, timeout)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self.conductor_rpcapi.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
def instance_info_cache_update(self, context, instance, values):
return self.conductor_rpcapi.instance_info_cache_update(context,
instance, values)
def instance_info_cache_delete(self, context, instance):
return self.conductor_rpcapi.instance_info_cache_delete(context,
instance)
def instance_type_get(self, context, instance_type_id):
return self.conductor_rpcapi.instance_type_get(context,
instance_type_id)
def instance_fault_create(self, context, values):
return self.conductor_rpcapi.instance_fault_create(context, values)
def migration_get(self, context, migration_id):
return self.conductor_rpcapi.migration_get(context, migration_id)
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
crpcapi = self.conductor_rpcapi
return crpcapi.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
def migration_get_in_progress_by_host_and_node(self, context, host, node):
crpcapi = self.conductor_rpcapi
return crpcapi.migration_get_in_progress_by_host_and_node(context,
host, node)
def migration_create(self, context, instance, values):
return self.conductor_rpcapi.migration_create(context, instance,
values)
def migration_update(self, context, migration, status):
return self.conductor_rpcapi.migration_update(context, migration,
status)
def aggregate_host_add(self, context, aggregate, host):
return self.conductor_rpcapi.aggregate_host_add(context, aggregate,
host)
def aggregate_host_delete(self, context, aggregate, host):
return self.conductor_rpcapi.aggregate_host_delete(context, aggregate,
host)
def aggregate_get(self, context, aggregate_id):
return self.conductor_rpcapi.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(self, context, host, key=None):
return self.conductor_rpcapi.aggregate_get_by_host(context, host, key)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
return self.conductor_rpcapi.aggregate_metadata_add(context, aggregate,
metadata,
set_delete)
def aggregate_metadata_delete(self, context, aggregate, key):
return self.conductor_rpcapi.aggregate_metadata_delete(context,
aggregate,
key)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
return self.conductor_rpcapi.aggregate_metadata_get_by_host(context,
host,
key)
def bw_usage_get(self, context, uuid, start_period, mac):
return self.conductor_rpcapi.bw_usage_update(context, uuid, mac,
start_period)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed=None):
return self.conductor_rpcapi.bw_usage_update(
context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed)
#NOTE(mtreinish): This doesn't work on multiple conductors without any
# topic calculation in conductor_rpcapi. So the host param isn't used
# currently.
def get_backdoor_port(self, context, host):
return self.conductor_rpcapi.get_backdoor_port(context)
def security_group_get_by_instance(self, context, instance):
return self.conductor_rpcapi.security_group_get_by_instance(context,
instance)
def security_group_rule_get_by_security_group(self, context, secgroup):
return self.conductor_rpcapi.security_group_rule_get_by_security_group(
context, secgroup)
def provider_fw_rule_get_all(self, context):
return self.conductor_rpcapi.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return self.conductor_rpcapi.agent_build_get_by_triple(context,
hypervisor,
os,
architecture)
def block_device_mapping_create(self, context, values):
return self.conductor_rpcapi.block_device_mapping_update_or_create(
context, values, create=True)
def block_device_mapping_update(self, context, bdm_id, values):
values = dict(values)
values['id'] = bdm_id
return self.conductor_rpcapi.block_device_mapping_update_or_create(
context, values, create=False)
def block_device_mapping_update_or_create(self, context, values):
return self.conductor_rpcapi.block_device_mapping_update_or_create(
context, values)
def block_device_mapping_get_all_by_instance(self, context, instance):
return self.conductor_rpcapi.block_device_mapping_get_all_by_instance(
context, instance)
def block_device_mapping_destroy(self, context, bdms):
return self.conductor_rpcapi.block_device_mapping_destroy(context,
bdms=bdms)
def block_device_mapping_destroy_by_instance_and_device(self, context,
instance,
device_name):
return self.conductor_rpcapi.block_device_mapping_destroy(
context, instance=instance, device_name=device_name)
def block_device_mapping_destroy_by_instance_and_volume(self, context,
instance,
volume_id):
return self.conductor_rpcapi.block_device_mapping_destroy(
context, instance=instance, volume_id=volume_id)
def vol_get_usage_by_time(self, context, start_time):
return self.conductor_rpcapi.vol_get_usage_by_time(context, start_time)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
return self.conductor_rpcapi.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance, last_refreshed,
update_totals)
def service_get_all(self, context):
return self.conductor_rpcapi.service_get_all_by(context)
def service_get_all_by_topic(self, context, topic):
return self.conductor_rpcapi.service_get_all_by(context, topic=topic)
def service_get_all_by_host(self, context, host):
return self.conductor_rpcapi.service_get_all_by(context, host=host)
def service_get_by_host_and_topic(self, context, host, topic):
return self.conductor_rpcapi.service_get_all_by(context, topic, host)
def service_get_by_compute_host(self, context, host):
result = self.conductor_rpcapi.service_get_all_by(context, 'compute',
host)
# FIXME(comstud): A major revision bump to 2.0 should return a
# single entry, so we should just return 'result' at that point.
return result[0]
def service_get_by_args(self, context, host, binary):
return self.conductor_rpcapi.service_get_all_by(context, host=host,
binary=binary)
def action_event_start(self, context, values):
return self.conductor_rpcapi.action_event_start(context, values)
def action_event_finish(self, context, values):
return self.conductor_rpcapi.action_event_finish(context, values)
def service_create(self, context, values):
return self.conductor_rpcapi.service_create(context, values)
def service_destroy(self, context, service_id):
return self.conductor_rpcapi.service_destroy(context, service_id)
def compute_node_create(self, context, values):
return self.conductor_rpcapi.compute_node_create(context, values)
def compute_node_update(self, context, node, values, prune_stats=False):
return self.conductor_rpcapi.compute_node_update(context, node,
values, prune_stats)
def service_update(self, context, service, values):
return self.conductor_rpcapi.service_update(context, service, values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
return self.conductor_rpcapi.task_log_get(context, task_name, begin,
end, host, state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
return self.conductor_rpcapi.task_log_begin_task(context, task_name,
begin, end, host,
task_items, message)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
return self.conductor_rpcapi.task_log_end_task(context, task_name,
begin, end, host,
errors, message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
return self.conductor_rpcapi.notify_usage_exists(
context, instance, current_period, ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, *args):
return self.conductor_rpcapi.security_groups_trigger_handler(context,
event,
args)
def security_groups_trigger_members_refresh(self, context, group_ids):
return self.conductor_rpcapi.security_groups_trigger_members_refresh(
context, group_ids)
def network_migrate_instance_start(self, context, instance, migration):
return self.conductor_rpcapi.network_migrate_instance_start(context,
instance,
migration)
def network_migrate_instance_finish(self, context, instance, migration):
return self.conductor_rpcapi.network_migrate_instance_finish(context,
instance,
migration)
def quota_commit(self, context, reservations):
return self.conductor_rpcapi.quota_commit(context, reservations)
def quota_rollback(self, context, reservations):
return self.conductor_rpcapi.quota_rollback(context, reservations)
def get_ec2_ids(self, context, instance):
return self.conductor_rpcapi.get_ec2_ids(context, instance)
def compute_stop(self, context, instance, do_cast=True):
return self.conductor_rpcapi.compute_stop(context, instance, do_cast)
|
|
import uuid
import logging
import time
import binascii
from utils import table_exists
class Index (object):
"""
An Index maps an entity property to entity ids, so that look-ups can be
performed on properties other than the entity id. The following is an
example. You have an entity that holds user information (the dict you
pass in contains the parameter defaults):
user = Entity('users',{
'name':''
})
new_user = user.create({
'name':'Ian'
})
But what if you want to find entities by their name and not just
their 16-byte binary uuid? Well then you need to create an index
for that property.
user.add_index(Index(user, property='name'))
Now you can perform .find or .count operations on your User Entity
Store like the following:
ian = user.find(name='Ian')
The following is an example of a relationship between two entities.
If you have a user entity, and each user is connected with a team
then you can create a relationship between the two like so.
user = Entity('users', {
'name':'My Name',
'team_id':''
})
team = Entity('teams', {
'name':'My Team'
})
Then you can create an index like the following to represent a
relationship between user and team (in this case a many-to-one):
user.add_index(Index(user, property='team_id', relationship=True))
If you want to create a many-to-many relationship you can do the
following:
user.add_index(Index(user, property='team_id', relationship=True))
team.add_index(Index(team, property='user_id', relationship=True))
If you want a one-to-one relationship you will need to enforce that
constraint yourself by performing some sort of .count() check.
Finally, to find all users in a team is simple.
team_members = user.find(team_id=cur_team['id'])
Only one index per property.
"""
def __init__ (self, entity, property, **params):
"""
Class Constructor
entity
The entity you want to index, it will use the same database.
If sharding is turned on, then it will use the same db pool.
property
The property you want to index
Optional Parameters:
shard_on
The value you want to shard users based upon. If you want to
shard based upon user_id then set shard_on = 'user_id'.
If no value is given, then no sharding is performed.
relationship
Set value to true if the property is a uuid meaning that the
value is a pointer to another entity in the database.
multi
Set to true (assumed false otherwise) if the indexed field is a list
of values. In that case the index automatically creates n rows in the
index table for the n values in the list.
"""
# Setup the index
self.table = '%s_%s_index' % (entity.table, property)
self.entity_key_id = entity.key_id
self.property = property
self.db = entity.db
self.shard_on = None
self.relationship = False
self.multi_mode = False
if 'shard_on' in params:
self.shard_on = params['shard_on']
if 'relationship' in params:
self.relationship = params['relationship']
if 'multi' in params:
self.multi_mode = params['multi']
def create (self, params):
"""
Takes an Entity dictionary and then creates the associated indexes.
"""
sql = 'INSERT INTO %s (%s, %s, datetime) VALUES ' \
'(%%s, %%s, %%s)' % (
self.table,
self.entity_key_id,
self.property
)
logging.info('Insert into index table[%s]: %s --> %s %s = %s' % (
self.table,
params['id'],
params[self.property],
self.entity_key_id,
params['id']
))
logging.debug('SQL Insert Index Syntax: %s' %(sql))
query = self.db.execute(sql,
binascii.a2b_hex(params['id']),
params[self.property],
int(time.time())
)
def update (self, params):
"""
Updates the index row(s) for the given parameter
"""
sql = 'UPDATE %s SET %s = %%s WHERE %s = %%s LIMIT 1' % (
self.table,
self.property,
self.entity_key_id
)
logging.info('Updating index table[%s]: %s --> %s for %s=%s' % (
self.table,
self.property,
params[self.property],
self.entity_key_id,
params['id']
))
logging.debug('SQL Update Index Syntax: %s' % (sql))
self.db.execute(sql,
params[self.property],
binascii.a2b_hex(params['id'])
)
def remove (self, params):
"""
Removes the index row(s) for the given parameters associated with
the entity key id.
"""
sql = 'DELETE FROM %s WHERE %s = %%s' % (
self.table,
self.entity_key_id
)
logging.info('Deleting from index table[%s]: %s --> %s' % (
self.table,
self.entity_key_id,
params['id']
))
logging.debug('SQL Delete Index Syntax: %s' % (sql))
self.db.execute(sql, binascii.a2b_hex(params['id']))
def find (self, values):
"""
Returns a list of uuid instances that represent the id's for
entities that are indexed by this instance.
If you are performing a relationship based look up, the id's for
that relationship must also be instance of uuid's.
"""
if not type(values) == list:
values = [values]
if len(values) == 0:
return None
if self.relationship:
values = [binascii.a2b_hex(value) for value in values]
# Construct the sql and then perform our query.
sql = 'SELECT %s FROM %s WHERE %s IN (%s)' % (
self.entity_key_id,
self.table,
self.property,
','.join(['%s']*len(values))
)
logging.debug('Selecting Index SQL: %s' % (sql))
logging.info('Performing Index[%s] Lookup: %s' % (
self.table,
values
))
query = self.db.query(sql, *values)
if not query:
return None
return [binascii.b2a_hex(row[self.entity_key_id]) for row in query]
def count (self, values):
"""
Returns an integer for the number of matching rows in the index.
"""
if type(values) != list:
values = [values]
if len(values) == 0:
return 0
if self.relationship:
values = [binascii.a2b_hex(value) for value in values]
sql = 'SELECT Count(*) num FROM %s WHERE %s IN (%s)' % (
self.table,
self.property,
','.join(['%s']*len(values))
)
logging.debug('SQL Index Count[%s=%s]: %s' % (
self.property,
values,
sql
))
query = self.db.query(sql, *values)
return query[0]['num']
def create_table (self):
"""
Creates the table in the database for this index if and only if the
table does not already exist.
NOTE: This method is used by the tools script for creating the
tables for indexes.
"""
if table_exists(self):
logging.warning('Index Table %s already exists in database' % (
self.table
))
return
logging.info('Creating Index Table %s' % (self.table))
property_type = 'varchar(255)'
if self.relationship:
property_type = 'binary(16)'
sql = """CREATE TABLE IF NOT EXISTS %s (
%s binary(16) not null,
%s %s not null,
datetime int not null,
PRIMARY KEY (%s,%s)
) CHARSET=utf8 COLLATE=utf8_unicode_ci ENGINE=InnoDB""" % (
self.table,
self.entity_key_id,
self.property,
property_type,
self.entity_key_id,
self.property
)
self.db.execute(sql)
def __str__ (self):
return self.__repr__()
def __repr__(self):
return '<Index(table=%s)>' % (self.table)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) Spyder Project Contributors
# Licensed under the terms of the MIT License
"""Qt widgets for the notebook."""
# Standard library imports
import json
import os
import os.path as osp
from string import Template
import sys
# Qt imports
from qtpy.QtCore import QUrl, Qt
from qtpy.QtGui import QFontMetrics, QFont
from qtpy.QtWebEngineWidgets import (QWebEnginePage, QWebEngineSettings,
QWebEngineView, WEBENGINE)
from qtpy.QtWidgets import (QApplication, QMenu, QVBoxLayout, QWidget,
QMessageBox)
# Third-party imports
from notebook.utils import url_path_join, url_escape
import requests
# Spyder imports
from spyder.config.base import get_image_path, get_module_source_path
from spyder.utils.qthelpers import add_actions
from spyder.utils import sourcecode
from spyder.widgets.findreplace import FindReplace
# Local imports
from spyder_notebook.utils.localization import _
from spyder_notebook.widgets.dom import DOMWidget
# -----------------------------------------------------------------------------
# Templates
# -----------------------------------------------------------------------------
# Using the same css file from the Help plugin for now. Maybe
# later it'll be a good idea to create a new one.
PLUGINS_PATH = get_module_source_path('spyder', 'plugins')
CSS_PATH = osp.join(PLUGINS_PATH, 'help', 'utils', 'static', 'css')
TEMPLATES_PATH = osp.join(
PLUGINS_PATH, 'ipythonconsole', 'assets', 'templates')
BLANK = open(osp.join(TEMPLATES_PATH, 'blank.html')).read()
LOADING = open(osp.join(TEMPLATES_PATH, 'loading.html')).read()
KERNEL_ERROR = open(osp.join(TEMPLATES_PATH, 'kernel_error.html')).read()
# -----------------------------------------------------------------------------
# Widgets
# -----------------------------------------------------------------------------
class WebViewInBrowser(QWebEngineView):
"""
WebView which opens document in an external browser.
This is a subclass of QWebEngineView, which as soon as the URL is set,
opens the web page in an external browser and closes itself. It is used
in NotebookWidget to open links.
"""
def __init__(self, parent):
"""Construct object."""
super().__init__(parent)
self.urlChanged.connect(self.open_in_browser)
def open_in_browser(self, url):
"""
Open web page in external browser and close self.
Parameters
----------
url : QUrl
URL of web page to open in browser
"""
import webbrowser
try:
webbrowser.open(url.toString())
except ValueError:
# See: spyder-ide/spyder#9849
pass
self.stop()
self.close()
class NotebookWidget(DOMWidget):
"""WebView widget for notebooks."""
def __init__(self, parent, actions=None):
"""
Constructor.
Parameters
----------
parent : QWidget
Parent of the widget under construction.
actions : list of (QAction or QMenu or None) or None, optional
Actions to be added to the context menu of the widget under
construction. The default is None, meaning that no actions
will be added.
"""
super().__init__(parent)
self.actions = actions
def contextMenuEvent(self, event):
"""
Handle context menu events.
This overrides WebView.contextMenuEvent() in order to add the
actions in `self.actions` and remove the Back and Forward actions
which have no meaning for the notebook widget.
If Shift is pressed, then instead display the standard Qt context menu,
per gh:spyder-ide/spyder-notebook#279
Parameters
----------
event : QContextMenuEvent
The context menu event that needs to be handled.
"""
if QApplication.keyboardModifiers() & Qt.ShiftModifier:
return QWebEngineView.contextMenuEvent(self, event)
if self.actions is None:
actions = []
else:
actions = self.actions + [None]
actions += [
self.pageAction(QWebEnginePage.SelectAll),
self.pageAction(QWebEnginePage.Copy),
None,
self.zoom_in_action,
self.zoom_out_action]
if not WEBENGINE:
settings = self.page().settings()
settings.setAttribute(QWebEngineSettings.DeveloperExtrasEnabled,
True)
actions += [None, self.pageAction(QWebEnginePage.InspectElement)]
menu = QMenu(self)
add_actions(menu, actions)
menu.popup(event.globalPos())
event.accept()
def show_blank(self):
"""Show a blank page."""
self.setHtml(BLANK)
def show_kernel_error(self, error):
"""Show kernel initialization errors."""
# Remove unneeded blank lines at the beginning
eol = sourcecode.get_eol_chars(error)
if eol:
error = error.replace(eol, '<br>')
# Don't break lines at hyphens
# From http://stackoverflow.com/q/7691569/438386
error = error.replace('-', '‑')
message = _("An error occurred while starting the kernel")
kernel_error_template = Template(KERNEL_ERROR)
page = kernel_error_template.substitute(css_path=CSS_PATH,
message=message,
error=error)
self.setHtml(page)
def show_loading_page(self):
"""Show a loading animation while the kernel is starting."""
loading_template = Template(LOADING)
loading_img = get_image_path('loading_sprites.png')
if os.name == 'nt':
loading_img = loading_img.replace('\\', '/')
message = _("Connecting to kernel...")
page = loading_template.substitute(css_path=CSS_PATH,
loading_img=loading_img,
message=message)
self.setHtml(page)
def show_message(self, page):
"""Show a message page with the given .html file."""
self.setHtml(page)
def createWindow(self, webWindowType):
"""
Create new browser window.
This function is called by Qt if the user clicks on a link in the
notebook. The goal is to open the web page in an external browser.
To that end, we create and return an object which will open the browser
when Qt sets the URL.
"""
return WebViewInBrowser(self.parent())
class NotebookClient(QWidget):
"""
Notebook client for Spyder.
This is a widget composed of a NotebookWidget and a find dialog to
render notebooks.
Attributes
----------
server_url : str or None
URL to send requests to; set by register().
"""
def __init__(self, parent, filename, actions=None, ini_message=None):
"""
Constructor.
Parameters
----------
parent : QWidget
Parent of the widget under construction.
filename : str
File name of the notebook.
actions : list of (QAction or QMenu or None) or None, optional
Actions to be added to the context menu of the widget under
construction. The default is None, meaning that no actions
will be added.
ini_message : str or None, optional
HTML to be initially displayed in the widget. The default is
None, meaning that an empty page is displayed initially.
"""
super().__init__(parent)
if os.name == 'nt':
filename = filename.replace('/', '\\')
self.filename = filename
self.file_url = None
self.server_url = None
self.path = None
self.notebookwidget = NotebookWidget(self, actions)
if ini_message:
self.notebookwidget.show_message(ini_message)
self.static = True
else:
self.notebookwidget.show_blank()
self.static = False
self.find_widget = FindReplace(self)
self.find_widget.set_editor(self.notebookwidget)
self.find_widget.hide()
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.notebookwidget)
layout.addWidget(self.find_widget)
self.setLayout(layout)
def add_token(self, url):
"""Add notebook token to a given url."""
token_url = url + '?token={}'.format(self.token)
return token_url
def register(self, server_info):
"""Register attributes that can be computed with the server info."""
# Path relative to the server directory
self.path = os.path.relpath(self.filename,
start=server_info['notebook_dir'])
# Replace backslashes on Windows
if os.name == 'nt':
self.path = self.path.replace('\\', '/')
# Server url to send requests to
self.server_url = server_info['url']
# Server token
self.token = server_info['token']
url = url_path_join(self.server_url, 'notebook',
url_escape(self.path))
# Set file url to load this notebook
self.file_url = self.add_token(url)
def go_to(self, url_or_text):
"""Go to page URL."""
if isinstance(url_or_text, str):
url = QUrl(url_or_text)
else:
url = url_or_text
self.notebookwidget.load(url)
def load_notebook(self):
"""Load the associated notebook."""
self.go_to(self.file_url)
def get_filename(self):
"""Get notebook's filename."""
return self.filename
def get_short_name(self):
"""Get a short name for the notebook."""
sname = osp.splitext(osp.basename(self.filename))[0]
if len(sname) > 20:
fm = QFontMetrics(QFont())
sname = fm.elidedText(sname, Qt.ElideRight, 110)
return sname
def save(self):
"""
Save current notebook asynchronously.
This function simulates a click on the Save button in the notebook
which will save the current notebook (but the function will return
before). The Save button is found by selecting the first element of
class `jp-ToolbarButtonComponent` whose `title` attribute begins with
the string "Save".
"""
self.notebookwidget.mousedown(
'.jp-ToolbarButtonComponent[title^="Save"]')
def get_session_url(self):
"""
Get the kernel sessions URL of the client.
Return a str with the URL or None, if no server is associated to
the client.
"""
if self.server_url:
session_url = url_path_join(self.server_url, 'api/sessions')
return self.add_token(session_url)
else:
return None
def get_kernel_id(self):
"""
Get the kernel id of the client.
Return a str with the kernel id or None. On error, display a dialog
box and return None.
"""
sessions_url = self.get_session_url()
if not sessions_url:
return None
try:
sessions_response = requests.get(sessions_url)
except requests.exceptions.RequestException as exception:
msg = _('Spyder could not get a list of sessions '
'from the Jupyter Notebook server. '
'Message: {}').format(exception)
QMessageBox.warning(self, _('Server error'), msg)
return None
if sessions_response.status_code != requests.codes.ok:
msg = _('Spyder could not get a list of sessions '
'from the Jupyter Notebook server. '
'Status code: {}').format(sessions_response.status_code)
QMessageBox.warning(self, _('Server error'), msg)
return None
if os.name == 'nt':
path = self.path.replace('\\', '/')
else:
path = self.path
sessions = json.loads(sessions_response.content.decode())
for session in sessions:
notebook_path = session.get('notebook', {}).get('path')
if notebook_path is not None and notebook_path == path:
kernel_id = session['kernel']['id']
return kernel_id
def shutdown_kernel(self):
"""Shutdown the kernel of the client."""
kernel_id = self.get_kernel_id()
if kernel_id:
delete_url = self.add_token(url_path_join(self.server_url,
'api/kernels/',
kernel_id))
delete_req = requests.delete(delete_url)
if delete_req.status_code != 204:
QMessageBox.warning(
self,
_("Server error"),
_("The Jupyter Notebook server "
"failed to shutdown the kernel "
"associated with this notebook. "
"If you want to shut it down, "
"you'll have to close Spyder."))
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def main():
"""Execute a simple test."""
from spyder.utils.qthelpers import qapplication
app = qapplication()
widget = NotebookClient(parent=None, filename='')
widget.show()
widget.go_to('http://google.com')
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
|
import datetime
import logging
from itertools import chain, product
from actstream.actions import follow, unfollow
from actstream.models import Follow
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField, CICharField
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import validate_slug
from django.db import models
from django.db.models.signals import post_delete, pre_delete
from django.db.transaction import on_commit
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils.html import format_html
from guardian.shortcuts import assign_perm
from guardian.utils import get_anonymous_user
from machina.apps.forum.models import Forum
from machina.apps.forum_permission.models import (
ForumPermission,
GroupForumPermission,
UserForumPermission,
)
from stdimage import JPEGField
from tldextract import extract
from grandchallenge.anatomy.models import BodyStructure
from grandchallenge.challenges.emails import (
send_challenge_created_email,
send_external_challenge_created_email,
)
from grandchallenge.core.storage import (
get_banner_path,
get_logo_path,
get_social_image_path,
public_s3_storage,
)
from grandchallenge.evaluation.tasks import assign_evaluation_permissions
from grandchallenge.modalities.models import ImagingModality
from grandchallenge.organizations.models import Organization
from grandchallenge.pages.models import Page
from grandchallenge.publications.models import Publication
from grandchallenge.subdomains.utils import reverse
from grandchallenge.task_categories.models import TaskType
logger = logging.getLogger(__name__)
class ChallengeManager(models.Manager):
def non_hidden(self):
"""Filter the hidden challenge"""
return self.filter(hidden=False)
def validate_nounderscores(value):
if "_" in value:
raise ValidationError("Underscores (_) are not allowed.")
def validate_short_name(value):
if value.lower() in settings.DISALLOWED_CHALLENGE_NAMES:
raise ValidationError("That name is not allowed.")
class ChallengeSeries(models.Model):
name = CICharField(max_length=64, blank=False, unique=True)
url = models.URLField(blank=True)
class Meta:
ordering = ("name",)
verbose_name_plural = "Challenge Series"
def __str__(self):
return f"{self.name}"
@property
def badge(self):
return format_html(
(
'<span class="badge badge-info above-stretched-link" '
'title="Associated with {0}"><i class="fas fa-globe fa-fw">'
"</i> {0}</span>"
),
self.name,
)
class ChallengeBase(models.Model):
CHALLENGE_ACTIVE = "challenge_active"
CHALLENGE_INACTIVE = "challenge_inactive"
DATA_PUB = "data_pub"
creator = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
short_name = CICharField(
max_length=50,
blank=False,
help_text=(
"short name used in url, specific css, files etc. "
"No spaces allowed"
),
validators=[
validate_nounderscores,
validate_slug,
validate_short_name,
],
unique=True,
)
description = models.CharField(
max_length=1024,
default="",
blank=True,
help_text="Short summary of this project, max 1024 characters.",
)
title = models.CharField(
max_length=64,
blank=True,
default="",
help_text=(
"The name of the challenge that is displayed on the All Challenges"
" page. If this is blank the short name of the challenge will be "
"used."
),
)
logo = JPEGField(
upload_to=get_logo_path,
storage=public_s3_storage,
blank=True,
help_text="A logo for this challenge. Should be square with a resolution of 640x640 px or higher.",
variations=settings.STDIMAGE_LOGO_VARIATIONS,
)
social_image = JPEGField(
upload_to=get_social_image_path,
storage=public_s3_storage,
blank=True,
help_text="An image for this challenge which is displayed when you post the link on social media. Should have a resolution of 640x320 px (1280x640 px for best display).",
variations=settings.STDIMAGE_SOCIAL_VARIATIONS,
)
hidden = models.BooleanField(
default=True,
help_text="Do not display this Project in any public overview",
)
educational = models.BooleanField(
default=False, help_text="It is an educational challenge"
)
workshop_date = models.DateField(
null=True,
blank=True,
help_text=(
"Date on which the workshop belonging to this project will be held"
),
)
event_name = models.CharField(
max_length=1024,
default="",
blank=True,
null=True,
help_text="The name of the event the workshop will be held at",
)
event_url = models.URLField(
blank=True,
null=True,
help_text="Website of the event which will host the workshop",
)
publications = models.ManyToManyField(
Publication,
blank=True,
help_text="Which publications are associated with this challenge?",
)
data_license_agreement = models.TextField(
blank=True,
help_text="What is the data license agreement for this challenge?",
)
task_types = models.ManyToManyField(
TaskType, blank=True, help_text="What type of task is this challenge?"
)
modalities = models.ManyToManyField(
ImagingModality,
blank=True,
help_text="What imaging modalities are used in this challenge?",
)
structures = models.ManyToManyField(
BodyStructure,
blank=True,
help_text="What structures are used in this challenge?",
)
series = models.ManyToManyField(
ChallengeSeries,
blank=True,
help_text="Which challenge series is this associated with?",
)
organizations = models.ManyToManyField(
Organization,
blank=True,
help_text="The organizations associated with this challenge",
related_name="%(class)ss",
)
number_of_training_cases = models.IntegerField(blank=True, null=True)
number_of_test_cases = models.IntegerField(blank=True, null=True)
filter_classes = ArrayField(
CICharField(max_length=32), default=list, editable=False
)
objects = ChallengeManager()
def __str__(self):
return self.short_name
@property
def public(self):
"""Helper property for consistency with other objects"""
return not self.hidden
def get_absolute_url(self):
raise NotImplementedError
@property
def is_self_hosted(self):
return True
@property
def year(self):
if self.workshop_date:
return self.workshop_date.year
else:
return self.created.year
@property
def upcoming_workshop_date(self):
if self.workshop_date and self.workshop_date > datetime.date.today():
return self.workshop_date
@property
def registered_domain(self):
"""
Copied from grandchallenge_tags
Try to find out what framework this challenge is hosted on, return
a string which can also be an id or class in HTML
"""
return extract(self.get_absolute_url()).registered_domain
class Meta:
abstract = True
ordering = ("pk",)
class Challenge(ChallengeBase):
banner = JPEGField(
upload_to=get_banner_path,
storage=public_s3_storage,
blank=True,
help_text=(
"Image that gets displayed at the top of each page. "
"Recommended resolution 2200x440 px."
),
variations=settings.STDIMAGE_BANNER_VARIATIONS,
)
disclaimer = models.CharField(
max_length=2048,
default="",
blank=True,
null=True,
help_text=(
"Optional text to show on each page in the project. "
"For showing 'under construction' type messages"
),
)
require_participant_review = models.BooleanField(
default=False,
help_text=(
"If ticked, new participants need to be approved by project "
"admins before they can access restricted pages. If not ticked, "
"new users are allowed access immediately"
),
)
use_registration_page = models.BooleanField(
default=True,
help_text="If true, show a registration page on the challenge site.",
)
registration_page_text = models.TextField(
default="",
blank=True,
help_text=(
"The text to use on the registration page, you could include "
"a data usage agreement here. You can use HTML markup here."
),
)
use_workspaces = models.BooleanField(default=False)
use_evaluation = models.BooleanField(
default=True,
help_text=(
"If true, use the automated evaluation system. See the evaluation "
"page created in the Challenge site."
),
)
use_teams = models.BooleanField(
default=False,
help_text=(
"If true, users are able to form teams to participate in "
"this challenge together."
),
)
admins_group = models.OneToOneField(
Group,
editable=False,
on_delete=models.PROTECT,
related_name="admins_of_challenge",
)
participants_group = models.OneToOneField(
Group,
editable=False,
on_delete=models.PROTECT,
related_name="participants_of_challenge",
)
forum = models.OneToOneField(
Forum, editable=False, on_delete=models.PROTECT
)
display_forum_link = models.BooleanField(
default=False,
help_text="Display a link to the challenge forum in the nav bar.",
)
cached_num_participants = models.PositiveIntegerField(
editable=False, default=0
)
cached_num_results = models.PositiveIntegerField(editable=False, default=0)
cached_latest_result = models.DateTimeField(
editable=False, blank=True, null=True
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hidden_orig = self.hidden
def save(self, *args, **kwargs):
adding = self._state.adding
if adding:
self.create_groups()
self.create_forum()
super().save(*args, **kwargs)
if adding:
if self.creator:
self.add_admin(user=self.creator)
self.update_permissions()
self.create_forum_permissions()
self.create_default_pages()
self.create_default_phases()
send_challenge_created_email(self)
if adding or self.hidden != self._hidden_orig:
on_commit(
lambda: assign_evaluation_permissions.apply_async(
kwargs={"challenge_pk": self.pk}
)
)
self.update_user_forum_permissions()
def update_permissions(self):
assign_perm("change_challenge", self.admins_group, self)
def create_forum_permissions(self):
participant_group_perms = {
"can_see_forum",
"can_read_forum",
"can_start_new_topics",
"can_reply_to_topics",
"can_delete_own_posts",
"can_edit_own_posts",
"can_post_without_approval",
"can_create_polls",
"can_vote_in_polls",
}
admin_group_perms = {
"can_lock_topics",
"can_edit_posts",
"can_delete_posts",
"can_approve_posts",
"can_reply_to_locked_topics",
"can_post_announcements",
"can_post_stickies",
*participant_group_perms,
}
permissions = ForumPermission.objects.filter(
codename__in=admin_group_perms
).values_list("codename", "pk")
permissions = {codename: pk for codename, pk in permissions}
GroupForumPermission.objects.bulk_create(
chain(
(
GroupForumPermission(
permission_id=permissions[codename],
group=self.participants_group,
forum=self.forum,
has_perm=True,
)
for codename in participant_group_perms
),
(
GroupForumPermission(
permission_id=permissions[codename],
group=self.admins_group,
forum=self.forum,
has_perm=True,
)
for codename in admin_group_perms
),
)
)
UserForumPermission.objects.bulk_create(
UserForumPermission(
permission_id=permissions[codename],
**{user: True},
forum=self.forum,
has_perm=not self.hidden,
)
for codename, user in product(
["can_see_forum", "can_read_forum"],
["anonymous_user", "authenticated_user"],
)
)
def update_user_forum_permissions(self):
perms = UserForumPermission.objects.filter(
permission__codename__in=["can_see_forum", "can_read_forum"],
forum=self.forum,
)
for p in perms:
p.has_perm = not self.hidden
UserForumPermission.objects.bulk_update(perms, ["has_perm"])
def create_groups(self):
# Create the groups only on first save
admins_group = Group.objects.create(name=f"{self.short_name}_admins")
participants_group = Group.objects.create(
name=f"{self.short_name}_participants"
)
self.admins_group = admins_group
self.participants_group = participants_group
def create_forum(self):
f, created = Forum.objects.get_or_create(
name=settings.FORUMS_CHALLENGE_CATEGORY_NAME, type=Forum.FORUM_CAT,
)
if created:
UserForumPermission.objects.bulk_create(
UserForumPermission(
permission_id=perm_id,
**{user: True},
forum=f,
has_perm=True,
)
for perm_id, user in product(
ForumPermission.objects.filter(
codename__in=["can_see_forum", "can_read_forum"]
).values_list("pk", flat=True),
["anonymous_user", "authenticated_user"],
)
)
self.forum = Forum.objects.create(
name=self.title if self.title else self.short_name,
parent=f,
type=Forum.FORUM_POST,
)
def create_default_pages(self):
Page.objects.create(
title=self.short_name,
html=render_to_string(
"pages/defaults/home.html", {"challenge": self}
),
challenge=self,
permission_level=Page.ALL,
)
Page.objects.create(
title="Contact",
html=render_to_string(
"pages/defaults/contact.html", {"challenge": self}
),
challenge=self,
permission_level=Page.REGISTERED_ONLY,
)
def create_default_phases(self):
self.phase_set.create(challenge=self)
def is_admin(self, user) -> bool:
"""Determines if this user is an admin of this challenge."""
return (
user.is_superuser
or user.groups.filter(pk=self.admins_group.pk).exists()
)
def is_participant(self, user) -> bool:
"""Determines if this user is a participant of this challenge."""
return (
user.is_superuser
or user.groups.filter(pk=self.participants_group.pk).exists()
)
def get_admins(self):
"""Return all admins of this challenge."""
return self.admins_group.user_set.all()
def get_participants(self):
"""Return all participants of this challenge."""
return self.participants_group.user_set.all()
def get_absolute_url(self):
return reverse(
"pages:home", kwargs={"challenge_short_name": self.short_name},
)
def add_participant(self, user):
if user != get_anonymous_user():
user.groups.add(self.participants_group)
follow(
user=user, obj=self.forum, actor_only=False, send_action=False
)
else:
raise ValueError("You cannot add the anonymous user to this group")
def remove_participant(self, user):
user.groups.remove(self.participants_group)
unfollow(user=user, obj=self.forum, send_action=False)
def add_admin(self, user):
if user != get_anonymous_user():
user.groups.add(self.admins_group)
follow(
user=user, obj=self.forum, actor_only=False, send_action=False
)
else:
raise ValueError("You cannot add the anonymous user to this group")
def remove_admin(self, user):
user.groups.remove(self.admins_group)
unfollow(user=user, obj=self.forum, send_action=False)
class Meta(ChallengeBase.Meta):
verbose_name = "challenge"
verbose_name_plural = "challenges"
@receiver(post_delete, sender=Challenge)
def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):
"""
Deletes the related groups.
We use a signal rather than overriding delete() to catch usages of
bulk_delete.
"""
try:
instance.admins_group.delete(using=using)
except ObjectDoesNotExist:
pass
try:
instance.participants_group.delete(using=using)
except ObjectDoesNotExist:
pass
class ExternalChallenge(ChallengeBase):
homepage = models.URLField(
blank=False, help_text=("What is the homepage for this challenge?")
)
data_stored = models.BooleanField(
default=False,
help_text=("Has the grand-challenge team stored the data?"),
)
def save(self, *args, **kwargs):
adding = self._state.adding
super().save(*args, **kwargs)
if adding:
send_external_challenge_created_email(self)
def get_absolute_url(self):
return self.homepage
@property
def is_self_hosted(self):
return False
@receiver(pre_delete, sender=Challenge)
@receiver(pre_delete, sender=ExternalChallenge)
def delete_challenge_follows(*_, instance: Challenge, **__):
ct = ContentType.objects.filter(
app_label=instance._meta.app_label, model=instance._meta.model_name
).get()
Follow.objects.filter(object_id=instance.pk, content_type=ct).delete()
|
|
"""
author: Lalit Jain, [email protected]
modified: Chris Fernandez
last updated: 06/03/2015
A module that can be used to create and launch experiments. Can be imported as a module or used on the command line.
Usage:
As a module:
exp_uid_list, exp_key_list, widget_key_list = launch_experiment(host, experiment_file, AWS_ID, AWS_KEY)
Command line:
export NEXT_FRONTEND_GLOBAL_HOST=
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=
python launch_experiment --experiment_file=
"""
import random
import json
import time, datetime
import requests
import os, sys, getopt, imp
import csv, zipfile, getopt
from StringIO import StringIO
from boto.s3.connection import S3Connection
from boto.s3.key import Key
def generate_target_blob(file, prefix, AWS_BUCKET_NAME, AWS_ID, AWS_KEY):
"""
Upload targets and return a target blob for upload with the target_manager.
Inputs: ::\n
file: fully qualified path of a file on the system. Must be a zipfile with pictures or a text file.
prefix: string to prefix every uploaded file name with
AWS_BUCKET_NAME: Aws bucket name
AWS_ID: Aws id
AWS_KEY: Aws key
"""
targets = []
if file.endswith('.zip'):
target_file_dict = zipfile_to_dictionary(file)
bucket = get_AWS_bucket(AWS_BUCKET_NAME, AWS_ID, AWS_KEY)
for target_name in target_file_dict.keys():
print "uploading", target_name
target_file = target_file_dict[target_name]
target_url = upload_to_S3(bucket, prefix+"_"+target_name, StringIO(target_file))
print "success", target_url
target = { 'target_id':prefix+"_"+target_name,
'primary_type': 'image',
'primary_description':target_url,
'alt_type': 'text',
'alt_description':target_name
}
targets.append(target)
elif file.endswith('.txt'):
i = 0
with open(file) as f:
for line in f:
line = line.strip()
if line:
i += 1
target = { 'target_id': str(i),
'primary_type': 'text',
'primary_description':line,
'alt_type': 'text',
'alt_description':line
}
targets.append(target)
else:
raise Exception('Target file name must be .txt or .zip.')
# print targets
return {'target_blob' : targets}
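# A hedged usage sketch for generate_target_blob (all values below are
# placeholders, not part of this module). Called with a zipfile of images,
#     generate_target_blob('/path/to/targets.zip', '2015-06-03',
#                          'my-bucket', AWS_ID, AWS_KEY)
# uploads each image to S3 and returns a dict shaped like
#     {'target_blob': [{'target_id': '2015-06-03_cat.jpg',
#                       'primary_type': 'image',
#                       'primary_description': '<public S3 url>',
#                       'alt_type': 'text',
#                       'alt_description': 'cat.jpg'}, ...]}
# while a .txt file yields one text target per non-empty line.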
def get_AWS_bucket(AWS_BUCKET_NAME,AWS_ID, AWS_KEY):
"""
Creates a bucket for an S3 account
"""
conn = S3Connection(AWS_ID, AWS_KEY)
#Maybe by default we should try to create bucket and then catch exception?
#Also, migrate the bucket name to settings.Config
bucket = conn.get_bucket(AWS_BUCKET_NAME)
return bucket
def upload_to_S3(bucket, key, file_object):
"""
Uploads a file object to a S3 instance
Inputs: ::\n
bucket: S3 bucket we want to upload to
key: the key to access the file in the bucket;
file_object: the file that needs to be uploaded
"""
k = Key(bucket)
k.key = key
k.set_contents_from_file(file_object)
k.set_acl('public-read')
return k.generate_url(expires_in=0, query_auth=False, force_http=True)
def zipfile_to_dictionary(filename):
"""
Takes in a zip file and returns a dictionary with the filenames
as keys and file objects as values
Inputs: ::\n
file: the concerned zip file
Outputs: ::\n
result: the returned dictionary
"""
listOfFiles= []
dictionary ={}
print "filename in z to d",filename
zf = zipfile.ZipFile(filename,'r')
listOfFiles = zf.namelist()
for i in listOfFiles:
if not i.startswith('__MACOSX') and i.endswith(('jpg','jpeg','png','gif','bmp')):
f= zf.read(i)
dictionary[i] = f
return dictionary
def import_experiment_list(experiment_file):
    # Load the experiment file as a module and pull out its experiment_list
    mod = imp.load_source('experiment', experiment_file)
experiment_list = mod.experiment_list
return experiment_list
def launch_experiment(host, experiment_list, AWS_ID, AWS_KEY, AWS_BUCKET_NAME):
"""
Initialize experiment from an array in an experiment file.
Inputs: ::\n
host: hostname of server running next_frontend_base
experiment_list: a list of experiment dictionaries, typically the experiment_list array defined in an experiment file.
Each dictionary must contain the key initExp, a qualified experiment initialization dictionary.
It may also contain an optional target_file key giving the fully qualified name of a target file on the system; the target file can be either text (must end in .txt) or a zipfile containing images (must end in .zip).
Optional context_type and context keys may also be added; if the context_type is an image, the context must be a fully qualified file name. (See the example sketch after this function.)
AWS_ID: Aws id
AWS_KEY: Aws key
"""
exp_uid_list = []
exp_key_list = []
widget_key_list = []
# establish S3 connection and use boto get_bucket
bucket = get_AWS_bucket(AWS_BUCKET_NAME, AWS_ID, AWS_KEY)
# Initialize experiment
for experiment in experiment_list:
# Upload the context if there is one.
# This is a bit sloppy. Try to think of a better way to do this.
if 'context' in experiment.keys() and experiment['context_type']=='image':
print experiment['context'].split("/")[-1], experiment['context']
context_url = upload_to_S3(bucket, experiment['context'].split("/")[-1], open(experiment['context']))
experiment['initExp']['args']['context'] = context_url
experiment['initExp']['args']['context_type'] = "image"
elif 'context' in experiment.keys():
experiment['initExp']['args']['context'] = experiment['context']
experiment['initExp']['args']['context_type'] = experiment['context_type']
url = "http://"+host+"/api/experiment"
print "Initializing experiment", experiment['initExp']
response = requests.post(url, json.dumps(experiment['initExp']), headers={'content-type':'application/json'})
print "initExp response = ",response.text, response.status_code
initExp_response_dict = json.loads(response.text)
exp_uid = initExp_response_dict['exp_uid']
exp_key = initExp_response_dict['exp_key']
perm_key = initExp_response_dict['perm_key']
exp_uid_list.append(str(exp_uid))
exp_key_list.append(str(exp_key))
widget_key_list.append(str(perm_key))
# Upload targets
if 'target_file' in experiment.keys():
target_file = experiment['target_file']
print "target file in launch_Experiment", target_file
target_blob = generate_target_blob(file=target_file,
prefix=str(datetime.date.today()),
AWS_BUCKET_NAME=AWS_BUCKET_NAME,
AWS_ID=AWS_ID,
AWS_KEY=AWS_KEY)
create_target_mapping_dict = {}
create_target_mapping_dict['app_id'] = experiment['initExp']['app_id']
create_target_mapping_dict['exp_uid'] = exp_uid
create_target_mapping_dict['exp_key'] = exp_key
create_target_mapping_dict['target_blob'] = target_blob['target_blob']
#print create_target_mapping_dict
url = "http://"+host+"/api/targets/createtargetmapping"
response = requests.post(url, json.dumps(create_target_mapping_dict), headers={'content-type':'application/json'})
print "Create Target Mapping response", response, response.text, response.status_code
print
print "Query Url is at:", "http://"+host+"/query/query_page/query_page/"+exp_uid+"/"+perm_key
print
print "exp_uid_list:", exp_uid_list
print "exp_key_list:", exp_key_list
print "widget_key_list:", widget_key_list
return exp_uid_list, exp_key_list, widget_key_list
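# A minimal, hypothetical sketch of the experiment_list structure that
# launch_experiment() above expects (see its docstring). The app_id, args
# fields and file paths are illustrative placeholders and must match a real
# NEXT app; only the top-level keys (initExp, target_file, context_type,
# context) come from this module's docstrings.
_EXAMPLE_EXPERIMENT_LIST = [
    {
        'initExp': {
            'app_id': 'PoolBasedTripletMDS',        # placeholder app id
            'args': {'n': 30, 'd': 2, 'failure_probability': 0.01},
        },
        'target_file': '/path/to/targets.zip',      # .zip of images or a .txt file
        'context_type': 'text',
        'context': 'Which of the bottom two items is more similar to the top item?',
    },
]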
if __name__=='__main__':
    opts, args = getopt.getopt(sys.argv[1:], '', ["experiment_file="])
opts = dict(opts)
# Make sure to check for aws id and key here
    if 'AWS_SECRET_ACCESS_KEY' not in os.environ or 'AWS_ACCESS_KEY_ID' not in os.environ or 'NEXT_BACKEND_GLOBAL_HOST' not in os.environ or 'AWS_BUCKET_NAME' not in os.environ:
        print "You must set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, NEXT_BACKEND_GLOBAL_HOST, AWS_BUCKET_NAME as environment variables"
sys.exit()
print opts['--experiment_file']
    port = os.environ.get('NEXT_BACKEND_GLOBAL_PORT', '8000')
experiment_list = import_experiment_list(opts['--experiment_file'])
launch_experiment(os.environ.get('NEXT_BACKEND_GLOBAL_HOST')+":"+port, experiment_list, AWS_ID=os.environ.get('AWS_ACCESS_KEY_ID'), AWS_KEY=os.environ.get('AWS_SECRET_ACCESS_KEY'), AWS_BUCKET_NAME=os.environ.get('AWS_BUCKET_NAME') )
#if __name__ == "__main__":
# opts, args = getopt.getopt(sys.argv[1:], None, ["prefix=","file=","bucket=", "AWS_ID=", "AWS_KEY="])
# opts = dict(opts)
# print generate_target_blob(opts['--file'], opts['--prefix'], opts['--bucket'], opts['--id'], opts['--key'])
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatGatewaysOperations:
"""NatGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
nat_gateway_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NatGateway":
"""Gets the specified nat gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> "_models.NatGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NatGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> AsyncLROPoller["_models.NatGateway"]:
"""Creates or updates a nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to the create or update nat gateway operation.
:type parameters: ~azure.mgmt.network.v2019_11_01.models.NatGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NatGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.NatGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NatGateway":
"""Updates nat gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to update nat gateway tags.
:type parameters: ~azure.mgmt.network.v2019_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all the Nat Gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/natGateways'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all nat gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways'} # type: ignore
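# A hedged usage sketch (not part of the generated code). It assumes the
# azure-identity and azure-mgmt-network packages, where the async
# NetworkManagementClient exposes these operations as `client.nat_gateways`;
# resource names below are placeholders.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async def example(subscription_id):
#       credential = DefaultAzureCredential()
#       client = NetworkManagementClient(credential, subscription_id)
#       # begin_* methods return an AsyncLROPoller; await .result() for the
#       # final NatGateway once the long-running operation completes.
#       poller = await client.nat_gateways.begin_create_or_update(
#           "my-rg", "my-natgw",
#           {"location": "westus2", "sku": {"name": "Standard"}})
#       nat_gateway = await poller.result()
#       # list()/list_all() return AsyncItemPaged; iterate with `async for`.
#       async for gw in client.nat_gateways.list("my-rg"):
#           print(gw.name)
#       await client.close()
#       await credential.close()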
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
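# Worked example of the reversal helpers above (assuming a little-endian host,
# since bufreverse packs/unpacks with the native '@I' format):
#   bytereverse(0x12345678)                         == 0x78563412
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08')  == '\x04\x03\x02\x01\x08\x07\x06\x05'
#   wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x05\x06\x07\x08\x01\x02\x03\x04'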
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
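        # getwork 'data' is 128 bytes = 256 hex chars; the 80-byte block header is
        # the first 160 hex chars, and the 32-bit nonce occupies header bytes
        # 76..80, i.e. hex chars 152..160 -- splice the new nonce into that slot.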
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4316
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
from pandac.PandaModules import Vec3, Point3, Point4, TextNode, NodePath
from pandac.PandaModules import CollisionHandlerEvent, CollisionNode, CollisionSphere
from direct.distributed.ClockDelta import globalClockDelta
from direct.interval.IntervalGlobal import Sequence, Parallel
from direct.interval.IntervalGlobal import LerpScaleInterval, LerpFunctionInterval, LerpColorScaleInterval, LerpPosInterval
from direct.interval.IntervalGlobal import SoundInterval, WaitInterval
from direct.showbase.PythonUtil import Functor, bound, lerp, SerialNumGen
from direct.showbase.RandomNumGen import RandomNumGen
from direct.task.Task import Task
from direct.distributed import DistributedSmoothNode
from direct.directnotify import DirectNotifyGlobal
from direct.interval.FunctionInterval import Wait, Func
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from toontown.toonbase import ToontownGlobals
from toontown.minigame.Trajectory import Trajectory
from toontown.minigame.OrthoDrive import OrthoDrive
from toontown.minigame.OrthoWalk import OrthoWalk
from toontown.minigame.DropPlacer import PartyRegionDropPlacer
from toontown.parties import PartyGlobals
from toontown.parties.PartyCatchActivityToonSD import PartyCatchActivityToonSD
from toontown.parties.DistributedPartyActivity import DistributedPartyActivity
from toontown.parties.DistributedPartyCatchActivityBase import DistributedPartyCatchActivityBase
from toontown.parties.DistributedPartyCannonActivity import DistributedPartyCannonActivity
from toontown.parties.activityFSMs import CatchActivityFSM
class DistributedPartyCatchActivity(DistributedPartyActivity, DistributedPartyCatchActivityBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPartyCatchActivity')
DropTaskName = 'dropSomething'
DropObjectPlurals = {'apple': TTLocalizer.PartyCatchActivityApples,
'orange': TTLocalizer.PartyCatchActivityOranges,
'pear': TTLocalizer.PartyCatchActivityPears,
'coconut': TTLocalizer.PartyCatchActivityCoconuts,
'watermelon': TTLocalizer.PartyCatchActivityWatermelons,
'pineapple': TTLocalizer.PartyCatchActivityPineapples,
'anvil': TTLocalizer.PartyCatchActivityAnvils}
class Generation:
def __init__(self, generation, startTime, startNetworkTime, numPlayers):
self.generation = generation
self.startTime = startTime
self.startNetworkTime = startNetworkTime
self.numPlayers = numPlayers
self.hasBeenScheduled = False
self.droppedObjNames = []
self.dropSchedule = []
self.numItemsDropped = 0
self.droppedObjCaught = {}
def __init__(self, cr):
DistributedPartyActivity.__init__(self, cr, PartyGlobals.ActivityIds.PartyCatch, PartyGlobals.ActivityTypes.HostInitiated, wantRewardGui=True)
self.setUsesSmoothing()
self.setUsesLookAround()
self._sNumGen = SerialNumGen()
def getTitle(self):
return TTLocalizer.PartyCatchActivityTitle
def getInstructions(self):
return TTLocalizer.PartyCatchActivityInstructions % {'badThing': self.DropObjectPlurals['anvil']}
def generate(self):
DistributedPartyActivity.generate(self)
self.notify.info('localAvatar doId: %s' % base.localAvatar.doId)
self.notify.info('generate()')
self._generateFrame = globalClock.getFrameCount()
self._id2gen = {}
self._orderedGenerations = []
self._orderedGenerationIndex = None
rng = RandomNumGen(self.doId)
self._generationSeedBase = rng.randrange(1000)
self._lastDropTime = 0.0
return
def getCurGeneration(self):
if self._orderedGenerationIndex is None:
return
return self._orderedGenerations[self._orderedGenerationIndex]
def _addGeneration(self, generation, startTime, startNetworkTime, numPlayers):
self._id2gen[generation] = self.Generation(generation, startTime, startNetworkTime, numPlayers)
i = 0
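        # Walk the existing generations to find the index that keeps
        # _orderedGenerations sorted by start time (generation id breaks ties),
        # splice the new generation in there, and bump the current-generation
        # index if the insertion lands at or before it.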
while 1:
if i >= len(self._orderedGenerations):
break
gen = self._orderedGenerations[i]
startNetT = self._id2gen[gen].startTime
genId = self._id2gen[gen].generation
if startNetT > startNetworkTime:
break
if startNetT == startNetworkTime and genId > generation:
break
i += 1
self._orderedGenerations = self._orderedGenerations[:i] + [generation] + self._orderedGenerations[i:]
if self._orderedGenerationIndex is not None:
if self._orderedGenerationIndex >= i:
self._orderedGenerationIndex += 1
def _removeGeneration(self, generation):
del self._id2gen[generation]
i = self._orderedGenerations.index(generation)
self._orderedGenerations = self._orderedGenerations[:i] + self._orderedGenerations[i + 1:]
if self._orderedGenerationIndex is not None:
if len(self._orderedGenerations):
if self._orderedGenerationIndex >= i:
self._orderedGenerationIndex -= 1
else:
self._orderedGenerationIndex = None
return
def announceGenerate(self):
self.notify.info('announceGenerate()')
self.catchTreeZoneEvent = 'fence_floor'
DistributedPartyActivity.announceGenerate(self)
def load(self, loadModels = 1, arenaModel = 'partyCatchTree'):
self.notify.info('load()')
DistributedPartyCatchActivity.notify.debug('PartyCatch: load')
self.activityFSM = CatchActivityFSM(self)
if __dev__:
for o in xrange(3):
print {0: 'SPOTS PER PLAYER',
1: 'DROPS PER MINUTE PER SPOT DURING NORMAL DROP PERIOD',
2: 'DROPS PER MINUTE PER PLAYER DURING NORMAL DROP PERIOD'}[o]
for i in xrange(1, self.FallRateCap_Players + 10):
self.defineConstants(forceNumPlayers=i)
numDropLocations = self.DropRows * self.DropColumns
numDropsPerMin = 60.0 / self.DropPeriod
if o == 0:
spotsPerPlayer = numDropLocations / float(i)
print '%2d PLAYERS: %s' % (i, spotsPerPlayer)
elif o == 1:
numDropsPerMinPerSpot = numDropsPerMin / numDropLocations
print '%2d PLAYERS: %s' % (i, numDropsPerMinPerSpot)
elif i > 0:
numDropsPerMinPerPlayer = numDropsPerMin / i
print '%2d PLAYERS: %s' % (i, numDropsPerMinPerPlayer)
self.defineConstants()
self.treesAndFence = loader.loadModel('phase_13/models/parties/%s' % arenaModel)
self.treesAndFence.setScale(0.9)
self.treesAndFence.find('**/fence_floor').setPos(0.0, 0.0, 0.1)
self.treesAndFence.reparentTo(self.root)
ground = self.treesAndFence.find('**/groundPlane')
ground.setBin('ground', 1)
DistributedPartyActivity.load(self)
exitText = TextNode('PartyCatchExitText')
exitText.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
exitText.setCardDecal(True)
exitText.setCardColor(1.0, 1.0, 1.0, 0.0)
exitText.setText(TTLocalizer.PartyCatchActivityExit)
        exitText.setTextColor(0.0, 0.8, 0.0, 0.9)
exitText.setAlign(exitText.ACenter)
exitText.setFont(ToontownGlobals.getBuildingNametagFont())
exitText.setShadowColor(0, 0, 0, 1)
exitText.setBin('fixed')
if TTLocalizer.BuildingNametagShadow:
exitText.setShadow(*TTLocalizer.BuildingNametagShadow)
exitTextLoc = self.treesAndFence.find('**/loc_exitSignText')
exitTextNp = exitTextLoc.attachNewNode(exitText)
exitTextNp.setDepthWrite(0)
exitTextNp.setScale(4)
exitTextNp.setZ(-.5)
self.sign.reparentTo(self.treesAndFence.find('**/loc_eventSign'))
self.sign.wrtReparentTo(self.root)
self.avatarNodePath = NodePath('PartyCatchAvatarNodePath')
self.avatarNodePath.reparentTo(self.root)
self._avatarNodePathParentToken = 3
base.cr.parentMgr.registerParent(self._avatarNodePathParentToken, self.avatarNodePath)
self.toonSDs = {}
self.dropShadow = loader.loadModelOnce('phase_3/models/props/drop_shadow')
self.dropObjModels = {}
if loadModels:
self.__loadDropModels()
self.sndGoodCatch = base.loadSfx('phase_4/audio/sfx/SZ_DD_treasure.ogg')
self.sndOof = base.loadSfx('phase_4/audio/sfx/MG_cannon_hit_dirt.ogg')
self.sndAnvilLand = base.loadSfx('phase_4/audio/sfx/AA_drop_anvil_miss.ogg')
self.sndPerfect = base.loadSfx('phase_4/audio/sfx/ring_perfect.ogg')
self.__textGen = TextNode('partyCatchActivity')
self.__textGen.setFont(ToontownGlobals.getSignFont())
self.__textGen.setAlign(TextNode.ACenter)
self.activityFSM.request('Idle')
def __loadDropModels(self):
for objType in PartyGlobals.DropObjectTypes:
model = loader.loadModel(objType.modelPath)
self.dropObjModels[objType.name] = model
modelScales = {'apple': 0.7,
'orange': 0.7,
'pear': 0.5,
'coconut': 0.7,
'watermelon': 0.6,
'pineapple': 0.45}
if objType.name in modelScales:
model.setScale(modelScales[objType.name])
if objType == PartyGlobals.Name2DropObjectType['pear']:
model.setZ(-.6)
if objType == PartyGlobals.Name2DropObjectType['coconut']:
model.setP(180)
if objType == PartyGlobals.Name2DropObjectType['watermelon']:
model.setH(135)
model.setZ(-.5)
if objType == PartyGlobals.Name2DropObjectType['pineapple']:
model.setZ(-1.7)
if objType == PartyGlobals.Name2DropObjectType['anvil']:
model.setZ(-self.ObjRadius)
model.flattenStrong()
def unload(self):
DistributedPartyCatchActivity.notify.debug('unload')
self.finishAllDropIntervals()
self.destroyOrthoWalk()
DistributedPartyActivity.unload(self)
self.stopDropTask()
del self.activityFSM
del self.__textGen
for avId in self.toonSDs.keys():
if avId in self.toonSDs:
toonSD = self.toonSDs[avId]
toonSD.unload()
del self.toonSDs
self.treesAndFence.removeNode()
del self.treesAndFence
self.dropShadow.removeNode()
del self.dropShadow
base.cr.parentMgr.unregisterParent(self._avatarNodePathParentToken)
for model in self.dropObjModels.values():
model.removeNode()
del self.dropObjModels
del self.sndGoodCatch
del self.sndOof
del self.sndAnvilLand
del self.sndPerfect
def setStartTimestamp(self, timestamp32):
self.notify.info('setStartTimestamp(%s)' % (timestamp32,))
self._startTimestamp = globalClockDelta.networkToLocalTime(timestamp32, bits=32)
def getCurrentCatchActivityTime(self):
return globalClock.getFrameTime() - self._startTimestamp
def getObjModel(self, objName):
return self.dropObjModels[objName].copyTo(hidden)
def joinRequestDenied(self, reason):
DistributedPartyActivity.joinRequestDenied(self, reason)
base.cr.playGame.getPlace().fsm.request('walk')
def handleToonJoined(self, toonId):
if toonId not in self.toonSDs:
toonSD = PartyCatchActivityToonSD(toonId, self)
self.toonSDs[toonId] = toonSD
toonSD.load()
self.notify.debug('handleToonJoined : currentState = %s' % self.activityFSM.state)
self.cr.doId2do[toonId].useLOD(500)
if self.activityFSM.state == 'Active':
if toonId in self.toonSDs:
self.toonSDs[toonId].enter()
if base.localAvatar.doId == toonId:
base.localAvatar.b_setParent(self._avatarNodePathParentToken)
self.putLocalAvatarInActivity()
if toonId in self.toonSDs:
self.toonSDs[toonId].fsm.request('rules')
def handleToonExited(self, toonId):
self.notify.debug('handleToonExited( toonId=%s )' % toonId)
if toonId in self.cr.doId2do:
self.cr.doId2do[toonId].resetLOD()
if toonId in self.toonSDs:
self.toonSDs[toonId].fsm.request('notPlaying')
self.toonSDs[toonId].exit()
self.toonSDs[toonId].unload()
del self.toonSDs[toonId]
if base.localAvatar.doId == toonId:
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def takeLocalAvatarOutOfActivity(self):
self.notify.debug('localToon has left the circle')
camera.reparentTo(base.localAvatar)
base.localAvatar.startUpdateSmartCamera()
base.localAvatar.enableSmartCameraViews()
base.localAvatar.setCameraPositionByIndex(base.localAvatar.cameraIndex)
DistributedSmoothNode.activateSmoothing(1, 0)
def _enableCollisions(self):
DistributedPartyActivity._enableCollisions(self)
self._enteredTree = False
self.accept('enter' + self.catchTreeZoneEvent, self._toonMayHaveEnteredTree)
self.accept('again' + self.catchTreeZoneEvent, self._toonMayHaveEnteredTree)
self.accept('exit' + self.catchTreeZoneEvent, self._toonExitedTree)
self.accept(DistributedPartyCannonActivity.LOCAL_TOON_LANDED_EVENT, self._handleCannonLanded)
def _disableCollisions(self):
self.ignore(DistributedPartyCannonActivity.LOCAL_TOON_LANDED_EVENT)
self.ignore('enter' + self.catchTreeZoneEvent)
self.ignore('again' + self.catchTreeZoneEvent)
self.ignore('exit' + self.catchTreeZoneEvent)
DistributedPartyActivity._disableCollisions(self)
def _handleCannonLanded(self):
x = base.localAvatar.getX()
y = base.localAvatar.getY()
if x > self.x - self.StageHalfWidth and x < self.x + self.StageHalfWidth and y > self.y - self.StageHalfHeight and y < self.y + self.StageHalfHeight:
self._toonEnteredTree(None)
return
def _toonMayHaveEnteredTree(self, collEntry):
if self._enteredTree:
return
if base.localAvatar.controlManager.currentControls.getIsAirborne():
return
self._toonEnteredTree(collEntry)
def _toonEnteredTree(self, collEntry):
self.notify.debug('_toonEnteredTree : avid = %s' % base.localAvatar.doId)
self.notify.debug('_toonEnteredTree : currentState = %s' % self.activityFSM.state)
if self.isLocalToonInActivity():
return
if self.activityFSM.state == 'Active':
base.cr.playGame.getPlace().fsm.request('activity')
self.d_toonJoinRequest()
elif self.activityFSM.state == 'Idle':
base.cr.playGame.getPlace().fsm.request('activity')
self.d_toonJoinRequest()
self._enteredTree = True
def _toonExitedTree(self, collEntry):
self.notify.debug('_toonExitedTree : avid = %s' % base.localAvatar.doId)
self._enteredTree = False
if hasattr(base.cr.playGame.getPlace(), 'fsm') and self.activityFSM.state == 'Active' and self.isLocalToonInActivity():
if base.localAvatar.doId in self.toonSDs:
self.takeLocalAvatarOutOfActivity()
self.toonSDs[base.localAvatar.doId].fsm.request('notPlaying')
self.d_toonExitDemand()
def setToonsPlaying(self, toonIds):
self.notify.info('setToonsPlaying(%s)' % (toonIds,))
DistributedPartyActivity.setToonsPlaying(self, toonIds)
if self.isLocalToonInActivity() and base.localAvatar.doId not in toonIds:
if base.localAvatar.doId in self.toonSDs:
self.takeLocalAvatarOutOfActivity()
self.toonSDs[base.localAvatar.doId].fsm.request('notPlaying')
def __genText(self, text):
self.__textGen.setText(text)
return self.__textGen.generate()
def getNumPlayers(self):
return len(self.toonIds)
def defineConstants(self, forceNumPlayers = None):
DistributedPartyCatchActivity.notify.debug('defineConstants')
self.ShowObjSpheres = 0
self.ShowToonSpheres = 0
self.useGravity = True
self.trickShadows = True
if forceNumPlayers is None:
numPlayers = self.getNumPlayers()
else:
numPlayers = forceNumPlayers
self.calcDifficultyConstants(numPlayers)
DistributedPartyCatchActivity.notify.debug('ToonSpeed: %s' % self.ToonSpeed)
DistributedPartyCatchActivity.notify.debug('total drops: %s' % self.totalDrops)
DistributedPartyCatchActivity.notify.debug('numFruits: %s' % self.numFruits)
DistributedPartyCatchActivity.notify.debug('numAnvils: %s' % self.numAnvils)
self.ObjRadius = 1.0
dropRegionTable = PartyRegionDropPlacer.getDropRegionTable(numPlayers)
self.DropRows, self.DropColumns = len(dropRegionTable), len(dropRegionTable[0])
for objType in PartyGlobals.DropObjectTypes:
DistributedPartyCatchActivity.notify.debug('*** Object Type: %s' % objType.name)
objType.onscreenDuration = objType.onscreenDurMult * self.BaselineOnscreenDropDuration
DistributedPartyCatchActivity.notify.debug('onscreenDuration=%s' % objType.onscreenDuration)
v_0 = 0.0
t = objType.onscreenDuration
x_0 = self.MinOffscreenHeight
x = 0.0
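            # Solve the kinematics equation x = x_0 + v_0*t + (g*t^2)/2 for g so
            # that an object released at rest (v_0 = 0) from MinOffscreenHeight
            # reaches the ground (x = 0) exactly onscreenDuration seconds later.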
g = 2.0 * (x - x_0 - v_0 * t) / (t * t)
DistributedPartyCatchActivity.notify.debug('gravity=%s' % g)
objType.trajectory = Trajectory(0, Vec3(0, 0, x_0), Vec3(0, 0, v_0), gravMult=abs(g / Trajectory.gravity))
objType.fallDuration = objType.onscreenDuration + self.OffscreenTime
return
def grid2world(self, column, row):
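        # Map integer grid indices (column in [0, DropColumns-1], row in
        # [0, DropRows-1]) to stage-relative world coordinates: normalize each
        # index to [-1, 1], then scale by the stage half-extents.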
x = column / float(self.DropColumns - 1)
y = row / float(self.DropRows - 1)
x = x * 2.0 - 1.0
y = y * 2.0 - 1.0
x *= self.StageHalfWidth
y *= self.StageHalfHeight
return (x, y)
def showPosts(self):
self.hidePosts()
self.posts = [Toon.Toon(),
Toon.Toon(),
Toon.Toon(),
Toon.Toon()]
for i in xrange(len(self.posts)):
tree = self.posts[i]
tree.reparentTo(render)
x = self.StageHalfWidth
y = self.StageHalfHeight
if i > 1:
x = -x
if i % 2:
y = -y
tree.setPos(x + self.x, y + self.y, 0)
def hidePosts(self):
if hasattr(self, 'posts'):
for tree in self.posts:
tree.removeNode()
del self.posts
def showDropGrid(self):
self.hideDropGrid()
self.dropMarkers = []
for row in xrange(self.DropRows):
self.dropMarkers.append([])
rowList = self.dropMarkers[row]
for column in xrange(self.DropColumns):
toon = Toon.Toon()
toon.setDNA(base.localAvatar.getStyle())
toon.reparentTo(self.root)
toon.setScale(1.0 / 3)
x, y = self.grid2world(column, row)
toon.setPos(x, y, 0)
rowList.append(toon)
def hideDropGrid(self):
if hasattr(self, 'dropMarkers'):
for row in self.dropMarkers:
for marker in row:
marker.removeNode()
del self.dropMarkers
def handleToonDisabled(self, avId):
DistributedPartyCatchActivity.notify.debug('handleToonDisabled')
DistributedPartyCatchActivity.notify.debug('avatar ' + str(avId) + ' disabled')
if avId in self.toonSDs:
self.toonSDs[avId].exit(unexpectedExit=True)
del self.toonSDs[avId]
def turnOffSmoothingOnGuests(self):
pass
def setState(self, newState, timestamp):
self.notify.info('setState(%s, %s)' % (newState, timestamp))
DistributedPartyCatchActivity.notify.debug('setState( newState=%s, ... )' % newState)
DistributedPartyActivity.setState(self, newState, timestamp)
self.activityFSM.request(newState)
if newState == 'Active':
if base.localAvatar.doId != self.party.partyInfo.hostId:
if globalClock.getFrameCount() > self._generateFrame:
if base.localAvatar.getX() > self.x - self.StageHalfWidth and base.localAvatar.getX() < self.x + self.StageHalfWidth and base.localAvatar.getY() > self.y - self.StageHalfHeight and base.localAvatar.getY() < self.y + self.StageHalfHeight:
self._toonEnteredTree(None)
return
def putLocalAvatarInActivity(self):
if base.cr.playGame.getPlace() and hasattr(base.cr.playGame.getPlace(), 'fsm'):
base.cr.playGame.getPlace().fsm.request('activity', [False])
else:
self.notify.info("Avoided crash: toontown.parties.DistributedPartyCatchActivity:632, toontown.parties.DistributedPartyCatchActivity:1198, toontown.parties.activityFSMMixins:49, direct.fsm.FSM:423, AttributeError: 'NoneType' object has no attribute 'fsm'")
base.localAvatar.stopUpdateSmartCamera()
camera.reparentTo(self.treesAndFence)
camera.setPosHpr(0.0, -63.0, 30.0, 0.0, -20.0, 0.0)
if not hasattr(self, 'ltLegsCollNode'):
self.createCatchCollisions()
def createCatchCollisions(self):
radius = 0.7
handler = CollisionHandlerEvent()
handler.setInPattern('ltCatch%in')
self.ltLegsCollNode = CollisionNode('catchLegsCollNode')
self.ltLegsCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltHeadCollNode = CollisionNode('catchHeadCollNode')
self.ltHeadCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltLHandCollNode = CollisionNode('catchLHandCollNode')
self.ltLHandCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
self.ltRHandCollNode = CollisionNode('catchRHandCollNode')
self.ltRHandCollNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
legsCollNodepath = base.localAvatar.attachNewNode(self.ltLegsCollNode)
legsCollNodepath.hide()
head = base.localAvatar.getHeadParts().getPath(2)
headCollNodepath = head.attachNewNode(self.ltHeadCollNode)
headCollNodepath.hide()
lHand = base.localAvatar.getLeftHands()[0]
lHandCollNodepath = lHand.attachNewNode(self.ltLHandCollNode)
lHandCollNodepath.hide()
rHand = base.localAvatar.getRightHands()[0]
rHandCollNodepath = rHand.attachNewNode(self.ltRHandCollNode)
rHandCollNodepath.hide()
base.localAvatar.cTrav.addCollider(legsCollNodepath, handler)
base.localAvatar.cTrav.addCollider(headCollNodepath, handler)
base.localAvatar.cTrav.addCollider(lHandCollNodepath, handler)
        base.localAvatar.cTrav.addCollider(rHandCollNodepath, handler)
if self.ShowToonSpheres:
legsCollNodepath.show()
headCollNodepath.show()
lHandCollNodepath.show()
rHandCollNodepath.show()
self.ltLegsCollNode.addSolid(CollisionSphere(0, 0, radius, radius))
self.ltHeadCollNode.addSolid(CollisionSphere(0, 0, 0, radius))
self.ltLHandCollNode.addSolid(CollisionSphere(0, 0, 0, 2 * radius / 3.0))
self.ltRHandCollNode.addSolid(CollisionSphere(0, 0, 0, 2 * radius / 3.0))
self.toonCollNodes = [legsCollNodepath,
headCollNodepath,
lHandCollNodepath,
rHandCollNodepath]
def destroyCatchCollisions(self):
if not hasattr(self, 'ltLegsCollNode'):
return
for collNode in self.toonCollNodes:
while collNode.node().getNumSolids():
collNode.node().removeSolid(0)
base.localAvatar.cTrav.removeCollider(collNode)
del self.toonCollNodes
del self.ltLegsCollNode
del self.ltHeadCollNode
del self.ltLHandCollNode
del self.ltRHandCollNode
def timerExpired(self):
pass
def __handleCatch(self, generation, objNum):
DistributedPartyCatchActivity.notify.debug('catch: %s' % [generation, objNum])
if base.localAvatar.doId not in self.toonIds:
return
self.showCatch(base.localAvatar.doId, generation, objNum)
objName = self._id2gen[generation].droppedObjNames[objNum]
objTypeId = PartyGlobals.Name2DOTypeId[objName]
self.sendUpdate('claimCatch', [generation, objNum, objTypeId])
self.finishDropInterval(generation, objNum)
def showCatch(self, avId, generation, objNum):
if avId not in self.toonSDs:
return
isLocal = avId == base.localAvatar.doId
if generation not in self._id2gen:
return
if not self._id2gen[generation].hasBeenScheduled:
return
objName = self._id2gen[generation].droppedObjNames[objNum]
objType = PartyGlobals.Name2DropObjectType[objName]
if objType.good:
if objNum not in self._id2gen[generation].droppedObjCaught:
if isLocal:
base.playSfx(self.sndGoodCatch)
fruit = self.getObjModel(objName)
toon = self.getAvatar(avId)
rHand = toon.getRightHands()[1]
self.toonSDs[avId].eatFruit(fruit, rHand)
else:
self.toonSDs[avId].fsm.request('fallForward')
self._id2gen[generation].droppedObjCaught[objNum] = 1
def setObjectCaught(self, avId, generation, objNum):
self.notify.info('setObjectCaught(%s, %s, %s)' % (avId, generation, objNum))
if self.activityFSM.state != 'Active':
DistributedPartyCatchActivity.notify.warning('ignoring msg: object %s caught by %s' % (objNum, avId))
return
isLocal = avId == base.localAvatar.doId
if not isLocal:
DistributedPartyCatchActivity.notify.debug('AI: avatar %s caught %s' % (avId, objNum))
self.finishDropInterval(generation, objNum)
self.showCatch(avId, generation, objNum)
self._scheduleGenerations()
gen = self._id2gen[generation]
if gen.hasBeenScheduled:
objName = gen.droppedObjNames[objNum]
if PartyGlobals.Name2DropObjectType[objName].good:
if hasattr(self, 'fruitsCaught'):
self.fruitsCaught += 1
def finishDropInterval(self, generation, objNum):
if hasattr(self, 'dropIntervals'):
if (generation, objNum) in self.dropIntervals:
self.dropIntervals[generation, objNum].finish()
def finishAllDropIntervals(self):
if hasattr(self, 'dropIntervals'):
for dropInterval in self.dropIntervals.values():
dropInterval.finish()
def setGenerations(self, generations):
self.notify.info('setGenerations(%s)' % (generations,))
gen2t = {}
gen2nt = {}
gen2np = {}
for id, timestamp32, numPlayers in generations:
gen2t[id] = globalClockDelta.networkToLocalTime(timestamp32, bits=32) - self._startTimestamp
gen2nt[id] = timestamp32
gen2np[id] = numPlayers
ids = self._id2gen.keys()
for id in ids:
if id not in gen2t:
self._removeGeneration(id)
for id in gen2t:
if id not in self._id2gen:
self._addGeneration(id, gen2t[id], gen2nt[id], gen2np[id])
def scheduleDrops(self, genId = None):
if genId is None:
genId = self.getCurGeneration()
gen = self._id2gen[genId]
if gen.hasBeenScheduled:
return
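        # Pick which fruit this generation drops: the choice cycles through
        # fruitNames once per CatchActivityDuration, keyed off the generation's
        # start time.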
fruitIndex = int((gen.startTime + 0.5 * self.DropPeriod) / PartyGlobals.CatchActivityDuration)
fruitNames = ['apple',
'orange',
'pear',
'coconut',
'watermelon',
'pineapple']
fruitName = fruitNames[fruitIndex % len(fruitNames)]
rng = RandomNumGen(genId + self._generationSeedBase)
gen.droppedObjNames = [fruitName] * self.numFruits + ['anvil'] * self.numAnvils
rng.shuffle(gen.droppedObjNames)
dropPlacer = PartyRegionDropPlacer(self, gen.numPlayers, genId, gen.droppedObjNames, startTime=gen.startTime)
gen.numItemsDropped = 0
tIndex = gen.startTime % PartyGlobals.CatchActivityDuration
tPercent = float(tIndex) / PartyGlobals.CatchActivityDuration
gen.numItemsDropped += dropPlacer.skipPercent(tPercent)
while not dropPlacer.doneDropping(continuous=True):
nextDrop = dropPlacer.getNextDrop()
gen.dropSchedule.append(nextDrop)
gen.hasBeenScheduled = True
return
def startDropTask(self):
taskMgr.add(self.dropTask, self.DropTaskName)
def stopDropTask(self):
taskMgr.remove(self.DropTaskName)
def _scheduleGenerations(self):
curT = self.getCurrentCatchActivityTime()
genIndex = self._orderedGenerationIndex
newGenIndex = genIndex
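        # Advance the current-generation pointer past every generation whose
        # start time has already arrived, scheduling each one's drops the
        # first time it becomes current.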
while genIndex is None or genIndex < len(self._orderedGenerations) - 1:
if genIndex is None:
nextGenIndex = 0
else:
nextGenIndex = genIndex + 1
nextGenId = self._orderedGenerations[nextGenIndex]
nextGen = self._id2gen[nextGenId]
startT = nextGen.startTime
if curT >= startT:
newGenIndex = nextGenIndex
if not nextGen.hasBeenScheduled:
self.defineConstants(forceNumPlayers=nextGen.numPlayers)
self.scheduleDrops(genId=self._orderedGenerations[nextGenIndex])
genIndex = nextGenIndex
self._orderedGenerationIndex = newGenIndex
return
def dropTask(self, task):
self._scheduleGenerations()
curT = self.getCurrentCatchActivityTime()
if self._orderedGenerationIndex is not None:
i = self._orderedGenerationIndex
genIndex = self._orderedGenerations[i]
gen = self._id2gen[genIndex]
while len(gen.dropSchedule) > 0 and gen.dropSchedule[0][0] < curT:
drop = gen.dropSchedule[0]
gen.dropSchedule = gen.dropSchedule[1:]
dropTime, objName, dropCoords = drop
objNum = gen.numItemsDropped
x, y = self.grid2world(*dropCoords)
dropIval = self.getDropIval(x, y, objName, genIndex, objNum)
def cleanup(generation, objNum, self = self):
del self.dropIntervals[generation, objNum]
dropIval.append(Func(Functor(cleanup, genIndex, objNum)))
self.dropIntervals[genIndex, objNum] = dropIval
gen.numItemsDropped += 1
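                # Start the interval offset by however far we already are past
                # its scheduled drop time, so late drops stay in sync.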
dropIval.start(curT - dropTime)
self._lastDropTime = dropTime
return Task.cont
def getDropIval(self, x, y, dropObjName, generation, num):
objType = PartyGlobals.Name2DropObjectType[dropObjName]
id = (generation, num)
dropNode = hidden.attachNewNode('catchDropNode%s' % (id,))
dropNode.setPos(x, y, 0)
shadow = self.dropShadow.copyTo(dropNode)
shadow.setZ(PartyGlobals.CatchDropShadowHeight)
shadow.setColor(1, 1, 1, 1)
object = self.getObjModel(dropObjName)
object.reparentTo(hidden)
if dropObjName in ['watermelon', 'anvil']:
objH = object.getH()
absDelta = {'watermelon': 12,
'anvil': 15}[dropObjName]
delta = (self.randomNumGen.random() * 2.0 - 1.0) * absDelta
newH = objH + delta
else:
newH = self.randomNumGen.random() * 360.0
object.setH(newH)
sphereName = 'FallObj%s' % (id,)
radius = self.ObjRadius
if objType.good:
radius *= lerp(1.0, 1.3, 0.5)
collSphere = CollisionSphere(0, 0, 0, radius)
collSphere.setTangible(0)
collNode = CollisionNode(sphereName)
collNode.setCollideMask(PartyGlobals.CatchActivityBitmask)
collNode.addSolid(collSphere)
collNodePath = object.attachNewNode(collNode)
collNodePath.hide()
if self.ShowObjSpheres:
collNodePath.show()
catchEventName = 'ltCatch' + sphereName
def eatCollEntry(forward, collEntry):
forward()
self.accept(catchEventName, Functor(eatCollEntry, Functor(self.__handleCatch, id[0], id[1])))
def cleanup(self = self, dropNode = dropNode, id = id, event = catchEventName):
self.ignore(event)
dropNode.removeNode()
duration = objType.fallDuration
onscreenDuration = objType.onscreenDuration
targetShadowScale = 0.3
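        # With trickShadows enabled, the drop shadow grows in two stages:
        # quickly to an intermediate scale while the object is still offscreen,
        # then the rest of the way over the remainder of the fall.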
if self.trickShadows:
intermedScale = targetShadowScale * (self.OffscreenTime / self.BaselineDropDuration)
shadowScaleIval = Sequence(LerpScaleInterval(shadow, self.OffscreenTime, intermedScale, startScale=0))
shadowScaleIval.append(LerpScaleInterval(shadow, duration - self.OffscreenTime, targetShadowScale, startScale=intermedScale))
else:
shadowScaleIval = LerpScaleInterval(shadow, duration, targetShadowScale, startScale=0)
targetShadowAlpha = 0.4
shadowAlphaIval = LerpColorScaleInterval(shadow, self.OffscreenTime, Point4(1, 1, 1, targetShadowAlpha), startColorScale=Point4(1, 1, 1, 0))
shadowIval = Parallel(shadowScaleIval, shadowAlphaIval)
if self.useGravity:
def setObjPos(t, objType = objType, object = object):
z = objType.trajectory.calcZ(t)
object.setZ(z)
setObjPos(0)
dropIval = LerpFunctionInterval(setObjPos, fromData=0, toData=onscreenDuration, duration=onscreenDuration)
else:
startPos = Point3(0, 0, self.MinOffscreenHeight)
object.setPos(startPos)
dropIval = LerpPosInterval(object, onscreenDuration, Point3(0, 0, 0), startPos=startPos, blendType='easeIn')
ival = Sequence(Func(Functor(dropNode.reparentTo, self.root)), Parallel(Sequence(WaitInterval(self.OffscreenTime), Func(Functor(object.reparentTo, dropNode)), dropIval), shadowIval), Func(cleanup), name='drop%s' % (id,))
if objType == PartyGlobals.Name2DropObjectType['anvil']:
ival.append(Func(self.playAnvil))
return ival
def playAnvil(self):
if base.localAvatar.doId in self.toonIds:
base.playSfx(self.sndAnvilLand)
def initOrthoWalk(self):
DistributedPartyCatchActivity.notify.debug('startOrthoWalk')
def doCollisions(oldPos, newPos, self = self):
x = bound(newPos[0], self.StageHalfWidth, -self.StageHalfWidth)
y = bound(newPos[1], self.StageHalfHeight, -self.StageHalfHeight)
newPos.setX(x)
newPos.setY(y)
return newPos
orthoDrive = OrthoDrive(self.ToonSpeed, instantTurn=True)
self.orthoWalk = OrthoWalk(orthoDrive, broadcast=True)
def destroyOrthoWalk(self):
DistributedPartyCatchActivity.notify.debug('destroyOrthoWalk')
if hasattr(self, 'orthoWalk'):
self.orthoWalk.stop()
self.orthoWalk.destroy()
del self.orthoWalk
def startIdle(self):
DistributedPartyCatchActivity.notify.debug('startIdle')
def finishIdle(self):
DistributedPartyCatchActivity.notify.debug('finishIdle')
def startActive(self):
DistributedPartyCatchActivity.notify.debug('startActive')
for avId in self.toonIds:
if avId in self.toonSDs:
toonSD = self.toonSDs[avId]
toonSD.enter()
toonSD.fsm.request('normal')
self.fruitsCaught = 0
self.dropIntervals = {}
self.startDropTask()
if base.localAvatar.doId in self.toonIds:
self.putLocalAvatarInActivity()
def finishActive(self):
DistributedPartyCatchActivity.notify.debug('finishActive')
self.stopDropTask()
if hasattr(self, 'finishIval'):
self.finishIval.pause()
del self.finishIval
if base.localAvatar.doId in self.toonIds:
self.takeLocalAvatarOutOfActivity()
for ival in self.dropIntervals.values():
ival.finish()
del self.dropIntervals
def startConclusion(self):
DistributedPartyCatchActivity.notify.debug('startConclusion')
for avId in self.toonIds:
if avId in self.toonSDs:
toonSD = self.toonSDs[avId]
toonSD.fsm.request('notPlaying')
self.destroyCatchCollisions()
if base.localAvatar.doId not in self.toonIds:
return
else:
self.localToonExiting()
if self.fruitsCaught >= self.numFruits:
finishText = TTLocalizer.PartyCatchActivityFinishPerfect
else:
finishText = TTLocalizer.PartyCatchActivityFinish
perfectTextSubnode = hidden.attachNewNode(self.__genText(finishText))
perfectText = hidden.attachNewNode('perfectText')
perfectTextSubnode.reparentTo(perfectText)
frame = self.__textGen.getCardActual()
offsetY = -abs(frame[2] + frame[3]) / 2.0
perfectTextSubnode.setPos(0, 0, offsetY)
perfectText.setColor(1, 0.1, 0.1, 1)
def fadeFunc(t, text = perfectText):
text.setColorScale(1, 1, 1, t)
def destroyText(text = perfectText):
text.removeNode()
textTrack = Sequence(Func(perfectText.reparentTo, aspect2d), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=0.3, startScale=0.0), LerpFunctionInterval(fadeFunc, fromData=0.0, toData=1.0, duration=0.5)), Wait(2.0), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=1.0), LerpFunctionInterval(fadeFunc, fromData=1.0, toData=0.0, duration=0.5, blendType='easeIn')), Func(destroyText), WaitInterval(0.5))
soundTrack = SoundInterval(self.sndPerfect)
self.finishIval = Parallel(textTrack, soundTrack)
self.finishIval.start()
def finishConclusion(self):
DistributedPartyCatchActivity.notify.debug('finishConclusion')
if base.localAvatar.doId in self.toonIds:
self.takeLocalAvatarOutOfActivity()
base.cr.playGame.getPlace().fsm.request('walk')
def showJellybeanReward(self, earnedAmount, jarAmount, message):
if earnedAmount > 0:
DistributedPartyActivity.showJellybeanReward(self, earnedAmount, jarAmount, message)
else:
base.cr.playGame.getPlace().fsm.request('walk')
|
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
import fnmatch
import re
from collections import defaultdict
from whoosh import matching
from whoosh.analysis import Token
from whoosh.compat import bytes_type, text_type, u
from whoosh.lang.morph_en import variations
from whoosh.query import qcore
class Term(qcore.Query):
"""Matches documents containing the given term (fieldname+text pair).
>>> Term("content", u"render")
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float)
def __init__(self, fieldname, text, boost=1.0, minquality=None):
self.fieldname = fieldname
self.text = text
self.boost = boost
self.minquality = minquality
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text
and self.boost == other.boost)
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1.0:
r += ", boost=%s" % self.boost
r += ")"
return r
def __unicode__(self):
t = u("%s:%s") % (self.fieldname, self.text)
if self.boost != 1:
t += u("^") + text_type(self.boost)
return t
__str__ = __unicode__
def __hash__(self):
return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
def has_terms(self):
return True
def tokens(self, boost=1.0):
yield Token(fieldname=self.fieldname, text=self.text,
boost=boost * self.boost, startchar=self.startchar,
endchar=self.endchar, chars=True)
def terms(self, phrases=False):
if self.field():
yield (self.field(), self.text)
def replace(self, fieldname, oldtext, newtext):
q = copy.copy(self)
if q.fieldname == fieldname and q.text == oldtext:
q.text = newtext
return q
def estimate_size(self, ixreader):
fieldname = self.fieldname
if fieldname not in ixreader.schema:
return 0
field = ixreader.schema[fieldname]
try:
text = field.to_bytes(self.text)
except ValueError:
return 0
return ixreader.doc_frequency(fieldname, text)
def matcher(self, searcher, context=None):
fieldname = self.fieldname
text = self.text
if fieldname not in searcher.schema:
return matching.NullMatcher()
field = searcher.schema[fieldname]
try:
text = field.to_bytes(text)
except ValueError:
return matching.NullMatcher()
if (self.fieldname, text) in searcher.reader():
if context is None:
w = searcher.weighting
else:
w = context.weighting
m = searcher.postings(self.fieldname, text, weighting=w)
if self.minquality:
m.set_min_quality(self.minquality)
if self.boost != 1.0:
m = matching.WrappingMatcher(m, boost=self.boost)
return m
else:
return matching.NullMatcher()
class MultiTerm(qcore.Query):
"""Abstract base class for queries that operate on multiple terms in the
same field.
"""
constantscore = False
def _btexts(self, ixreader):
raise NotImplementedError(self.__class__.__name__)
def expanded_terms(self, ixreader, phrases=False):
fieldname = self.field()
if fieldname:
for btext in self._btexts(ixreader):
yield (fieldname, btext)
def tokens(self, boost=1.0):
yield Token(fieldname=self.fieldname, text=self.text,
boost=boost * self.boost, startchar=self.startchar,
endchar=self.endchar, chars=True)
def simplify(self, ixreader):
if self.fieldname not in ixreader.schema:
return qcore.NullQuery()
field = ixreader.schema[self.fieldname]
existing = []
for btext in sorted(set(self._btexts(ixreader))):
text = field.from_bytes(btext)
existing.append(Term(self.fieldname, text, boost=self.boost))
if len(existing) == 1:
return existing[0]
elif existing:
from whoosh.query import Or
return Or(existing)
else:
return qcore.NullQuery
def estimate_size(self, ixreader):
return sum(ixreader.doc_frequency(self.fieldname, btext)
for btext in self._btexts(ixreader))
def estimate_min_size(self, ixreader):
return min(ixreader.doc_frequency(self.fieldname, text)
for text in self._btexts(ixreader))
def matcher(self, searcher, context=None):
from whoosh.query import Or
from whoosh.util import now
fieldname = self.fieldname
constantscore = self.constantscore
reader = searcher.reader()
qs = [Term(fieldname, word) for word in self._btexts(reader)]
if not qs:
return matching.NullMatcher()
if len(qs) == 1:
# If there's only one term, just use it
m = qs[0].matcher(searcher, context)
else:
if constantscore:
# To tell the sub-query that score doesn't matter, set weighting
# to None
if context:
context = context.set(weighting=None)
else:
from whoosh.searching import SearchContext
context = SearchContext(weighting=None)
# Or the terms together
m = Or(qs, boost=self.boost).matcher(searcher, context)
return m
class PatternQuery(MultiTerm):
"""An intermediate base class for common methods of Prefix and Wildcard.
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float)
def __init__(self, fieldname, text, boost=1.0, constantscore=True):
self.fieldname = fieldname
self.text = text
self.boost = boost
self.constantscore = constantscore
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text and self.boost == other.boost
and self.constantscore == other.constantscore)
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1:
r += ", boost=%s" % self.boost
r += ")"
return r
def __hash__(self):
return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
^ hash(self.constantscore))
def _get_pattern(self):
raise NotImplementedError
def _find_prefix(self, text):
# Subclasses/instances should set the SPECIAL_CHARS attribute to a set
# of characters that mark the end of the literal prefix
specialchars = self.SPECIAL_CHARS
i = 0
for i, char in enumerate(text):
if char in specialchars:
break
return text[:i]
def _btexts(self, ixreader):
field = ixreader.schema[self.fieldname]
exp = re.compile(self._get_pattern())
prefix = self._find_prefix(self.text)
if prefix:
candidates = ixreader.expand_prefix(self.fieldname, prefix)
else:
candidates = ixreader.lexicon(self.fieldname)
from_bytes = field.from_bytes
for btext in candidates:
text = from_bytes(btext)
if exp.match(text):
yield btext
class Prefix(PatternQuery):
"""Matches documents that contain any terms that start with the given text.
>>> # Match documents containing words starting with 'comp'
>>> Prefix("content", u"comp")
"""
def __unicode__(self):
return "%s:%s*" % (self.fieldname, self.text)
__str__ = __unicode__
def _btexts(self, ixreader):
return ixreader.expand_prefix(self.fieldname, self.text)
def matcher(self, searcher, context=None):
if self.text == "":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
class Wildcard(PatternQuery):
"""Matches documents that contain any terms that match a "glob" pattern.
See the Python ``fnmatch`` module for information about globs.
>>> Wildcard("content", u"in*f?x")
"""
SPECIAL_CHARS = frozenset("*?[")
def __unicode__(self):
return "%s:%s" % (self.fieldname, self.text)
__str__ = __unicode__
def _get_pattern(self):
return fnmatch.translate(self.text)
def normalize(self):
# If there are no wildcard characters in this "wildcard", turn it into
# a simple Term
text = self.text
if text == "*":
from whoosh.query import Every
return Every(self.fieldname, boost=self.boost)
if "*" not in text and "?" not in text:
# If no wildcard chars, convert to a normal term.
return Term(self.fieldname, self.text, boost=self.boost)
elif ("?" not in text and text.endswith("*")
and text.find("*") == len(text) - 1):
# If the only wildcard char is an asterisk at the end, convert to a
# Prefix query.
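            # (illustrative: u"rend*" becomes Prefix(fieldname, u"rend"))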
return Prefix(self.fieldname, self.text[:-1], boost=self.boost)
else:
return self
def matcher(self, searcher, context=None):
if self.text == "*":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
# _btexts() implemented in PatternQuery
class Regex(PatternQuery):
"""Matches documents that contain any terms that match a regular
expression. See the Python ``re`` module for information about regular
expressions.
"""
SPECIAL_CHARS = frozenset("{}()[].?*+^$\\")
def __unicode__(self):
return '%s:r"%s"' % (self.fieldname, self.text)
__str__ = __unicode__
def _get_pattern(self):
return self.text
def _find_prefix(self, text):
if "|" in text:
return ""
if text.startswith("^"):
text = text[1:]
elif text.startswith("\\A"):
text = text[2:]
prefix = PatternQuery._find_prefix(self, text)
lp = len(prefix)
if lp < len(text) and text[lp] in "*?":
# we stripped something starting from * or ? - they both MAY mean
# "0 times". As we had stripped starting from FIRST special char,
# that implies there were only ordinary chars left of it. Thus,
# the very last of them is not part of the real prefix:
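            # (illustrative: for r"rende?r" the prefix found above is "rende",
            # but the "e" before "?" may occur zero times, so only "rend" is a
            # safe literal prefix)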
prefix = prefix[:-1]
return prefix
def matcher(self, searcher, context=None):
if self.text == ".*":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
# _btexts() implemented in PatternQuery
class ExpandingTerm(MultiTerm):
"""Intermediate base class for queries such as FuzzyTerm and Variations
that expand into multiple queries, but come from a single term.
"""
def has_terms(self):
return True
def terms(self, phrases=False):
if self.field():
yield (self.field(), self.text)
class FuzzyTerm(ExpandingTerm):
"""Matches documents containing words similar to the given term.
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float,
maxdist=float, prefixlength=int)
def __init__(self, fieldname, text, boost=1.0, maxdist=1,
prefixlength=1, constantscore=True):
"""
:param fieldname: The name of the field to search.
:param text: The text to search for.
:param boost: A boost factor to apply to scores of documents matching
this query.
:param maxdist: The maximum edit distance from the given text.
:param prefixlength: The matched terms must share this many initial
characters with 'text'. For example, if text is "light" and
prefixlength is 2, then only terms starting with "li" are checked
for similarity.
"""
self.fieldname = fieldname
self.text = text
self.boost = boost
self.maxdist = maxdist
self.prefixlength = prefixlength
self.constantscore = constantscore
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text
and self.maxdist == other.maxdist
and self.prefixlength == other.prefixlength
and self.boost == other.boost
and self.constantscore == other.constantscore)
def __repr__(self):
r = "%s(%r, %r, boost=%f, maxdist=%d, prefixlength=%d)"
return r % (self.__class__.__name__, self.fieldname, self.text,
self.boost, self.maxdist, self.prefixlength)
def __unicode__(self):
r = u("%s:%s") % (self.fieldname, self.text) + u("~")
if self.maxdist > 1:
r += u("%d") % self.maxdist
if self.boost != 1.0:
r += u("^%f") % self.boost
return r
__str__ = __unicode__
def __hash__(self):
return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
^ hash(self.maxdist) ^ hash(self.prefixlength)
^ hash(self.constantscore))
def _btexts(self, ixreader):
return ixreader.terms_within(self.fieldname, self.text, self.maxdist,
prefix=self.prefixlength)
class Variations(ExpandingTerm):
"""Query that automatically searches for morphological variations of the
given word in the same field.
"""
def __init__(self, fieldname, text, boost=1.0):
self.fieldname = fieldname
self.text = text
self.boost = boost
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1:
r += ", boost=%s" % self.boost
r += ")"
return r
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text and self.boost == other.boost)
def __hash__(self):
return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
def _btexts(self, ixreader):
fieldname = self.fieldname
to_bytes = ixreader.schema[fieldname].to_bytes
for word in variations(self.text):
try:
btext = to_bytes(word)
except ValueError:
continue
if (fieldname, btext) in ixreader:
yield btext
def __unicode__(self):
return u("%s:<%s>") % (self.fieldname, self.text)
__str__ = __unicode__
def replace(self, fieldname, oldtext, newtext):
q = copy.copy(self)
if q.fieldname == fieldname and q.text == oldtext:
q.text = newtext
return q
|
|
import os
import sys
import difflib
import __builtin__
import re
import py_compile
import pydoc
import contextlib
import inspect
import keyword
import pkgutil
import unittest
import xml.etree
import types
import test.test_support
import xml.etree.ElementTree
from collections import namedtuple
from test.script_helper import assert_python_ok
from test.test_support import (TESTFN, rmtree, reap_children, captured_stdout,
captured_stderr, requires_docstrings)
from test import pydoc_mod
if test.test_support.HAVE_DOCSTRINGS:
expected_data_docstrings = (
'dictionary for instance variables (if defined)',
'list of weak references to the object (if defined)',
)
else:
expected_data_docstrings = ('', '')
expected_text_pattern = \
"""
NAME
test.pydoc_mod - This is a test module for test_pydoc
FILE
%s
%s
CLASSES
__builtin__.object
B
C
A
\x20\x20\x20\x20
class A
| Hello and goodbye
|\x20\x20
| Methods defined here:
|\x20\x20
| __init__()
| Wow, I have no function!
\x20\x20\x20\x20
class B(__builtin__.object)
| Data descriptors defined here:
|\x20\x20
| __dict__%s
|\x20\x20
| __weakref__%s
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|\x20\x20
| NO_MEANING = 'eggs'
\x20\x20\x20\x20
class C(__builtin__.object)
| Methods defined here:
|\x20\x20
| get_answer(self)
| Return say_no()
|\x20\x20
| is_it_true(self)
| Return self.get_answer()
|\x20\x20
| say_no(self)
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors defined here:
|\x20\x20
| __dict__
| dictionary for instance variables (if defined)
|\x20\x20
| __weakref__
| list of weak references to the object (if defined)
FUNCTIONS
doc_func()
This function solves all of the world's problems:
hunger
lack of Python
war
\x20\x20\x20\x20
nodoc_func()
DATA
__author__ = 'Benjamin Peterson'
__credits__ = 'Nobody'
__version__ = '1.2.3.4'
VERSION
1.2.3.4
AUTHOR
Benjamin Peterson
CREDITS
Nobody
""".strip()
expected_text_data_docstrings = tuple('\n | ' + s if s else ''
for s in expected_data_docstrings)
expected_html_pattern = \
"""
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="#7799ee">
<td valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"> <br><big><big><strong><a href="test.html"><font color="#ffffff">test</font></a>.pydoc_mod</strong></big></big> (version 1.2.3.4)</font></td
><td align=right valign=bottom
><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="file:%s">%s</a>%s</font></td></tr></table>
<p><tt>This is a test module for test_pydoc</tt></p>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ee77aa">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ee77aa"><tt> </tt></td><td> </td>
<td width="100%%"><dl>
<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#B">B</a>
</font></dt><dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#C">C</a>
</font></dt></dl>
</dd>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#A">A</a>
</font></dt></dl>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="A">class <strong>A</strong></a></font></td></tr>
\x20\x20\x20\x20
<tr bgcolor="#ffc8d8"><td rowspan=2><tt> </tt></td>
<td colspan=2><tt>Hello and goodbye<br> </tt></td></tr>
<tr><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="A-__init__"><strong>__init__</strong></a>()</dt><dd><tt>Wow, I have no function!</tt></dd></dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="B">class <strong>B</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<hr>
Data and other attributes defined here:<br>
<dl><dt><strong>NO_MEANING</strong> = 'eggs'</dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="C">class <strong>C</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="C-get_answer"><strong>get_answer</strong></a>(self)</dt><dd><tt>Return <a href="#C-say_no">say_no</a>()</tt></dd></dl>
<dl><dt><a name="C-is_it_true"><strong>is_it_true</strong></a>(self)</dt><dd><tt>Return self.<a href="#C-get_answer">get_answer</a>()</tt></dd></dl>
<dl><dt><a name="C-say_no"><strong>say_no</strong></a>(self)</dt></dl>
<hr>
Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary for instance variables (if defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list of weak references to the object (if defined)</tt></dd>
</dl>
</td></tr></table></td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#eeaa77"><tt> </tt></td><td> </td>
<td width="100%%"><dl><dt><a name="-doc_func"><strong>doc_func</strong></a>()</dt><dd><tt>This function solves all of the world's problems:<br>
hunger<br>
lack of Python<br>
war</tt></dd></dl>
<dl><dt><a name="-nodoc_func"><strong>nodoc_func</strong></a>()</dt></dl>
</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#55aa55">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#55aa55"><tt> </tt></td><td> </td>
<td width="100%%"><strong>__author__</strong> = 'Benjamin Peterson'<br>
<strong>__credits__</strong> = 'Nobody'<br>
<strong>__version__</strong> = '1.2.3.4'</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Author</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Benjamin Peterson</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Credits</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Nobody</td></tr></table>
""".strip()
expected_html_data_docstrings = tuple(s.replace(' ', ' ')
for s in expected_data_docstrings)
# output pattern for missing module
missing_pattern = "no Python documentation found for '%s'"
# output pattern for module with bad imports
badimport_pattern = "problem in %s - <type 'exceptions.ImportError'>: No module named %s"
def run_pydoc(module_name, *args, **env):
"""
Runs pydoc on the specified module. Returns the stripped
output of pydoc.
"""
args = args + (module_name,)
# do not write bytecode files to avoid caching errors
rc, out, err = assert_python_ok('-B', pydoc.__file__, *args, **env)
return out.strip()
def get_pydoc_html(module):
"Returns pydoc generated output as html"
doc = pydoc.HTMLDoc()
output = doc.docmodule(module)
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "<br><a href=\"" + loc + "\">Module Docs</a>"
return output.strip(), loc
def get_pydoc_link(module):
"Returns a documentation web link of a module"
dirname = os.path.dirname
basedir = dirname(dirname(__file__))
doc = pydoc.TextDoc()
loc = doc.getdocloc(module, basedir=basedir)
return loc
def get_pydoc_text(module):
"Returns pydoc generated output as text"
doc = pydoc.TextDoc()
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "\nMODULE DOCS\n " + loc + "\n"
output = doc.docmodule(module)
    # clean up the extra text formatting that pydoc performs
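    # (pydoc emphasizes text with backspace overstrikes such as "N\bN"; the
    # substitution below drops each backspace together with the character that
    # follows it)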
patt = re.compile('\b.')
output = patt.sub('', output)
return output.strip(), loc
def print_diffs(text1, text2):
"Prints unified diffs for two texts"
lines1 = text1.splitlines(True)
lines2 = text2.splitlines(True)
diffs = difflib.unified_diff(lines1, lines2, n=0, fromfile='expected',
tofile='got')
print '\n' + ''.join(diffs)
class PydocBaseTest(unittest.TestCase):
def _restricted_walk_packages(self, walk_packages, path=None):
"""
A version of pkgutil.walk_packages() that will restrict itself to
a given path.
"""
default_path = path or [os.path.dirname(__file__)]
def wrapper(path=None, prefix='', onerror=None):
return walk_packages(path or default_path, prefix, onerror)
return wrapper
@contextlib.contextmanager
def restrict_walk_packages(self, path=None):
walk_packages = pkgutil.walk_packages
pkgutil.walk_packages = self._restricted_walk_packages(walk_packages,
path)
try:
yield
finally:
pkgutil.walk_packages = walk_packages
class PydocDocTest(unittest.TestCase):
@requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_html_doc(self):
result, doc_loc = get_pydoc_html(pydoc_mod)
mod_file = inspect.getabsfile(pydoc_mod)
if sys.platform == 'win32':
import nturl2path
mod_url = nturl2path.pathname2url(mod_file)
else:
mod_url = mod_file
expected_html = expected_html_pattern % (
(mod_url, mod_file, doc_loc) +
expected_html_data_docstrings)
if result != expected_html:
print_diffs(expected_html, result)
self.fail("outputs are not equal, see diff above")
@requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_text_doc(self):
result, doc_loc = get_pydoc_text(pydoc_mod)
expected_text = expected_text_pattern % (
(inspect.getabsfile(pydoc_mod), doc_loc) +
expected_text_data_docstrings)
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
def test_mixed_case_module_names_are_lower_cased(self):
# issue16484
doc_link = get_pydoc_link(xml.etree.ElementTree)
self.assertIn('xml.etree.elementtree', doc_link)
def test_issue8225(self):
# Test issue8225 to ensure no doc link appears for xml.etree
result, doc_loc = get_pydoc_text(xml.etree)
self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")
def test_getpager_with_stdin_none(self):
previous_stdin = sys.stdin
try:
sys.stdin = None
pydoc.getpager() # Shouldn't fail.
finally:
sys.stdin = previous_stdin
def test_non_str_name(self):
# issue14638
# Treat illegal (non-str) name like no name
class A:
__name__ = 42
class B:
pass
adoc = pydoc.render_doc(A())
bdoc = pydoc.render_doc(B())
self.assertEqual(adoc.replace("A", "B"), bdoc)
def test_not_here(self):
missing_module = "test.i_am_not_here"
result = run_pydoc(missing_module)
expected = missing_pattern % missing_module
self.assertEqual(expected, result,
"documentation for missing module found")
def test_input_strip(self):
missing_module = " test.i_am_not_here "
result = run_pydoc(missing_module)
expected = missing_pattern % missing_module.strip()
self.assertEqual(expected, result,
"white space was not stripped from module name "
"or other error output mismatch")
def test_stripid(self):
# test with strings, other implementations might have different repr()
stripid = pydoc.stripid
# strip the id
self.assertEqual(stripid('<function stripid at 0x88dcee4>'),
'<function stripid>')
self.assertEqual(stripid('<function stripid at 0x01F65390>'),
'<function stripid>')
# nothing to strip, return the same text
self.assertEqual(stripid('42'), '42')
self.assertEqual(stripid("<type 'exceptions.Exception'>"),
"<type 'exceptions.Exception'>")
def test_synopsis(self):
with test.test_support.temp_cwd() as test_dir:
init_path = os.path.join(test_dir, 'dt.py')
with open(init_path, 'w') as fobj:
fobj.write('''\
"""
my doc
second line
"""
foo = 1
''')
py_compile.compile(init_path)
synopsis = pydoc.synopsis(init_path, {})
self.assertEqual(synopsis, 'my doc')
@unittest.skipIf(sys.flags.optimize >= 2,
'Docstrings are omitted with -OO and above')
def test_synopsis_sourceless_empty_doc(self):
with test.test_support.temp_cwd() as test_dir:
init_path = os.path.join(test_dir, 'foomod42.py')
cached_path = os.path.join(test_dir, 'foomod42.pyc')
with open(init_path, 'w') as fobj:
fobj.write("foo = 1")
py_compile.compile(init_path)
synopsis = pydoc.synopsis(init_path, {})
self.assertIsNone(synopsis)
synopsis_cached = pydoc.synopsis(cached_path, {})
self.assertIsNone(synopsis_cached)
class PydocImportTest(PydocBaseTest):
def setUp(self):
self.test_dir = os.mkdir(TESTFN)
self.addCleanup(rmtree, TESTFN)
def test_badimport(self):
# This tests the fix for issue 5230, where if pydoc found the module
# but the module had an internal import error pydoc would report no doc
# found.
modname = 'testmod_xyzzy'
testpairs = (
('i_am_not_here', 'i_am_not_here'),
('test.i_am_not_here_either', 'i_am_not_here_either'),
('test.i_am_not_here.neither_am_i', 'i_am_not_here.neither_am_i'),
('i_am_not_here.{}'.format(modname),
'i_am_not_here.{}'.format(modname)),
('test.{}'.format(modname), modname),
)
sourcefn = os.path.join(TESTFN, modname) + os.extsep + "py"
for importstring, expectedinmsg in testpairs:
with open(sourcefn, 'w') as f:
f.write("import {}\n".format(importstring))
result = run_pydoc(modname, PYTHONPATH=TESTFN)
expected = badimport_pattern % (modname, expectedinmsg)
self.assertEqual(expected, result)
def test_apropos_with_bad_package(self):
# Issue 7425 - pydoc -k failed when bad package on path
pkgdir = os.path.join(TESTFN, "syntaxerr")
os.mkdir(pkgdir)
badsyntax = os.path.join(pkgdir, "__init__") + os.extsep + "py"
with open(badsyntax, 'w') as f:
f.write("invalid python syntax = $1\n")
with self.restrict_walk_packages(path=[TESTFN]):
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('xyzzy')
# No result, no error
self.assertEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), '')
# The package name is still matched
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('syntaxerr')
self.assertEqual(out.getvalue().strip(), 'syntaxerr')
self.assertEqual(err.getvalue(), '')
def test_apropos_with_unreadable_dir(self):
# Issue 7367 - pydoc -k failed when unreadable dir on path
self.unreadable_dir = os.path.join(TESTFN, "unreadable")
os.mkdir(self.unreadable_dir, 0)
self.addCleanup(os.rmdir, self.unreadable_dir)
# Note, on Windows the directory appears to be still
# readable so this is not really testing the issue there
with self.restrict_walk_packages(path=[TESTFN]):
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('SOMEKEY')
# No result, no error
self.assertEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), '')
class TestDescriptions(unittest.TestCase):
def test_module(self):
# Check that pydocfodder module can be described
from test import pydocfodder
doc = pydoc.render_doc(pydocfodder)
self.assertIn("pydocfodder", doc)
def test_classic_class(self):
class C: "Classic class"
c = C()
self.assertEqual(pydoc.describe(C), 'class C')
self.assertEqual(pydoc.describe(c), 'instance of C')
expected = 'instance of C in module %s' % __name__
self.assertIn(expected, pydoc.render_doc(c))
def test_class(self):
class C(object): "New-style class"
c = C()
self.assertEqual(pydoc.describe(C), 'class C')
self.assertEqual(pydoc.describe(c), 'C')
expected = 'C in module %s object' % __name__
self.assertIn(expected, pydoc.render_doc(c))
def test_namedtuple_public_underscore(self):
NT = namedtuple('NT', ['abc', 'def'], rename=True)
with captured_stdout() as help_io:
pydoc.help(NT)
helptext = help_io.getvalue()
self.assertIn('_1', helptext)
self.assertIn('_replace', helptext)
self.assertIn('_asdict', helptext)
@unittest.skipUnless(test.test_support.have_unicode,
"test requires unicode support")
class TestUnicode(unittest.TestCase):
def setUp(self):
# Better not to use unicode escapes in literals, lest the
# parser choke on it if Python has been built without
# unicode support.
self.Q = types.ModuleType(
'Q', 'Rational numbers: \xe2\x84\x9a'.decode('utf8'))
self.Q.__version__ = '\xe2\x84\x9a'.decode('utf8')
self.Q.__date__ = '\xe2\x84\x9a'.decode('utf8')
self.Q.__author__ = '\xe2\x84\x9a'.decode('utf8')
self.Q.__credits__ = '\xe2\x84\x9a'.decode('utf8')
self.assertIsInstance(self.Q.__doc__, unicode)
def test_render_doc(self):
# render_doc is robust against unicode in docstrings
doc = pydoc.render_doc(self.Q)
self.assertIsInstance(doc, str)
def test_encode(self):
# _encode is robust against characters out the specified encoding
self.assertEqual(pydoc._encode(self.Q.__doc__, 'ascii'), 'Rational numbers: ℚ')
def test_pipepager(self):
# pipepager does not choke on unicode
doc = pydoc.render_doc(self.Q)
saved, os.popen = os.popen, open
try:
with test.test_support.temp_cwd():
pydoc.pipepager(doc, 'pipe')
self.assertEqual(open('pipe').read(), pydoc._encode(doc))
finally:
os.popen = saved
def test_tempfilepager(self):
# tempfilepager does not choke on unicode
doc = pydoc.render_doc(self.Q)
output = {}
def mock_system(cmd):
filename = cmd.strip()[1:-1]
self.assertEqual('"' + filename + '"', cmd.strip())
output['content'] = open(filename).read()
saved, os.system = os.system, mock_system
try:
pydoc.tempfilepager(doc, '')
self.assertEqual(output['content'], pydoc._encode(doc))
finally:
os.system = saved
def test_plainpager(self):
# plainpager does not choke on unicode
doc = pydoc.render_doc(self.Q)
# Note: captured_stdout is too permissive when it comes to
# unicode, and using it here would make the test always
# pass.
with test.test_support.temp_cwd():
with open('output', 'w') as f:
saved, sys.stdout = sys.stdout, f
try:
pydoc.plainpager(doc)
finally:
sys.stdout = saved
self.assertIn('Rational numbers:', open('output').read())
def test_ttypager(self):
# ttypager does not choke on unicode
doc = pydoc.render_doc(self.Q)
# Test ttypager
with test.test_support.temp_cwd(), test.test_support.captured_stdin():
with open('output', 'w') as f:
saved, sys.stdout = sys.stdout, f
try:
pydoc.ttypager(doc)
finally:
sys.stdout = saved
self.assertIn('Rational numbers:', open('output').read())
def test_htmlpage(self):
# html.page does not choke on unicode
with test.test_support.temp_cwd():
with captured_stdout() as output:
pydoc.writedoc(self.Q)
self.assertEqual(output.getvalue(), 'wrote Q.html\n')
class TestHelper(unittest.TestCase):
def test_keywords(self):
self.assertEqual(sorted(pydoc.Helper.keywords),
sorted(keyword.kwlist))
def test_builtin(self):
for name in ('str', 'str.translate', '__builtin__.str',
'__builtin__.str.translate'):
# test low-level function
self.assertIsNotNone(pydoc.locate(name))
# test high-level function
try:
pydoc.render_doc(name)
except ImportError:
self.fail('finding the doc of {!r} failed'.format(name))
for name in ('not__builtin__', 'strrr', 'strr.translate',
'str.trrrranslate', '__builtin__.strrr',
'__builtin__.str.trrranslate'):
self.assertIsNone(pydoc.locate(name))
self.assertRaises(ImportError, pydoc.render_doc, name)
def test_main():
try:
test.test_support.run_unittest(PydocDocTest,
PydocImportTest,
TestDescriptions,
TestUnicode,
TestHelper)
finally:
reap_children()
if __name__ == "__main__":
test_main()
|
|
#! /usr/bin/python3
import sys
import os
import threading
import decimal
import time
import json
import re
import requests
import collections
import logging
import binascii
from datetime import datetime
from dateutil.tz import tzlocal
import argparse
import configparser
import appdirs
import tarfile
import urllib.request
import shutil
import codecs
import tempfile
logger = logging.getLogger(__name__)
D = decimal.Decimal
from counterpartylib import server
from counterpartylib.lib import config
from counterpartylib.lib.util import value_input, value_output
rpc_sessions = {}
class JsonDecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, D):
return str(o)
return super(JsonDecimalEncoder, self).default(o)
json_dump = lambda x: json.dumps(x, sort_keys=True, indent=4, cls=JsonDecimalEncoder)
json_print = lambda x: print(json_dump(x))
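# Example (illustrative): json_dump({'quantity': D('0.5')}) serializes the
# Decimal as the string "0.5" instead of raising a TypeError.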
class RPCError(Exception):
pass
class AssetError(Exception):
pass
def rpc(url, method, params=None, ssl_verify=False, tries=1):
headers = {'content-type': 'application/json'}
payload = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": 0,
}
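    # Reuse one requests.Session per URL so repeated calls share a keep-alive
    # connection.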
if url not in rpc_sessions:
rpc_session = requests.Session()
rpc_sessions[url] = rpc_session
else:
rpc_session = rpc_sessions[url]
response = None
for i in range(tries):
try:
response = rpc_session.post(url, data=json.dumps(payload), headers=headers, verify=ssl_verify, timeout=config.REQUESTS_TIMEOUT)
if i > 0:
logger.debug('Successfully connected.')
break
except requests.exceptions.SSLError as e:
raise e
except requests.exceptions.Timeout as e:
raise e
except requests.exceptions.ConnectionError:
logger.debug('Could not connect to {}. (Try {}/{})'.format(url, i+1, tries))
time.sleep(5)
    if response is None:
raise RPCError('Cannot communicate with {}.'.format(url))
elif response.status_code not in (200, 500):
raise RPCError(str(response.status_code) + ' ' + response.reason + ' ' + response.text)
# Return result, with error handling.
response_json = response.json()
    if 'error' not in response_json or response_json['error'] is None:
return response_json['result']
else:
raise RPCError('{}'.format(response_json['error']))
def api(method, params=None):
return rpc(config.COUNTERPARTY_RPC, method, params=params, ssl_verify=config.COUNTERPARTY_RPC_SSL_VERIFY)
def wallet_api(method, params=None):
return rpc(config.WALLET_URL, method, params=params, ssl_verify=config.WALLET_SSL_VERIFY)
def is_divisible(asset):
if asset in (config.BTC, config.XCP, 'leverage', 'value', 'fraction', 'price', 'odds'):
return True
else:
sql = '''SELECT * FROM issuances WHERE (status = ? AND asset = ?)'''
bindings = ['valid', asset]
issuances = api('sql', {'query': sql, 'bindings': bindings})
if not issuances: raise AssetError('No such asset: {}'.format(asset))
return issuances[0]['divisible']
def value_in(quantity, asset, divisible=None):
if divisible is None:
divisible = is_divisible(asset)
return value_input(quantity, asset, divisible)
def value_out(quantity, asset, divisible=None):
if divisible is None:
divisible = is_divisible(asset)
return value_output(quantity, asset, divisible)
def bootstrap(testnet=False, overwrite=True, ask_confirmation=False):
data_dir = appdirs.user_data_dir(appauthor=config.XCP_NAME, appname=config.APP_NAME, roaming=True)
# Set Constants.
if testnet:
BOOTSTRAP_URL = 'https://s3.amazonaws.com/counterparty-bootstrap/counterparty-db-testnet.latest.tar.gz'
TARBALL_PATH = os.path.join(tempfile.gettempdir(), 'counterpartyd-testnet-db.latest.tar.gz')
DATABASE_PATH = os.path.join(data_dir, '{}.testnet.db'.format(config.APP_NAME))
else:
BOOTSTRAP_URL = 'https://s3.amazonaws.com/counterparty-bootstrap/counterparty-db.latest.tar.gz'
TARBALL_PATH = os.path.join(tempfile.gettempdir(), 'counterpartyd-db.latest.tar.gz')
DATABASE_PATH = os.path.join(data_dir, '{}.db'.format(config.APP_NAME))
# Prepare Directory.
if not os.path.exists(data_dir):
os.makedirs(data_dir, mode=0o755)
if not overwrite and os.path.exists(DATABASE_PATH):
return
# Define Progress Bar.
def reporthook(blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 1e2 / totalsize
s = "\r%5.1f%% %*d / %d" % (
percent, len(str(totalsize)), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize: # near the end
sys.stderr.write("\n")
else: # total size is unknown
sys.stderr.write("read %d\n" % (readsofar,))
print('Downloading database from {}...'.format(BOOTSTRAP_URL))
urllib.request.urlretrieve(BOOTSTRAP_URL, TARBALL_PATH, reporthook)
print('Extracting to "%s"...' % data_dir)
with tarfile.open(TARBALL_PATH, 'r:gz') as tar_file:
tar_file.extractall(path=data_dir)
assert os.path.exists(DATABASE_PATH)
os.chmod(DATABASE_PATH, 0o660)
print('Cleaning up...')
os.remove(TARBALL_PATH)
os.remove(os.path.join(data_dir, 'checksums.txt'))
# Set default values of command line arguments with config file
def add_config_arguments(arg_parser, config_args, default_config_file, config_file_arg_name='config_file'):
cmd_args = arg_parser.parse_known_args()[0]
config_file = getattr(cmd_args, config_file_arg_name, None)
if not config_file:
config_dir = appdirs.user_config_dir(appauthor=config.XCP_NAME, appname=config.APP_NAME, roaming=True)
if not os.path.isdir(config_dir):
os.makedirs(config_dir, mode=0o755)
config_file = os.path.join(config_dir, default_config_file)
# clean BOM
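    # (if the file starts with a UTF-8 BOM, shift the remaining bytes back over
    # it in place, then truncate the leftover tail)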
BUFSIZE = 4096
BOMLEN = len(codecs.BOM_UTF8)
with codecs.open(config_file, 'r+b') as fp:
chunk = fp.read(BUFSIZE)
if chunk.startswith(codecs.BOM_UTF8):
i = 0
chunk = chunk[BOMLEN:]
while chunk:
fp.seek(i)
fp.write(chunk)
i += len(chunk)
fp.seek(BOMLEN, os.SEEK_CUR)
chunk = fp.read(BUFSIZE)
fp.seek(-BOMLEN, os.SEEK_CUR)
fp.truncate()
logger.debug('Loading configuration file: `{}`'.format(config_file))
configfile = configparser.ConfigParser()
with codecs.open(config_file, 'r', encoding='utf8') as fp:
configfile.readfp(fp)
    if 'Default' not in configfile:
configfile['Default'] = {}
# Initialize default values with the config file.
for arg in config_args:
key = arg[0][-1].replace('--', '')
if 'action' in arg[1] and arg[1]['action'] == 'store_true' and key in configfile['Default']:
arg[1]['default'] = configfile['Default'].getboolean(key)
elif key in configfile['Default'] and configfile['Default'][key]:
arg[1]['default'] = configfile['Default'][key]
arg_parser.add_argument(*arg[0], **arg[1])
return arg_parser
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
|
# -*- coding: utf-8 -*-
#==============================================================================
# module : agilent_psa.py
# author : Benjamin Huard
# license : MIT license
#==============================================================================
"""
This module defines drivers for agilent PSA.
:Contains:
SpecDescriptor
AgilentPSA
"""
from inspect import cleandoc
import numpy as np
from ..driver_tools import (InstrIOError, secure_communication,
instrument_property)
from ..visa_tools import VisaInstrument
DATA_FORMATTING_DICT = {'raw I/Q data': 0,
'descriptor': 1,
'(I,Q) vs time': 3,
'log(mag) vs freq': 4,
'average of log(mag) vs freq': 7,
'mag vs freq in Vrms': 11,
'average of mag vs freq in Vrms': 12}
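# The numeric values above are the trace numbers used with FETCH:SPEC<n>;
# read_data() keeps the same mapping in its DATA_FORMAT list, with '0' entries
# as placeholders for unused trace numbers.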
class SpecDescriptor():
def __init__(self):
self.initialized = False
self.FFTpeak = 0
self.FFTfreq = 0
self.FFTnbrSteps = 2
self.Firstfreq = 0
self.Freqstep = 0
self.TimenbrSteps = 2
self.firsttime = 0
self.TimeStep = 0.1
self.timedomaincheck = 1
self.totaltime = 1.0
self.averagenbr = 1
class AgilentPSA(VisaInstrument):
"""
"""
caching_permissions = {'start_frequency_SA': False,
'stop_frequency_SA': False,
'mode': False}
def __init__(self, connection_info, caching_allowed=True,
caching_permissions={}, auto_open=True):
super(AgilentPSA, self).__init__(connection_info,
caching_allowed,
caching_permissions,
auto_open)
self.write("ROSC:SOURCE EXT") # 10 MHz clock bandwidth external
self.write("ROSC:OUTP ON") # 10 MHz clock bandwidth internal ON
self.write("FORM:DATA ASCii") # lots of data must be read in
# ASCii format
self.write("FORM:BORD NORMAL") # (TO CHECK)
self.mode = self.mode # initialize PSA properly if SPEC or WAV mode
self.spec_header = SpecDescriptor()
@secure_communication(2)
def get_spec_header(self):
"""
"""
if self.mode == 'SPEC':
answer = self.ask_for_values("FETCH:SPEC1?")
if answer:
self.spec_header.initialized = True
self.spec_header.FFTpeak = answer[0]
self.spec_header.FFTfreq = answer[1]/1e9
self.spec_header.FFTnbrSteps = answer[2]
self.spec_header.Firstfreq = answer[3]/1e9
self.spec_header.Freqstep = answer[4]/1e9
self.spec_header.TimenbrSteps = answer[5]
self.spec_header.firsttime = answer[6]
self.spec_header.TimeStep = answer[7]
self.spec_header.timedomaincheck = answer[8]
self.spec_header.totaltime = answer[9]
self.spec_header.averagenbr = answer[10]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return its
mode'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in Spectrum mode'''))
@secure_communication()
def read_data(self, trace):
"""
"""
DATA_FORMAT = ['raw I/Q data', 'descriptor', '0', '(I,Q) vs time',
'log(mag) vs freq', '0', '0',
'average of log(mag) vs freq', '0', '0', '0',
'mag vs freq in Vrms', 'average of mag vs freq in Vrms']
if self.mode == 'SA':
# must be read in ASCii format
self.write("FORM:DATA ASCii")
# stop all the measurements
self.write(":ABORT")
# go to the "Single sweep" mode
self.write(":INIT:CONT OFF")
# initiate measurement
self.write(":INIT")
#
self.ask_for_values("SWEEP:TIME?")
self.write("*WAI") # SA waits until the averaging is done
# Loop to see when the averaging is done
while True:
try:
self.ask_for_values("SWEEP:TIME?")
break
except:
pass
data = self.ask_for_values('trace? trace{}'.format(trace))
if data:
freq = np.linspace(self.start_frequency_SA,
self.stop_frequency_SA,
self.sweep_points_SA)
return np.rec.fromarrays([freq, np.array(data)],
names=['Frequency',
DATA_FORMAT[trace]])
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
trace {} data'''.format(trace)))
elif self.mode == 'SPEC':
self.get_spec_header()
self.write("INIT:IMM;*WAI") # start the acquisition and wait until
# over
# Check how *OPC? works
self.ask("*OPC?")
data = self.ask_for_values("FETCH:SPEC{}?".format(trace))
if data:
if trace in (4, 7, 11, 12):
header = self.spec_header
stop = header.Firstfreq +\
header.Freqstep*(header.FFTnbrSteps-1)
freq = np.linspace(header.Firstfreq, stop,
header.FFTnbrSteps)
return np.rec.fromarrays([freq, np.array(data)],
names=['Freq',
DATA_FORMAT[trace]])
elif trace in (0, 3):
header = self.spec_header
stop = header.firsttime +\
header.TimeStep*(header.TimenbrSteps-1)
freq = np.linspace(header.firsttime, stop,
header.TimenbrSteps)
return np.rec.fromarrays([freq, np.array(data)],
names=['Time',
DATA_FORMAT[trace]])
else:
raise InstrIOError(cleandoc('''Wrong parameters for trace
in Agilent E4440'''))
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
trace data'''))
else:
self.get_spec_header()
self.write("INIT:IMM;*WAI") # start the acquisition and wait until
# over
#Check how *OPC? works
self.ask("*OPC?")
data = self.ask_for_values("FETCH:WAV0?") # this will get the
# (I,Q) as a function of freq
if data:
                return np.rec.fromarrays([data[::2], data[1::2]],
                                         names=['Q', 'I'])
# one should get all the even indices (Q)
# and odd indices (I) separately
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
trace data'''))
@instrument_property
@secure_communication()
def mode(self):
"""
"""
SAorBASIC = self.ask('inst:sel?')
if SAorBASIC == 'SA':
return 'SA'
elif SAorBASIC == 'BASIC':
conf = self.ask('conf?')
if conf:
return conf # SPEC if basic mode with spectral density
# or WAV if basic mode with time domain
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return its
mode'''))
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return its
mode'''))
@mode.setter
@secure_communication()
def mode(self, value):
"""
"""
if value == 'SPEC':
self.write('INST:SEL BASIC')
self.write('CONF:SPECTRUM')
self.write("INIT:CONT ON") # set in continuous mode
self.write("SENS:SPEC:IFP WIDE") # set the wide bandWidth 80MHz
# for spectrum
self.write("SENS:SPEC:AVER OFF") # set the average off
# for spectrum
self.write("INIT:CONT OFF") # set in single sweep mode
self.write("INIT:IMM")
elif value == "WAV":
self.write('INST:SEL BASIC')
self.write('CONF:WAV')
self.write("SENS:WAV:IFP WIDE") # set the wide bandWidth 80MHz
# for timedomain
self.write("SENS:WAV:AVER OFF") # set the average off
# for timedomain
self.write("SENS:WAV:ADC:DITHER OFF") # dither signal off
self.write("INIT:CONT OFF") # set in single sweep mode
self.write("INIT:IMM")
else:
self.write('INST:SEL SA')
@instrument_property
@secure_communication()
def start_frequency_SA(self):
"""Start frequency getter method
"""
if self.mode == 'SA':
freq = self.ask_for_values('FREQ:STAR?')
if freq:
return freq[0]/1e9
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
start frequency'''))
elif self.mode == 'SPEC':
if not self.spec_header.initialized:
self.get_spec_header()
return self.spec_header.Firstfreq
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to get correctly the start frequency'''))
@start_frequency_SA.setter
@secure_communication()
def start_frequency_SA(self, value):
"""Start frequency setter method
"""
if self.mode == 'SA':
self.write('FREQ:STAR {} GHz'.format(value))
result = self.ask_for_values('FREQ:STAR?')
if result:
if abs(result[0]/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the start frequency'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
start frequency'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to set the start frequency'''))
@instrument_property
@secure_communication()
def stop_frequency_SA(self):
"""Stop frequency getter method
"""
if self.mode == 'SA':
freq = self.ask_for_values('FREQ:STOP?')
if freq:
return freq[0]/1e9
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
stop frequency'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to get the stop frequency'''))
@stop_frequency_SA.setter
@secure_communication()
def stop_frequency_SA(self, value):
"""Stop frequency setter method
"""
if self.mode == 'SA':
self.write('FREQ:STOP {} GHz'.format(value))
result = self.ask_for_values('FREQ:STOP?')
if result:
if abs(result[0]/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the stop frequency'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
stop frequency'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to set the stop frequency'''))
@instrument_property
@secure_communication()
def center_frequency(self):
"""Center frequency getter method
"""
freq = self.ask_for_values('FREQ:CENT?')
if freq:
return freq[0]/1e9
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
center frequency'''))
@center_frequency.setter
@secure_communication()
def center_frequency(self, value):
"""center frequency setter method
"""
self.write('FREQ:CENT {} GHz'.format(value))
result = self.ask_for_values('FREQ:CENT?')
if result:
if abs(result[0]/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly the
center frequency'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
center frequency'''))
@instrument_property
@secure_communication()
def span_frequency(self):
"""Span frequency getter method
"""
if self.mode == 'SPEC':
freq = self.ask_for_values('SENS:SPEC:FREQ:SPAN?')
if freq:
return freq[0]/1e9
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
span frequency'''))
elif self.mode == 'SA':
freq = self.ask_for_values('FREQ:SPAN?')
if freq:
return freq[0]/1e9
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
span frequency'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to get the span frequency'''))
@span_frequency.setter
@secure_communication()
def span_frequency(self, value):
"""span frequency setter method
"""
if self.mode == 'SA':
self.write('FREQ:SPAN {} GHz'.format(value))
result = self.ask_for_values('FREQ:SPAN?')
if result:
if abs(result[0]/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the span frequency'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
span frequency'''))
elif self.mode == 'SPEC':
self.write('SENS:SPEC:FREQ:SPAN {} GHz'.format(value))
result = self.ask_for_values('SENS:SPEC:FREQ:SPAN?')
if result:
if abs(result[0]/1e9 - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the span frequency'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
span frequency'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to set the span frequency'''))
@instrument_property
@secure_communication()
def sweep_time(self):
"""Sweep time getter method
"""
if self.mode == 'WAV':
sweep = self.ask_for_values('SENS:WAV:SWEEP:TIME?')
if sweep:
return sweep[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
sweep time'''))
elif self.mode == 'SA':
sweep = self.ask_for_values('SWEEP:TIME?')
if sweep:
return sweep[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
sweep time'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to get the sweep time'''))
@sweep_time.setter
@secure_communication()
def sweep_time(self, value):
"""sweep time setter method
"""
if self.mode == 'WAV':
self.write('SENS:WAV:SWEEP:TIME {}'.format(value))
result = self.ask_for_values('SENS:WAV:SWEEP:TIME?')
if result:
if abs(result[0] - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the sweep time'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
sweep time'''))
elif self.mode == 'SA':
self.write('SWEEP:TIME {}'.format(value))
result = self.ask_for_values('SWEEP:TIME?')
if result:
if abs(result[0] - value)/value > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the sweep time'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
sweep time'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to set the sweep time'''))
@instrument_property
@secure_communication()
def RBW(self):
"""
"""
if self.mode == 'WAV':
rbw = self.ask_for_values('SENS:WAV:BWIDTH?')
if rbw:
return rbw[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
RBW'''))
elif self.mode == 'SPEC':
rbw = self.ask_for_values('SENS:SPEC:BWIDTH?')
if rbw:
return rbw[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
RBW'''))
else:
rbw = self.ask_for_values('BWIDTH?')
if rbw:
return rbw[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
channel Resolution bandwidth'''))
@RBW.setter
@secure_communication()
def RBW(self, value):
"""
"""
if self.mode == 'WAV':
self.write('SENS:WAV:BWIDTH {}'.format(value))
result = self.ask_for_values('SENS:WAV:BWIDTH?')
if result:
                if abs(result[0] - value) > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the channel Resolution bandwidth'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
channel Resolution bandwidth'''))
elif self.mode == 'SPEC':
self.write('SENS:SPEC:BWIDTH {}'.format(value))
result = self.ask_for_values('SENS:SPEC:BWIDTH?')
if result:
                if abs(result[0] - value) > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the channel Resolution bandwidth'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
channel Resolution bandwidth'''))
else:
self.write('BAND {}'.format(value))
result = self.ask_for_values('BWIDTH?')
if result:
                if abs(result[0] - value) > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the channel Resolution bandwidth'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
channel Resolution bandwidth'''))
@instrument_property
@secure_communication()
def VBW_SA(self):
"""
"""
if self.mode == 'SA':
vbw = self.ask_for_values('BAND:VID?')
if vbw:
return vbw[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
channel Video bandwidth'''))
else:
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to get the channel Video bandwidth'''))
@VBW_SA.setter
@secure_communication()
def VBW_SA(self, value):
"""
"""
        if self.mode == 'WAV':
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to set the channel Video bandwidth'''))
        elif self.mode == 'SPEC':
            raise InstrIOError(cleandoc('''PSA is not in the appropriate mode
                to set the channel Video bandwidth'''))
else:
self.write('BAND:VID {}'.format(value))
result = self.ask_for_values('BAND:VID?')
if result:
                if abs(result[0] - value) > 10**-12:
raise InstrIOError(cleandoc('''PSA did not set correctly
the channel Video bandwidth'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
channel Video bandwidth'''))
@instrument_property
@secure_communication()
def sweep_points_SA(self):
"""
"""
points = self.ask_for_values('SENSe:SWEep:POINts?')
if points:
return points[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
sweep point number'''))
@sweep_points_SA.setter
@secure_communication()
def sweep_points_SA(self, value):
"""
"""
self.write('SENSe:SWEep:POINts {}'.format(value))
result = self.ask_for_values('SENSe:SWEep:POINts?')
if result:
if result[0] != value:
raise InstrIOError(cleandoc('''PSA did not set correctly the
sweep point number'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
sweep point number'''))
@instrument_property
@secure_communication()
def average_count_SA(self):
"""
"""
count = self.ask_for_values('AVERage:COUNt?')
if count:
return count[0]
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
average count'''))
@average_count_SA.setter
@secure_communication()
def average_count_SA(self, value):
"""
"""
self.write('AVERage:COUNt {}'.format(value))
result = self.ask_for_values('AVERage:COUNt?')
if result:
if result[0] != value:
raise InstrIOError(cleandoc('''PSA did not set correctly the
average count'''))
else:
raise InstrIOError(cleandoc('''PSA did not set correctly the
average count'''))
@instrument_property
@secure_communication()
def average_state_SA(self):
"""
"""
mode = self.ask('AVERage?')
if mode:
return mode
else:
raise InstrIOError(cleandoc('''Agilent PSA did not return the
average state'''))
@average_state_SA.setter
@secure_communication()
def average_state_SA(self, value):
"""
"""
self.write('AVERage:STATE {}'.format(value))
result = self.ask('AVERage?')
if result.lower() != value.lower()[:len(result)]:
raise InstrIOError(cleandoc('''PSA did not set correctly the
average state'''))
DRIVERS = {'AgilentPSA': AgilentPSA}
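# A minimal usage sketch (not part of the driver). The constructor argument and
# connection details below are assumptions for illustration only, since the
# framework that instantiates the DRIVERS entries is defined elsewhere:
#
#     psa = AgilentPSA({'resource_name': 'GPIB0::18::INSTR'})   # hypothetical
#     psa.mode = 'SPEC'                  # basic mode, spectral density
#     psa.center_frequency = 5.0         # GHz
#     psa.span_frequency = 0.01          # GHz
#     spectrum = psa.read_data(trace=4)  # record array with a 'Freq' column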
|
|
from __future__ import print_function
import unittest
import os, copy
import math
import numpy
from rdkit.six.moves import cPickle as pickle
from rdkit.six import next
from rdkit import Chem
from rdkit.Chem import rdDistGeom, ChemicalForceFields, rdMolAlign
from rdkit import RDConfig, rdBase
from rdkit.Geometry import rdGeometry as geom
from rdkit.RDLogger import logger
logger = logger()
def feq(v1, v2, tol=1.e-4):
return abs(v1 - v2) < tol
def lstEq(l1, l2, tol=1.0e-4):
ln = len(l1)
if (ln != len(l2)):
return 0
for i in range(ln):
if abs(l1[i] - l2[i]) > tol:
return 0
return 1
def compareWithOld(smilesFile, sdFile):
smiSup = Chem.SmilesMolSupplier(smilesFile, ",", 0, -1)
sdsup = Chem.SDMolSupplier(sdFile)
im = 0
for mol in smiSup:
cid = rdDistGeom.EmbedMolecule(mol, 10, 1)
omol = sdsup[im]
assert cid == 0
conf = mol.GetConformer(0)
oconf = omol.GetConformer()
nat = mol.GetNumAtoms()
for i in range(nat):
#atm = mol.GetAtomWithIdx(i)
#oatm = omol.GetAtomWithIdx(i)
pos = conf.GetAtomPosition(i)
opos = oconf.GetAtomPosition(i)
if not lstEq(pos, opos):
return 0
im += 1
return 1
def compareMatrices(bm1, bm2, map, tol=1.0e-5):
N = numpy.shape(bm1)[0]
for i in range(1, N):
for j in range(i):
l, m = map[i], map[j]
if (l < m):
l, m = m, l
if (abs(bm1[l, m] - bm2[i, j]) > tol):
return 0
if (abs(bm1[m, l] - bm2[j, i]) > tol):
return 0
return 1
def compareOrder(smi1, smi2, tol=1.0e-5):
m1 = Chem.MolFromSmiles(smi1)
m2 = Chem.MolFromSmiles(smi2)
bm1 = rdDistGeom.GetMoleculeBoundsMatrix(m1)
bm2 = rdDistGeom.GetMoleculeBoundsMatrix(m2)
map = m1.GetSubstructMatch(m2)
return compareMatrices(bm1, bm2, map, tol)
def computeDist(lst1, lst2):
res = 0.0
for i, val in enumerate(lst1):
res += (val - lst2[i]) * (val - lst2[i])
res = math.sqrt(res)
return res
def computeChiralVol(pt1, pt2, pt3, pt4):
v1 = pt1 - pt4
v2 = pt2 - pt4
v3 = pt3 - pt4
cp = v2.CrossProduct(v3)
vol = v1.DotProduct(cp)
return vol
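# A small worked example (not part of the original tests) of what
# computeChiralVol returns: the scalar triple product v1 . (v2 x v3), i.e. six
# times the signed volume of the tetrahedron (pt1, pt2, pt3, pt4). For the
# corners of the unit simplex the value is +1, and the sign flips under a
# mirror image:
#
#     from rdkit.Geometry import rdGeometry as geom
#     computeChiralVol(geom.Point3D(1, 0, 0), geom.Point3D(0, 1, 0),
#                      geom.Point3D(0, 0, 1), geom.Point3D(0, 0, 0))  # -> 1.0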
class TestCase(unittest.TestCase):
def setUp(self):
pass
def _test0Cdk2(self):
fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'DistGeomHelpers', 'test_data',
'cis_trans_cases.csv')
ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'DistGeomHelpers', 'test_data',
'embedDistOpti.sdf')
self.assertTrue(compareWithOld(fileN, ofile))
def test1Small(self):
#writer = Chem.SDWriter("test.sdf")
    # single, double and triple atom cases should not fail
mol = Chem.MolFromSmiles('O')
rdDistGeom.EmbedMolecule(mol, 10, 1)
conf = mol.GetConformer()
self.assertTrue(lstEq(conf.GetAtomPosition(0), [0.0, 0.0, 0.0]))
#writer.write(mol)
mol = Chem.MolFromSmiles('CO')
rdDistGeom.EmbedMolecule(mol, 10, 1)
conf = mol.GetConformer()
self.assertTrue(lstEq(conf.GetAtomPosition(0), [0.69192, 0.0, 0.0]))
self.assertTrue(lstEq(conf.GetAtomPosition(1), [-0.69192, 0.0, 0.0]))
#writer.write(mol)
mol = Chem.MolFromSmiles('CCC')
rdDistGeom.EmbedMolecule(mol, 10, 1)
conf = mol.GetConformer()
self.assertTrue(lstEq(conf.GetAtomPosition(0), [-1.21676, -0.2989, 0.0]))
self.assertTrue(lstEq(conf.GetAtomPosition(1), [-0.00604, 0.59337, 0.0]))
self.assertTrue(lstEq(conf.GetAtomPosition(2), [1.22281, -0.29446, 0.0]))
#writer.write(mol)
mol = Chem.MolFromSmiles('O=C=O')
rdDistGeom.EmbedMolecule(mol, 10, 1)
conf = mol.GetConformer()
#writer.write(mol)
self.assertTrue(lstEq(conf.GetAtomPosition(0), [-1.2180, -0.06088, 0.0]))
self.assertTrue(lstEq(conf.GetAtomPosition(1), [-0.00408, 0.12116, 0.0]))
self.assertTrue(lstEq(conf.GetAtomPosition(2), [1.22207, -0.060276, 0.0]))
mol = Chem.MolFromSmiles('C=C=C=C')
rdDistGeom.EmbedMolecule(mol, 10, 1)
conf = mol.GetConformer()
#writer.write(mol)
d1 = computeDist(conf.GetAtomPosition(0), conf.GetAtomPosition(1))
self.assertTrue(feq(d1, 1.31, 0.01))
d2 = computeDist(conf.GetAtomPosition(0), conf.GetAtomPosition(2))
self.assertTrue(feq(d2, 2.59, 0.05))
d3 = computeDist(conf.GetAtomPosition(0), conf.GetAtomPosition(3))
self.assertTrue(feq(d3, 3.84, 0.1))
d4 = computeDist(conf.GetAtomPosition(1), conf.GetAtomPosition(2))
self.assertTrue(feq(d4, 1.29, 0.01))
d5 = computeDist(conf.GetAtomPosition(1), conf.GetAtomPosition(3))
self.assertTrue(feq(d5, 2.54, 0.1))
d6 = computeDist(conf.GetAtomPosition(2), conf.GetAtomPosition(3))
self.assertTrue(feq(d6, 1.31, 0.01))
def test2Utils(self):
mol = Chem.MolFromSmiles('CC')
bm = rdDistGeom.GetMoleculeBoundsMatrix(mol)
self.assertTrue(bm[1, 0] > 0)
self.assertTrue(bm[0, 1] > 0)
self.assertTrue(bm[0, 1] >= bm[1, 0])
self.assertTrue(bm[1, 0] < 1.510)
self.assertTrue(bm[0, 1] > 1.510)
def test3MultiConf(self):
mol = Chem.MolFromSmiles("CC(C)(C)c(cc12)n[n]2C(=O)/C=C(N1)/COC")
cids = rdDistGeom.EmbedMultipleConfs(mol, 10, maxAttempts=30, randomSeed=100)
energies = [112.98, 103.57, 110.78, 100.40, 95.37, 101.64, 114.72, 112.65, 124.53, 107.50]
nenergies = []
for cid in cids:
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol, 10.0, cid)
ee = ff.CalcEnergy()
nenergies.append(ee)
#print(['%.2f'%x for x in nenergies])
#print(nenergies)
self.assertTrue(lstEq(energies, nenergies, tol=1e-2))
def test4OrderDependence(self):
self.assertTrue(
compareOrder("CC(C)(C)C(=O)NC(C1)CC(N2C)CCC12", "CN1C2CCC1CC(NC(=O)C(C)(C)C)C2"))
#issue 230
self.assertTrue(compareOrder("C#CC(C)(C)N(CN1)C\\N=C/1SC", "CSC1=NCN(C(C)(C)C#C)CN1"))
#issue 232
self.assertTrue(
compareOrder("CC(C)(C)C(=O)NC(C1)CC(N2C)CCC12", "CN1C2CCC1CC(NC(=O)C(C)(C)C)C2"))
def test5Issue285(self):
m = Chem.MolFromSmiles('CNC=O')
cs = rdDistGeom.EmbedMultipleConfs(m, 10)
for i, ci in enumerate(cs):
for j in range(i + 1, len(cs)):
cj = cs[j]
self.assertTrue(Chem.MolToMolBlock(m, confId=ci) != Chem.MolToMolBlock(m, confId=cj))
def test6RmsPruning(self):
smiles = [
'CC(C)CC(NC(C1[N+]CCC1)=O)C([O-])=O', 'CC(NC(CO)C(O)c1ccc([N+]([O-])=O)cc1)=O',
'CC([N+])C(NC(C)C(N1C(C=O)CCC1)=O)=O', 'CC(NC1C(O)C=C(C([O-])=O)OC1C(O)C(O)CO)=O',
'CCCC=C(NC(C1CC1(C)C)=O)C([O-])=O', 'OCC(O)C(O)C(Cn1c2c(cc(C)c(C)c2)nc-2c(=O)[nH]c(=O)nc12)O'
]
nconfs = []
expected = [5, 6, 6, 6, 6, 3]
for smi in smiles:
mol = Chem.MolFromSmiles(smi)
cids = rdDistGeom.EmbedMultipleConfs(mol, 50, maxAttempts=30, randomSeed=100,
pruneRmsThresh=1.5)
nconfs.append(len(cids))
d = [abs(x - y) for x, y in zip(expected, nconfs)]
self.assertTrue(max(d) <= 1)
def test6Chirality(self):
# turn on chirality and we should get chiral volume that is pretty consistent and
# positive
tgtVol = 13.0
smiles = "Cl[C@](C)(F)Br"
mol = Chem.MolFromSmiles(smiles)
cids = rdDistGeom.EmbedMultipleConfs(mol, 30, maxAttempts=30, randomSeed=100)
self.assertTrue(len(cids) == 30)
for cid in cids:
conf = mol.GetConformer(cid)
vol = computeChiralVol(
conf.GetAtomPosition(0),
conf.GetAtomPosition(2), conf.GetAtomPosition(3), conf.GetAtomPosition(4))
self.assertTrue(abs(vol - tgtVol) < 1)
    # turn off chirality and now we should see both chiral forms
smiles = "ClC(C)(F)Br"
mol = Chem.MolFromSmiles(smiles)
cids = rdDistGeom.EmbedMultipleConfs(mol, 30, maxAttempts=30, randomSeed=120)
self.assertTrue(len(cids) == 30)
nPos = 0
nNeg = 0
for cid in cids:
conf = mol.GetConformer(cid)
vol = computeChiralVol(
conf.GetAtomPosition(0),
conf.GetAtomPosition(2), conf.GetAtomPosition(3), conf.GetAtomPosition(4))
self.assertTrue(abs(vol - tgtVol) < 1 or abs(vol + tgtVol) < 1)
if vol < 0:
nNeg += 1
else:
nPos += 1
self.assertTrue(nPos > 0)
self.assertTrue(nNeg > 0)
tgtVol = 5.0
for i in range(10):
smiles = "Cl[C@H](F)Br"
mol = Chem.MolFromSmiles(smiles)
ci = rdDistGeom.EmbedMolecule(mol, 30, (i + 1) * 10)
conf = mol.GetConformer(ci)
vol = computeChiralVol(
conf.GetAtomPosition(0),
conf.GetAtomPosition(1), conf.GetAtomPosition(2), conf.GetAtomPosition(3))
self.assertTrue(abs(vol - tgtVol) < 1, "%s %s" % (vol, tgtVol))
tgtVol = 3.5
expected = [-3.62, -3.67, -3.72, 3.91, 3.95, 3.98, 3.90, 3.94, 3.98, 3.91]
nPos = 0
nNeg = 0
for i in range(30):
smiles = "ClC(F)Br"
mol = Chem.MolFromSmiles(smiles)
ci = rdDistGeom.EmbedMolecule(mol, 30, (i + 1) * 10)
conf = mol.GetConformer(ci)
vol = computeChiralVol(
conf.GetAtomPosition(0),
conf.GetAtomPosition(1), conf.GetAtomPosition(2), conf.GetAtomPosition(3))
self.assertTrue(abs(vol - tgtVol) < 1 or abs(vol + tgtVol) < 1)
if vol < 0:
nNeg += 1
else:
nPos += 1
self.assertTrue(nPos > 0)
self.assertTrue(nNeg > 0)
smiles = "Cl[C@H](F)Br"
m = Chem.MolFromSmiles(smiles)
mol = Chem.AddHs(m)
cids = rdDistGeom.EmbedMultipleConfs(mol, 10, maxAttempts=30, randomSeed=100)
self.assertTrue(len(cids) == 10)
tgtVol = 10.5
for cid in cids:
conf = mol.GetConformer(cid)
vol = computeChiralVol(
conf.GetAtomPosition(0),
conf.GetAtomPosition(2), conf.GetAtomPosition(3), conf.GetAtomPosition(4))
self.assertTrue(abs(vol - tgtVol) < 2.)
# let's try a little more complicated system
expectedV1 = -2.0
expectedV2 = -2.9
for i in range(5):
smi = "C1=CC=C(C=C1)[C@H](OC1=C[NH]N=C1)C(=O)[NH]C[C@H](Cl)C1=CC=NC=C1"
mol = Chem.MolFromSmiles(smi)
ci = rdDistGeom.EmbedMolecule(mol, randomSeed=(i + 1) * 15)
self.assertTrue(ci >= 0)
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol, 10.0, ci)
ff.Minimize()
conf = mol.GetConformer(ci)
vol1 = computeChiralVol(
conf.GetAtomPosition(6),
conf.GetAtomPosition(3), conf.GetAtomPosition(7), conf.GetAtomPosition(13))
self.assertTrue(abs(vol1 - expectedV1) < 1 or abs(vol1 + expectedV1) < 1)
if vol1 < 0:
nNeg += 1
else:
nPos += 1
vol2 = computeChiralVol(
conf.GetAtomPosition(17),
conf.GetAtomPosition(16), conf.GetAtomPosition(18), conf.GetAtomPosition(19))
self.assertTrue(abs(vol2 - expectedV2) < 1 or abs(vol2 + expectedV2) < 1)
# remove the chiral specification and we should see other chiral
# forms of the compound
expectedV1 = 2.0 #[-2.30, -2.31, -2.30, 2.30, -1.77]
expectedV2 = 2.8 #[2.90, 2.89, 2.69, -2.90, -2.93]
self.assertTrue(nPos > 0)
self.assertTrue(nNeg > 0)
for i in range(5):
smi = "C1=CC=C(C=C1)C(OC1=C[NH]N=C1)C(=O)[NH]CC(Cl)C1=CC=NC=C1"
mol = Chem.MolFromSmiles(smi)
ci = rdDistGeom.EmbedMolecule(mol, 30, (i + 1) * 10)
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol, 10.0, ci)
ff.Minimize()
conf = mol.GetConformer(ci)
vol1 = computeChiralVol(
conf.GetAtomPosition(6),
conf.GetAtomPosition(3), conf.GetAtomPosition(7), conf.GetAtomPosition(13))
vol2 = computeChiralVol(
conf.GetAtomPosition(17),
conf.GetAtomPosition(16), conf.GetAtomPosition(18), conf.GetAtomPosition(19))
self.assertTrue(abs(abs(vol1) - expectedV1) < 1.0)
self.assertTrue(abs(abs(vol2) - expectedV2) < 1.0)
def test7ConstrainedEmbedding(self):
ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'DistGeomHelpers', 'test_data',
'constrain1.sdf')
suppl = Chem.SDMolSupplier(ofile)
ref = next(suppl)
probe = copy.deepcopy(ref)
cMap = {}
for i in range(5):
cMap[i] = ref.GetConformer().GetAtomPosition(i)
ci = rdDistGeom.EmbedMolecule(probe, coordMap=cMap, randomSeed=23)
self.assertTrue(ci > -1)
algMap = list(zip(range(5), range(5)))
ssd = rdMolAlign.AlignMol(probe, ref, atomMap=algMap)
self.assertTrue(ssd < 0.1)
def test8MultiThreadMultiConf(self):
if (rdBase.rdkitBuild.split('|')[2] != "MINGW"):
ENERGY_TOLERANCE = 1.0e-6
MSD_TOLERANCE = 1.0e-6
else:
ENERGY_TOLERANCE = 1.0
MSD_TOLERANCE = 1.0e-5
mol = Chem.AddHs(Chem.MolFromSmiles("CC(C)(C)c(cc12)n[n]2C(=O)/C=C(N1)/COC"))
cids = rdDistGeom.EmbedMultipleConfs(mol, 200, maxAttempts=30, randomSeed=100)
energies = []
for cid in cids:
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol, 10.0, cid)
ee = ff.CalcEnergy()
energies.append(ee)
mol2 = Chem.AddHs(Chem.MolFromSmiles("CC(C)(C)c(cc12)n[n]2C(=O)/C=C(N1)/COC"))
cids2 = rdDistGeom.EmbedMultipleConfs(mol2, 200, maxAttempts=30, randomSeed=100, numThreads=4)
self.assertTrue(lstEq(cids, cids2))
nenergies = []
for cid in cids2:
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol2, 10.0, cid)
ee = ff.CalcEnergy()
nenergies.append(ee)
self.assertTrue(lstEq(energies, nenergies, tol=ENERGY_TOLERANCE))
for cid in cids:
msd = 0.0
for i in range(mol.GetNumAtoms()):
msd += (mol.GetConformer().GetAtomPosition(i) \
- mol2.GetConformer().GetAtomPosition(i)).LengthSq()
msd /= mol.GetNumAtoms()
self.assertTrue(msd < MSD_TOLERANCE)
def _compareConfs(self, mol, ref, molConfId, refConfId):
self.assertEqual(mol.GetNumAtoms(), ref.GetNumAtoms())
molConf = mol.GetConformer(molConfId)
refConf = ref.GetConformer(refConfId)
for i in range(mol.GetNumAtoms()):
mp = molConf.GetAtomPosition(i)
rp = refConf.GetAtomPosition(i)
self.assertAlmostEqual((mp - rp).Length(), 0.0, 3)
def test9EmbedParams(self):
mol = Chem.AddHs(Chem.MolFromSmiles('OCCC'))
fn = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'DistGeomHelpers', 'test_data',
'simple_torsion.dg.mol')
ref = Chem.MolFromMolFile(fn, removeHs=False)
params = rdDistGeom.EmbedParameters()
params.randomSeed = 42
self.assertEqual(rdDistGeom.EmbedMolecule(mol, params), 0)
self._compareConfs(mol, ref, 0, 0)
fn = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'DistGeomHelpers', 'test_data',
'simple_torsion.etdg.mol')
ref = Chem.MolFromMolFile(fn, removeHs=False)
params = rdDistGeom.EmbedParameters()
params.randomSeed = 42
params.useExpTorsionAnglePrefs = True
self.assertEqual(rdDistGeom.EmbedMolecule(mol, params), 0)
self._compareConfs(mol, ref, 0, 0)
params = rdDistGeom.ETDG()
params.randomSeed = 42
self.assertEqual(rdDistGeom.EmbedMolecule(mol, params), 0)
self._compareConfs(mol, ref, 0, 0)
fn = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'DistGeomHelpers', 'test_data',
'simple_torsion.etkdg.mol')
ref = Chem.MolFromMolFile(fn, removeHs=False)
params = rdDistGeom.EmbedParameters()
params.randomSeed = 42
params.useExpTorsionAnglePrefs = True
params.useBasicKnowledge = True
self.assertEqual(rdDistGeom.EmbedMolecule(mol, params), 0)
self._compareConfs(mol, ref, 0, 0)
params = rdDistGeom.ETKDG()
params.randomSeed = 42
self.assertEqual(rdDistGeom.EmbedMolecule(mol, params), 0)
self._compareConfs(mol, ref, 0, 0)
fn = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'DistGeomHelpers', 'test_data',
'simple_torsion.kdg.mol')
ref = Chem.MolFromMolFile(fn, removeHs=False)
params = rdDistGeom.EmbedParameters()
params.randomSeed = 42
params.useBasicKnowledge = True
self.assertEqual(rdDistGeom.EmbedMolecule(mol, params), 0)
self._compareConfs(mol, ref, 0, 0)
params = rdDistGeom.KDG()
params.randomSeed = 42
self.assertEqual(rdDistGeom.EmbedMolecule(mol, params), 0)
self._compareConfs(mol, ref, 0, 0)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File IO methods that wrap the C++ FileSystem API.
The C++ FileSystem API is SWIG wrapped in file_io.i. These functions call those
to accomplish basic File IO operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import os
import uuid
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# A good default block size depends on the system in question.
# A somewhat conservative default chosen here.
_DEFAULT_BLOCK_SIZE = 16 * 1024 * 1024
class FileIO(object):
"""FileIO class that exposes methods to read / write to / from files.
The constructor takes the following arguments:
name: name of the file
mode: one of 'r', 'w', 'a', 'r+', 'w+', 'a+'. Append 'b' for bytes mode.
Can be used as an iterator to iterate over lines in the file.
The default buffer size used for the BufferedInputStream used for reading
the file line by line is 1024 * 512 bytes.
"""
def __init__(self, name, mode):
self.__name = name
self.__mode = mode
self._read_buf = None
self._writable_file = None
self._binary_mode = "b" in mode
mode = mode.replace("b", "")
if mode not in ("r", "w", "a", "r+", "w+", "a+"):
raise errors.InvalidArgumentError(
None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
self._read_check_passed = mode in ("r", "r+", "a+", "w+")
self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")
@property
def name(self):
"""Returns the file name."""
return self.__name
@property
def mode(self):
"""Returns the mode in which the file was opened."""
return self.__mode
def _preread_check(self):
if not self._read_buf:
if not self._read_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for reading")
with errors.raise_exception_on_not_ok_status() as status:
self._read_buf = pywrap_tensorflow.CreateBufferedInputStream(
compat.as_bytes(self.__name), 1024 * 512, status)
def _prewrite_check(self):
if not self._writable_file:
if not self._write_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for writing")
with errors.raise_exception_on_not_ok_status() as status:
self._writable_file = pywrap_tensorflow.CreateWritableFile(
compat.as_bytes(self.__name), compat.as_bytes(self.__mode), status)
def _prepare_value(self, val):
if self._binary_mode:
return compat.as_bytes(val)
else:
return compat.as_str_any(val)
def size(self):
"""Returns the size of the file."""
return stat(self.__name).length
def write(self, file_content):
"""Writes file_content to the file. Appends to the end of the file."""
self._prewrite_check()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.AppendToFile(
compat.as_bytes(file_content), self._writable_file, status)
def read(self, n=-1):
"""Returns the contents of a file as a string.
Starts reading from current position in file.
Args:
n: Read 'n' bytes if n != -1. If n = -1, reads to end of file.
Returns:
'n' bytes of the file (or whole file) in bytes mode or 'n' bytes of the
string if in string (regular) mode.
"""
self._preread_check()
with errors.raise_exception_on_not_ok_status() as status:
if n == -1:
length = self.size() - self.tell()
else:
length = n
return self._prepare_value(
pywrap_tensorflow.ReadFromStream(self._read_buf, length, status))
@deprecation.deprecated_args(
None,
"position is deprecated in favor of the offset argument.",
"position")
def seek(self, offset=None, whence=0, position=None):
# TODO(jhseu): Delete later. Used to omit `position` from docs.
# pylint: disable=g-doc-args
"""Seeks to the offset in the file.
Args:
offset: The byte count relative to the whence argument.
whence: Valid values for whence are:
0: start of the file (default)
1: relative to the current position of the file
2: relative to the end of file. offset is usually negative.
"""
# pylint: enable=g-doc-args
self._preread_check()
# We needed to make offset a keyword argument for backwards-compatibility.
# This check exists so that we can convert back to having offset be a
# positional argument.
# TODO(jhseu): Make `offset` a positional argument after `position` is
# deleted.
if offset is None and position is None:
raise TypeError("seek(): offset argument required")
if offset is not None and position is not None:
raise TypeError("seek(): offset and position may not be set "
"simultaneously.")
if position is not None:
offset = position
with errors.raise_exception_on_not_ok_status() as status:
if whence == 0:
pass
elif whence == 1:
offset += self.tell()
elif whence == 2:
offset += self.size()
else:
raise errors.InvalidArgumentError(
None, None,
"Invalid whence argument: {}. Valid values are 0, 1, or 2."
.format(whence))
ret_status = self._read_buf.Seek(offset)
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def readline(self):
r"""Reads the next line from the file. Leaves the '\n' at the end."""
self._preread_check()
return self._prepare_value(self._read_buf.ReadLineAsString())
def readlines(self):
"""Returns all lines from the file in a list."""
self._preread_check()
lines = []
while True:
s = self.readline()
if not s:
break
lines.append(s)
return lines
def tell(self):
"""Returns the current position in the file."""
self._preread_check()
return self._read_buf.Tell()
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def __iter__(self):
return self
def next(self):
retval = self.readline()
if not retval:
raise StopIteration()
return retval
def __next__(self):
return self.next()
def flush(self):
"""Flushes the Writable file.
This only ensures that the data has made its way out of the process without
any guarantees on whether it's written to disk. This means that the
data would survive an application crash but not necessarily an OS crash.
"""
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Flush()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def close(self):
"""Closes FileIO. Should be called for the WritableFile to be flushed."""
self._read_buf = None
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Close()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
self._writable_file = None
@tf_export("gfile.Exists")
def file_exists(filename):
"""Determines whether a path exists or not.
Args:
filename: string, a path
Returns:
True if the path exists, whether its a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
try:
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.FileExists(compat.as_bytes(filename), status)
except errors.NotFoundError:
return False
return True
@tf_export("gfile.Remove")
def delete_file(filename):
"""Deletes the file located at 'filename'.
Args:
filename: string, a filename
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
NotFoundError if the file does not exist.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteFile(compat.as_bytes(filename), status)
def read_file_to_string(filename, binary_mode=False):
"""Reads the entire contents of a file to a string.
Args:
filename: string, path to a file
binary_mode: whether to open the file in binary mode or not. This changes
the type of the object returned.
Returns:
contents of the file as a string or bytes.
Raises:
errors.OpError: Raises variety of errors that are subtypes e.g.
NotFoundError etc.
"""
if binary_mode:
f = FileIO(filename, mode="rb")
else:
f = FileIO(filename, mode="r")
return f.read()
def write_string_to_file(filename, file_content):
"""Writes a string to a given file.
Args:
filename: string, path to a file
file_content: string, contents that need to be written to the file
Raises:
errors.OpError: If there are errors during the operation.
"""
with FileIO(filename, mode="w") as f:
f.write(file_content)
@tf_export("gfile.Glob")
def get_matching_files(filename):
"""Returns a list of files that match the given pattern(s).
Args:
filename: string or iterable of strings. The glob pattern(s).
Returns:
A list of strings containing filenames that match the given pattern(s).
Raises:
errors.OpError: If there are filesystem / directory listing errors.
"""
with errors.raise_exception_on_not_ok_status() as status:
if isinstance(filename, six.string_types):
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for matching_filename in pywrap_tensorflow.GetMatchingFiles(
compat.as_bytes(filename), status)
]
else:
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for single_filename in filename
for matching_filename in pywrap_tensorflow.GetMatchingFiles(
compat.as_bytes(single_filename), status)
]
@tf_export("gfile.MkDir")
def create_dir(dirname):
"""Creates a directory with the name 'dirname'.
Args:
dirname: string, name of the directory to be created
Notes:
The parent directories need to exist. Use recursive_create_dir instead if
there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.MakeDirs")
def recursive_create_dir(dirname):
"""Creates a directory and all parent/intermediate directories.
It succeeds if dirname already exists and is writable.
Args:
dirname: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RecursivelyCreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.Copy")
def copy(oldpath, newpath, overwrite=False):
"""Copies data from oldpath to newpath.
Args:
    oldpath: string, name of the file whose contents need to be copied
    newpath: string, name of the file to copy to
    overwrite: boolean, if false it's an error for newpath to be occupied by an
      existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CopyFile(
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
@tf_export("gfile.Rename")
def rename(oldname, newname, overwrite=False):
"""Rename or move a file / directory.
Args:
oldname: string, pathname for a file
newname: string, pathname to which the file needs to be moved
overwrite: boolean, if false it's an error for `newname` to be occupied by
an existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RenameFile(
compat.as_bytes(oldname), compat.as_bytes(newname), overwrite, status)
def atomic_write_string_to_file(filename, contents, overwrite=True):
"""Writes to `filename` atomically.
This means that when `filename` appears in the filesystem, it will contain
all of `contents`. With write_string_to_file, it is possible for the file
to appear in the filesystem with `contents` only partially written.
Accomplished by writing to a temp file and then renaming it.
Args:
filename: string, pathname for a file
contents: string, contents that need to be written to the file
overwrite: boolean, if false it's an error for `filename` to be occupied by
an existing file.
"""
temp_pathname = filename + ".tmp" + uuid.uuid4().hex
write_string_to_file(temp_pathname, contents)
try:
rename(temp_pathname, filename, overwrite)
except errors.OpError:
delete_file(temp_pathname)
raise
@tf_export("gfile.DeleteRecursively")
def delete_recursively(dirname):
"""Deletes everything under dirname recursively.
Args:
dirname: string, a path to a directory
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteRecursively(compat.as_bytes(dirname), status)
@tf_export("gfile.IsDirectory")
def is_directory(dirname):
"""Returns whether the path is a directory or not.
Args:
dirname: string, path to a potential directory
Returns:
True, if the path is a directory; False otherwise
"""
status = c_api_util.ScopedTFStatus()
return pywrap_tensorflow.IsDirectory(compat.as_bytes(dirname), status)
@tf_export("gfile.ListDirectory")
def list_directory(dirname):
"""Returns a list of entries contained within a directory.
The list is in arbitrary order. It does not contain the special entries "."
and "..".
Args:
dirname: string, path to a directory
Returns:
[filename1, filename2, ... filenameN] as strings
Raises:
errors.NotFoundError if directory doesn't exist
"""
if not is_directory(dirname):
raise errors.NotFoundError(None, None, "Could not find directory")
with errors.raise_exception_on_not_ok_status() as status:
# Convert each element to string, since the return values of the
# vector of string should be interpreted as strings, not bytes.
return [
compat.as_str_any(filename)
for filename in pywrap_tensorflow.GetChildren(
compat.as_bytes(dirname), status)
]
@tf_export("gfile.Walk")
def walk(top, in_order=True):
"""Recursive directory tree generator for directories.
Args:
top: string, a Directory name
in_order: bool, Traverse in order if True, post order if False.
Errors that happen while listing directories are ignored.
Yields:
Each yield is a 3-tuple: the pathname of a directory, followed by lists of
all its subdirectories and leaf files.
(dirname, [subdirname, subdirname, ...], [filename, filename, ...])
as strings
"""
top = compat.as_str_any(top)
try:
listing = list_directory(top)
except errors.NotFoundError:
return
files = []
subdirs = []
for item in listing:
full_path = os.path.join(top, item)
if is_directory(full_path):
subdirs.append(item)
else:
files.append(item)
here = (top, subdirs, files)
if in_order:
yield here
for subdir in subdirs:
for subitem in walk(os.path.join(top, subdir), in_order):
yield subitem
if not in_order:
yield here
@tf_export("gfile.Stat")
def stat(filename):
"""Returns file statistics for a given path.
Args:
filename: string, path to a file
Returns:
FileStatistics struct that contains information about the path
Raises:
errors.OpError: If the operation fails.
"""
file_statistics = pywrap_tensorflow.FileStatistics()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.Stat(compat.as_bytes(filename), file_statistics, status)
return file_statistics
def filecmp(filename_a, filename_b):
"""Compare two files, returning True if they are the same, False otherwise.
We check size first and return False quickly if the files are different sizes.
  If they are the same size, we continue by generating a crc for the whole file.
You might wonder: why not use Python's filecmp.cmp() instead? The answer is
that the builtin library is not robust to the many different filesystems
TensorFlow runs on, and so we here perform a similar comparison with
the more robust FileIO.
Args:
filename_a: string path to the first file.
filename_b: string path to the second file.
Returns:
True if the files are the same, False otherwise.
"""
size_a = FileIO(filename_a, "rb").size()
size_b = FileIO(filename_b, "rb").size()
if size_a != size_b:
return False
# Size is the same. Do a full check.
crc_a = file_crc32(filename_a)
crc_b = file_crc32(filename_b)
return crc_a == crc_b
def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):
"""Get the crc32 of the passed file.
The crc32 of a file can be used for error checking; two files with the same
crc32 are considered equivalent. Note that the entire file must be read
to produce the crc32.
Args:
filename: string, path to a file
block_size: Integer, process the files by reading blocks of `block_size`
      bytes. Use -1 to read the whole file at once.
Returns:
hexadecimal as string, the crc32 of the passed file.
"""
crc = 0
with FileIO(filename, mode="rb") as f:
chunk = f.read(n=block_size)
while chunk:
crc = binascii.crc32(chunk, crc)
chunk = f.read(n=block_size)
return hex(crc & 0xFFFFFFFF)
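# A minimal usage sketch (not part of the module) of the helpers defined above;
# the paths are placeholders:
#
#     atomic_write_string_to_file("/tmp/example.txt", "hello world")
#     if file_exists("/tmp/example.txt"):
#         print(read_file_to_string("/tmp/example.txt"))
#     print(file_crc32("/tmp/example.txt"))
#     for dirname, subdirs, files in walk("/tmp"):
#         pass  # each yield is (dirname, [subdirname, ...], [filename, ...])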
|
|
"""
Support for reading and writing genomic intervals from delimited text files.
"""
from bx.bitset import (
BinnedBitSet,
MAX
)
from bx.tabular.io import (
ParseError,
TableReader,
TableRow,
)
class MissingFieldError(ParseError):
pass
class FieldFormatError(ParseError):
def __init__(self, *args, **kwargs):
ParseError.__init__(self, *args, **kwargs)
self.expected = kwargs.get("expected", None)
def __str__(self):
if self.expected:
return ParseError.__str__(self) + ", " + self.expected + " expected"
else:
return ParseError.__str__(self)
class StrandFormatError(ParseError):
pass
class GenomicInterval(TableRow):
"""
A genomic interval stored in a set of fields (a row of a table)
"""
def __init__(self, reader, fields, chrom_col, start_col, end_col, strand_col, default_strand, fix_strand=False):
TableRow.__init__(self, reader, fields)
self.chrom_col = chrom_col
self.start_col = start_col
self.end_col = end_col
self.strand_col = strand_col
self.nfields = nfields = len(fields)
# Parse chrom/source column
if chrom_col >= nfields:
raise MissingFieldError("No field for chrom_col (%d)" % chrom_col)
self.chrom = fields[chrom_col].strip()
# Parse start column and ensure it is an integer
if start_col >= nfields:
raise MissingFieldError("No field for start_col (%d)" % start_col)
try:
self.start = int(fields[start_col])
except ValueError as e:
raise FieldFormatError("Could not parse start_col: " + str(e), expected="integer")
# Parse end column and ensure it is an integer
if end_col >= nfields:
raise MissingFieldError("No field for end_col (%d)" % end_col)
try:
self.end = int(fields[end_col])
except ValueError as e:
raise FieldFormatError("Could not parse end_col: " + str(e), expected="integer")
# Ensure start <= end
if self.end < self.start:
raise ParseError("Start is greater than End. Interval length is < 1.")
# Parse strand and ensure it is valid
if strand_col >= nfields or strand_col < 0:
            # This should probably be immutable since the fields are
# not updated when it is set
self.strand = default_strand
else:
strand = fields[strand_col]
if strand == ".":
strand = default_strand
elif strand not in ("+", "-"):
if fix_strand:
strand = "+"
else:
raise StrandFormatError("Strand must be either '+' or '-'")
self.strand = strand
def __setattr__(self, name, value):
if name == "chrom":
self.fields[self.chrom_col] = str(value)
elif name == "start":
self.fields[self.start_col] = str(value)
elif name == "end":
self.fields[self.end_col] = str(value)
elif name == "strand":
if self.strand_col < self.nfields and self.strand_col >= 0:
self.fields[self.strand_col] = str(value)
object.__setattr__(self, name, value)
def __str__(self):
return "\t".join(self.fields)
def copy(self):
return GenomicInterval(self.reader, list(self.fields), self.chrom_col, self.start_col, self.end_col, self.strand_col, self.strand)
class GenomicIntervalReader(TableReader):
"""
Reader for iterating a set of intervals in a tab separated file. Can
also parse header and comment lines if requested.
>>> from bx.tabular.io import Comment, Header
>>> r = GenomicIntervalReader( [ "#chrom\\tname\\tstart\\tend\\textra",
... "chr1\\tfoo\\t1\\t100\\txxx",
... "chr2\\tbar\\t20\\t300\\txxx",
... "#I am a comment",
... "chr2\\tbar\\t20\\t300\\txxx" ], start_col=2, end_col=3 )
>>> header = next(r)
>>> elements = list(r)
>>> elements.insert(0, header)
>>> assert isinstance(elements[0], Header)
>>> str(elements[0])
'#chrom\\tname\\tstart\\tend\\textra'
>>> assert isinstance(elements[1], GenomicInterval)
>>> print(elements[1].start, elements[1].end)
1 100
>>> str(elements[1])
'chr1\\tfoo\\t1\\t100\\txxx'
>>> elements[1].start = 30
>>> print(elements[1].start, elements[1].end)
30 100
>>> str(elements[1])
'chr1\\tfoo\\t30\\t100\\txxx'
>>> assert isinstance(elements[2], GenomicInterval)
>>> assert isinstance(elements[3], Comment)
>>> assert isinstance(elements[4], GenomicInterval)
"""
def __init__(self, input, chrom_col=0, start_col=1, end_col=2, strand_col=5,
default_strand="+", return_header=True, return_comments=True, force_header=None, fix_strand=False, comment_lines_startswith=None, allow_spaces=False):
if comment_lines_startswith is None:
comment_lines_startswith = ["#", "track "]
TableReader.__init__(self, input, return_header, return_comments, force_header, comment_lines_startswith)
self.chrom_col = chrom_col
self.start_col = start_col
self.end_col = end_col
self.strand_col = strand_col
self.default_strand = default_strand
self.fix_strand = fix_strand
self.allow_spaces = allow_spaces
def parse_row(self, line):
        # Try multiple separators. First tab, our expected splitter, then
# just whitespace in the case of problematic files with space instead of
# tab separation
seps = ["\t"]
if self.allow_spaces:
seps.append(None)
for i, sep in enumerate(seps):
try:
return GenomicInterval(
self, line.split(sep), self.chrom_col, self.start_col,
self.end_col, self.strand_col, self.default_strand,
fix_strand=self.fix_strand)
except Exception as e:
# Catch and store the initial error
if i == 0:
err = e
# Ran out of separators and still have errors, raise our problem
raise err
def binned_bitsets(self, upstream_pad=0, downstream_pad=0, lens=None):
# The incoming lens dictionary is a dictionary of chromosome lengths
# which are used to initialize the bitsets.
if lens is None:
lens = {}
last_chrom = None
last_bitset = None
bitsets = dict()
for interval in self:
if isinstance(interval, GenomicInterval):
chrom = interval[self.chrom_col]
if chrom != last_chrom:
if chrom not in bitsets:
size = lens.get(chrom, MAX)
try:
bbs = BinnedBitSet(size)
except ValueError as e:
# We will only reach here when constructing this bitset from the lens dict
# since the value of MAX is always safe.
raise Exception("Invalid chrom length {} in 'lens' dictionary. {}".format(str(size), str(e)))
bitsets[chrom] = bbs
last_chrom = chrom
last_bitset = bitsets[chrom]
start = max(int(interval[self.start_col]), 0)
end = min(int(interval[self.end_col]), last_bitset.size)
last_bitset.set_range(start, end-start)
return bitsets
class NiceReaderWrapper(GenomicIntervalReader):
"""
>>> from bx.tabular.io import Header
>>> r = NiceReaderWrapper(["#chrom\\tname\\tstart\\tend\\textra",
... "chr1\\tfoo\\t1\\t100\\txxx",
... "chr2\\tbar\\t20\\t300\\txxx",
... "#I am a comment",
... "chr2\\tbar\\t20\\t300\\txxx" ], start_col=2, end_col=3 )
>>> assert isinstance(next(r), Header)
>>> assert r.current_line == '#chrom\\tname\\tstart\\tend\\textra', r.current_line
>>> assert len([_ for _ in r]) == 4
"""
def __init__(self, reader, **kwargs):
GenomicIntervalReader.__init__(self, reader, **kwargs)
self.outstream = kwargs.get("outstream", None)
self.print_delegate = kwargs.get("print_delegate", None)
self.input_wrapper = iter(self.input)
self.input_iter = self.iterwrapper()
self.skipped = 0
self.skipped_lines = []
def __iter__(self):
return self
def __next__(self):
while True:
try:
nextitem = super().__next__()
return nextitem
except ParseError as e:
if self.outstream:
if self.print_delegate and callable(self.print_delegate):
self.print_delegate(self.outstream, e, self)
self.skipped += 1
# no reason to stuff an entire bad file into memory
if self.skipped < 10:
self.skipped_lines.append((self.linenum, self.current_line, str(e)))
def iterwrapper(self):
# Generator which keeps track of the current line as an object attribute.
for self.current_line in self.input_wrapper:
yield self.current_line
class BitsetSafeReaderWrapper(NiceReaderWrapper):
def __init__(self, reader, lens=None):
# This class handles any ValueError, IndexError and OverflowError exceptions that may be thrown when
# the bitsets are being created by skipping the problem lines.
# The incoming lens dictionary is a dictionary of chromosome lengths
# which are used to initialize the bitsets.
# It is assumed that the reader is an interval reader, i.e. it has chr_col, start_col, end_col and strand_col attributes.
if lens is None:
lens = {}
NiceReaderWrapper.__init__(self, reader.input, chrom_col=reader.chrom_col, start_col=reader.start_col, end_col=reader.end_col, strand_col=reader.strand_col)
self.lens = lens
def __next__(self):
while True:
rval = super().__next__()
if isinstance(rval, GenomicInterval) and rval.end > self.lens.get(rval.chrom, MAX):
self.skipped += 1
# no reason to stuff an entire bad file into memory
if self.skipped < 10:
self.skipped_lines.append((self.linenum, self.current_line, "Error in BitsetSafeReaderWrapper"))
else:
return rval
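# A minimal usage sketch (not part of the module) showing how binned_bitsets()
# collapses intervals into per-chromosome bitsets; the interval strings below
# are made up for illustration (default columns: chrom=0, start=1, end=2):
#
#     reader = GenomicIntervalReader(["chr1\t10\t20\tfeature_a\t0\t+",
#                                     "chr1\t15\t30\tfeature_b\t0\t-"])
#     bitsets = reader.binned_bitsets()
#     bitsets["chr1"].count_range(0, 40)  # -> 20 covered bases ([10, 30))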
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""An executable to expand hierarchically image-level labels and boxes.
Example usage:
python models/research/object_detection/dataset_tools/\
oid_hierarchical_labels_expansion.py \
--json_hierarchy_file=<path to JSON hierarchy> \
--input_annotations=<input csv file> \
--output_annotations=<output csv file> \
--annotation_type=<1 (for boxes) or 2 (for image-level labels)>
"""
from __future__ import print_function
import argparse
import json
def _update_dict(initial_dict, update):
"""Updates dictionary with update content.
Args:
initial_dict: initial dictionary.
update: updated dictionary.
"""
for key, value_list in update.iteritems():
if key in initial_dict:
initial_dict[key].extend(value_list)
else:
initial_dict[key] = value_list
def _build_plain_hierarchy(hierarchy, skip_root=False):
"""Expands tree hierarchy representation to parent-child dictionary.
Args:
hierarchy: labels hierarchy as JSON file.
    skip_root: if true, skips the root node (used when all classes in the
      hierarchy are collected under a virtual root node).
Returns:
    keyed_parent - dictionary mapping each parent node to all of its children.
    keyed_child - dictionary mapping each child node to all of its parents.
    children - all children of the current node.
"""
all_children = []
all_keyed_parent = {}
all_keyed_child = {}
if 'Subcategory' in hierarchy:
for node in hierarchy['Subcategory']:
keyed_parent, keyed_child, children = _build_plain_hierarchy(node)
      # Update is not done through dict.update() since some children have
      # multiple parents in the hierarchy.
_update_dict(all_keyed_parent, keyed_parent)
_update_dict(all_keyed_child, keyed_child)
all_children.extend(children)
if not skip_root:
all_keyed_parent[hierarchy['LabelName']] = all_children
all_children = [hierarchy['LabelName']] + all_children
for child, _ in all_keyed_child.iteritems():
all_keyed_child[child].append(hierarchy['LabelName'])
all_keyed_child[hierarchy['LabelName']] = []
return all_keyed_parent, all_keyed_child, all_children
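# A small made-up illustration (not from the Open Images data) of the
# flattening performed by _build_plain_hierarchy:
#
#     hierarchy = {'LabelName': 'root', 'Subcategory': [
#         {'LabelName': 'animal', 'Subcategory': [{'LabelName': 'cat'}]}]}
#     keyed_parent, keyed_child, _ = _build_plain_hierarchy(hierarchy,
#                                                           skip_root=True)
#     # keyed_parent == {'cat': [], 'animal': ['cat']}
#     # keyed_child  == {'cat': ['animal'], 'animal': []}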
class OIDHierarchicalLabelsExpansion(object):
""" Main class to perform labels hierachical expansion."""
def __init__(self, hierarchy):
"""Constructor.
Args:
hierarchy: labels hierarchy as JSON object.
"""
self._hierarchy_keyed_parent, self._hierarchy_keyed_child, _ = (
_build_plain_hierarchy(hierarchy, skip_root=True))
def expand_boxes_from_csv(self, csv_row):
"""Expands a row containing bounding boxes from CSV file.
Args:
csv_row: a single row of Open Images released groundtruth file.
Returns:
a list of strings (including the initial row) corresponding to the ground
truth expanded to multiple annotation for evaluation with Open Images
Challenge 2018 metric.
"""
# Row header is expected to be exactly:
# ImageID,Source,LabelName,Confidence,XMin,XMax,YMin,YMax,IsOccluded,
# IsTruncated,IsGroupOf,IsDepiction,IsInside
cvs_row_splitted = csv_row.split(',')
assert len(cvs_row_splitted) == 13
result = [csv_row]
assert cvs_row_splitted[2] in self._hierarchy_keyed_child
parent_nodes = self._hierarchy_keyed_child[cvs_row_splitted[2]]
for parent_node in parent_nodes:
cvs_row_splitted[2] = parent_node
result.append(','.join(cvs_row_splitted))
return result
def expand_labels_from_csv(self, csv_row):
"""Expands a row containing bounding boxes from CSV file.
Args:
csv_row: a single row of Open Images released groundtruth file.
Returns:
a list of strings (including the initial row) corresponding to the ground
truth expanded to multiple annotation for evaluation with Open Images
Challenge 2018 metric.
"""
# Row header is expected to be exactly:
# ImageID,Source,LabelName,Confidence
cvs_row_splited = csv_row.split(',')
assert len(cvs_row_splited) == 4
result = [csv_row]
if int(cvs_row_splited[3]) == 1:
assert cvs_row_splited[2] in self._hierarchy_keyed_child
parent_nodes = self._hierarchy_keyed_child[cvs_row_splited[2]]
for parent_node in parent_nodes:
cvs_row_splited[2] = parent_node
result.append(','.join(cvs_row_splited))
else:
assert cvs_row_splited[2] in self._hierarchy_keyed_parent
child_nodes = self._hierarchy_keyed_parent[cvs_row_splited[2]]
for child_node in child_nodes:
cvs_row_splited[2] = child_node
result.append(','.join(cvs_row_splited))
return result
def main(parsed_args):
with open(parsed_args.json_hierarchy_file) as f:
hierarchy = json.load(f)
expansion_generator = OIDHierarchicalLabelsExpansion(hierarchy)
labels_file = False
if parsed_args.annotation_type == 2:
labels_file = True
elif parsed_args.annotation_type != 1:
print('--annotation_type expected value is 1 or 2.')
return -1
with open(parsed_args.input_annotations, 'r') as source:
with open(parsed_args.output_annotations, 'w') as target:
header = None
for line in source:
if not header:
header = line
target.writelines(header)
continue
if labels_file:
expanded_lines = expansion_generator.expand_labels_from_csv(line)
else:
expanded_lines = expansion_generator.expand_boxes_from_csv(line)
target.writelines(expanded_lines)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Hierarchically expand annotations (excluding root node).')
parser.add_argument(
'--json_hierarchy_file',
required=True,
help='Path to the file containing label hierarchy in JSON format.')
parser.add_argument(
'--input_annotations',
required=True,
help="""Path to Open Images annotations file (either bounding boxes or
image-level labels).""")
parser.add_argument(
'--output_annotations',
required=True,
help="""Path to the output file.""")
parser.add_argument(
'--annotation_type',
type=int,
required=True,
help="""Type of the input annotations: 1 - boxes, 2 - image-level
labels"""
)
args = parser.parse_args()
main(args)
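# A hand-made illustration (not produced by running the script) of the label
# expansion: with a hierarchy where /m/child is a subcategory of /m/parent,
# a positive image-level label row such as
#
#     000001,verification,/m/child,1
#
# is kept and an extra row carrying the parent label is appended:
#
#     000001,verification,/m/parent,1
#
# Negative labels (Confidence 0) are instead propagated down to all child
# classes. The label ids here are placeholders.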
|
|
# -*- coding: utf-8 -*-
import datetime
import logging
import httplib
import httplib as http # TODO: Inconsistent usage of aliased import
from dateutil.parser import parse as parse_date
from flask import request
import markupsafe
from modularodm.exceptions import ValidationError, NoResultsFound, MultipleResultsFound
from modularodm import Q
from framework import sentry
from framework.auth import utils as auth_utils
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.auth.decorators import must_be_confirmed
from framework.auth.exceptions import ChangePasswordError
from framework.auth.views import send_confirm_email
from framework.auth.signals import user_merged
from framework.exceptions import HTTPError, PermissionsError
from framework.flask import redirect # VOL-aware redirect
from framework.status import push_status_message
from website import mails
from website import mailchimp_utils
from website import settings
from website.project.model import Node
from website.project.utils import PROJECT_QUERY, TOP_LEVEL_PROJECT_QUERY
from website.models import ApiOAuth2Application, ApiOAuth2PersonalToken, User
from website.oauth.utils import get_available_scopes
from website.profile import utils as profile_utils
from website.util.time import throttle_period_expired
from website.util import api_v2_url, web_url_for, paths
from website.util.sanitize import escape_html
from website.util.sanitize import strip_html
from website.views import _render_nodes
from website.addons.base import utils as addon_utils
logger = logging.getLogger(__name__)
def get_public_projects(uid=None, user=None):
user = user or User.load(uid)
# In future redesign, should be limited for users with many projects / components
nodes = Node.find_for_user(
user,
subquery=(
TOP_LEVEL_PROJECT_QUERY &
Q('is_public', 'eq', True)
)
)
return _render_nodes(list(nodes))
def get_public_components(uid=None, user=None):
user = user or User.load(uid)
# TODO: This should use User.visible_contributor_to?
# In future redesign, should be limited for users with many projects / components
nodes = list(
Node.find_for_user(
user,
subquery=(
PROJECT_QUERY &
Q('parent_node', 'ne', None) &
Q('is_public', 'eq', True)
)
)
)
return _render_nodes(nodes, show_path=True)
@must_be_logged_in
def current_user_gravatar(size=None, **kwargs):
user_id = kwargs['auth'].user._id
return get_gravatar(user_id, size=size)
def get_gravatar(uid, size=None):
return {'gravatar_url': profile_utils.get_gravatar(User.load(uid), size=size)}
def date_or_none(date):
try:
return parse_date(date)
except Exception as error:
logger.exception(error)
return None
def validate_user(data, user):
"""Check if the user in request is the user who log in """
if 'id' in data:
if data['id'] != user._id:
raise HTTPError(httplib.FORBIDDEN)
else:
# raise an error if request doesn't have user id
raise HTTPError(httplib.BAD_REQUEST, data={'message_long': '"id" is required'})
@must_be_logged_in
def resend_confirmation(auth):
user = auth.user
data = request.get_json()
validate_user(data, user)
if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
raise HTTPError(httplib.BAD_REQUEST,
data={'message_long': 'Too many requests. Please wait a while before sending another confirmation email.'})
try:
primary = data['email']['primary']
confirmed = data['email']['confirmed']
address = data['email']['address'].strip().lower()
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if primary or confirmed:
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'Cannot resend confirmation for confirmed emails'})
user.add_unconfirmed_email(address)
# TODO: This setting is now named incorrectly.
if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
send_confirm_email(user, email=address)
user.email_last_sent = datetime.datetime.utcnow()
user.save()
return _profile_view(user, is_profile=True)
@must_be_logged_in
def update_user(auth):
"""Update the logged-in user's profile."""
# trust the decorator to handle auth
user = auth.user
data = request.get_json()
validate_user(data, user)
# TODO: Expand this to support other user attributes
##########
# Emails #
##########
if 'emails' in data:
emails_list = [x['address'].strip().lower() for x in data['emails']]
if user.username.strip().lower() not in emails_list:
raise HTTPError(httplib.FORBIDDEN)
available_emails = [
each.strip().lower() for each in
user.emails + user.unconfirmed_emails
]
# removals
removed_emails = [
each.strip().lower()
for each in available_emails
if each not in emails_list
]
if user.username.strip().lower() in removed_emails:
raise HTTPError(httplib.FORBIDDEN)
for address in removed_emails:
if address in user.emails:
try:
user.remove_email(address)
except PermissionsError as e:
raise HTTPError(httplib.FORBIDDEN, e.message)
user.remove_unconfirmed_email(address)
# additions
added_emails = [
each['address'].strip().lower()
for each in data['emails']
if each['address'].strip().lower() not in available_emails
]
for address in added_emails:
try:
user.add_unconfirmed_email(address)
except (ValidationError, ValueError):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid Email')
)
# TODO: This setting is now named incorrectly.
if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
send_confirm_email(user, email=address)
############
# Username #
############
# get the first email that is set to primary and has an address
primary_email = next(
(
each for each in data['emails']
# email is primary
if each.get('primary') and each.get('confirmed')
# an address is specified (can't trust those sneaky users!)
and each.get('address')
        ), None
    )
if primary_email:
primary_email_address = primary_email['address'].strip().lower()
if primary_email_address not in [each.strip().lower() for each in user.emails]:
raise HTTPError(httplib.FORBIDDEN)
username = primary_email_address
# make sure the new username has already been confirmed
if username and username in user.emails and username != user.username:
mails.send_mail(user.username,
mails.PRIMARY_EMAIL_CHANGED,
user=user,
new_address=username)
# Remove old primary email from subscribed mailing lists
for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
if subscription:
mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username)
user.username = username
###################
# Timezone/Locale #
###################
if 'locale' in data:
if data['locale']:
locale = data['locale'].replace('-', '_')
user.locale = locale
# TODO: Refactor to something like:
# user.timezone = data.get('timezone', user.timezone)
if 'timezone' in data:
if data['timezone']:
user.timezone = data['timezone']
user.save()
# Update subscribed mailing lists with new primary email
# TODO: move to user.save()
for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
if subscription:
mailchimp_utils.subscribe_mailchimp(list_name, user._id)
return _profile_view(user, is_profile=True)
def _profile_view(profile, is_profile=False):
# TODO: Fix circular import
from website.addons.badges.util import get_sorted_user_badges
if profile and profile.is_disabled:
raise HTTPError(http.GONE)
if 'badges' in settings.ADDONS_REQUESTED:
        badge_assertions = get_sorted_user_badges(profile)
badges = _get_user_created_badges(profile)
else:
        # NOTE: While badges are unused, 'assertions' and 'badges' can be
        # empty lists.
badge_assertions = []
badges = []
if profile:
profile_user_data = profile_utils.serialize_user(profile, full=True, is_profile=is_profile)
return {
'profile': profile_user_data,
'assertions': badge_assertions,
'badges': badges,
'user': {
'is_profile': is_profile,
'can_edit': None, # necessary for rendering nodes
'permissions': [], # necessary for rendering nodes
},
}
raise HTTPError(http.NOT_FOUND)
def _get_user_created_badges(user):
from website.addons.badges.model import Badge
addon = user.get_addon('badges')
if addon:
return [badge for badge in Badge.find(Q('creator', 'eq', addon._id)) if not badge.is_system_badge]
return []
@must_be_logged_in
def profile_view(auth):
return _profile_view(auth.user, True)
@collect_auth
@must_be_confirmed
def profile_view_id(uid, auth):
user = User.load(uid)
is_profile = auth and auth.user == user
return _profile_view(user, is_profile)
@must_be_logged_in
def edit_profile(**kwargs):
# NOTE: This method is deprecated. Use update_user instead.
# TODO: Remove this view
user = kwargs['auth'].user
form = request.form
ret = {'response': 'success'}
if form.get('name') == 'fullname' and form.get('value', '').strip():
user.fullname = strip_html(form['value']).strip()
user.save()
ret['name'] = user.fullname
return ret
def get_profile_summary(user_id, formatter='long'):
user = User.load(user_id)
return user.get_summary(formatter)
@must_be_logged_in
def user_profile(auth, **kwargs):
user = auth.user
return {
'user_id': user._id,
'user_api_url': user.api_url,
}
@must_be_logged_in
def user_account(auth, **kwargs):
user = auth.user
user_addons = addon_utils.get_addons_by_config_type('user', user)
return {
'user_id': user._id,
'addons': user_addons,
'addons_js': collect_user_config_js([addon for addon in settings.ADDONS_AVAILABLE if 'user' in addon.configs]),
'addons_css': [],
'requested_deactivation': user.requested_deactivation
}
@must_be_logged_in
def user_account_password(auth, **kwargs):
user = auth.user
old_password = request.form.get('old_password', None)
new_password = request.form.get('new_password', None)
confirm_password = request.form.get('confirm_password', None)
try:
user.change_password(old_password, new_password, confirm_password)
user.save()
except ChangePasswordError as error:
for m in error.messages:
push_status_message(m, kind='warning', trust=False)
else:
push_status_message('Password updated successfully.', kind='success', trust=False)
return redirect(web_url_for('user_account'))
@must_be_logged_in
def user_addons(auth, **kwargs):
user = auth.user
ret = {
'addon_settings': addon_utils.get_addons_by_config_type('accounts', user),
}
accounts_addons = [addon for addon in settings.ADDONS_AVAILABLE if 'accounts' in addon.configs]
ret.update({
'addon_enabled_settings': [addon.short_name for addon in accounts_addons],
'addons_js': collect_user_config_js(accounts_addons),
'addon_capabilities': settings.ADDON_CAPABILITIES,
'addons_css': []
})
return ret
@must_be_logged_in
def user_notifications(auth, **kwargs):
"""Get subscribe data from user"""
return {
'mailing_lists': dict(auth.user.mailchimp_mailing_lists.items() + auth.user.osf_mailing_lists.items())
}
@must_be_logged_in
def oauth_application_list(auth, **kwargs):
"""Return app creation page with list of known apps. API is responsible for tying list to current user."""
app_list_url = api_v2_url('applications/')
return {
'app_list_url': app_list_url
}
@must_be_logged_in
def oauth_application_register(auth, **kwargs):
"""Register an API application: blank form view"""
app_list_url = api_v2_url('applications/') # POST request to this url
return {'app_list_url': app_list_url,
'app_detail_url': ''}
@must_be_logged_in
def oauth_application_detail(auth, **kwargs):
"""Show detail for a single OAuth application"""
client_id = kwargs.get('client_id')
# The client ID must be an active and existing record, and the logged-in user must have permission to view it.
try:
record = ApiOAuth2Application.find_one(Q('client_id', 'eq', client_id))
except NoResultsFound:
raise HTTPError(http.NOT_FOUND)
if record.owner != auth.user:
raise HTTPError(http.FORBIDDEN)
if record.is_active is False:
raise HTTPError(http.GONE)
app_detail_url = api_v2_url('applications/{}/'.format(client_id)) # Send request to this URL
return {'app_list_url': '',
'app_detail_url': app_detail_url}
@must_be_logged_in
def personal_access_token_list(auth, **kwargs):
"""Return token creation page with list of known tokens. API is responsible for tying list to current user."""
token_list_url = api_v2_url('tokens/')
return {
'token_list_url': token_list_url
}
@must_be_logged_in
def personal_access_token_register(auth, **kwargs):
"""Register a personal access token: blank form view"""
token_list_url = api_v2_url('tokens/') # POST request to this url
return {'token_list_url': token_list_url,
'token_detail_url': '',
'scope_options': get_available_scopes()}
@must_be_logged_in
def personal_access_token_detail(auth, **kwargs):
"""Show detail for a single personal access token"""
_id = kwargs.get('_id')
# The ID must be an active and existing record, and the logged-in user must have permission to view it.
try:
record = ApiOAuth2PersonalToken.find_one(Q('_id', 'eq', _id))
except NoResultsFound:
raise HTTPError(http.NOT_FOUND)
if record.owner != auth.user:
raise HTTPError(http.FORBIDDEN)
if record.is_active is False:
raise HTTPError(http.GONE)
token_detail_url = api_v2_url('tokens/{}/'.format(_id)) # Send request to this URL
return {'token_list_url': '',
'token_detail_url': token_detail_url,
'scope_options': get_available_scopes()}
def collect_user_config_js(addon_configs):
"""Collect webpack bundles for each of the addons' user-cfg.js modules. Return
the URLs for each of the JS modules to be included on the user addons config page.
    :param list addon_configs: List of the user's addon config records.
"""
js_modules = []
for addon_config in addon_configs:
js_path = paths.resolve_addon_path(addon_config, 'user-cfg.js')
if js_path:
js_modules.append(js_path)
return js_modules
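# A minimal usage sketch (mirrors the call made in user_account() above; the
# settings attributes are assumed to be configured as in the rest of this module):
#
#   configs = [addon for addon in settings.ADDONS_AVAILABLE if 'user' in addon.configs]
#   js_urls = collect_user_config_js(configs)
#   # each entry is a webpack-resolved path suitable for inclusion as a <script> tag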
@must_be_logged_in
def user_choose_addons(**kwargs):
auth = kwargs['auth']
json_data = escape_html(request.get_json())
auth.user.config_addons(json_data, auth)
@must_be_logged_in
def user_choose_mailing_lists(auth, **kwargs):
""" Update mailing list subscription on user model and in mailchimp
Example input:
{
"Open Science Framework General": true,
...
}
"""
user = auth.user
json_data = escape_html(request.get_json())
if json_data:
for list_name, subscribe in json_data.items():
# TO DO: change this to take in any potential non-mailchimp, something like try: update_subscription(), except IndexNotFound: update_mailchimp_subscription()
if list_name == settings.OSF_HELP_LIST:
update_osf_help_mails_subscription(user=user, subscribe=subscribe)
else:
update_mailchimp_subscription(user, list_name, subscribe)
else:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long="Must provide a dictionary of the format {'mailing list name': Boolean}")
)
user.save()
all_mailing_lists = {}
all_mailing_lists.update(user.mailchimp_mailing_lists)
all_mailing_lists.update(user.osf_mailing_lists)
return {'message': 'Successfully updated mailing lists', 'result': all_mailing_lists}, 200
@user_merged.connect
def update_mailchimp_subscription(user, list_name, subscription, send_goodbye=True):
""" Update mailing list subscription in mailchimp.
:param obj user: current user
:param str list_name: mailing list
:param boolean subscription: true if user is subscribed
"""
if subscription:
mailchimp_utils.subscribe_mailchimp(list_name, user._id)
else:
try:
mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username, send_goodbye=send_goodbye)
except mailchimp_utils.mailchimp.ListNotSubscribedError:
raise HTTPError(http.BAD_REQUEST,
data=dict(message_short='ListNotSubscribedError',
message_long='The user is already unsubscribed from this mailing list.',
error_type='not_subscribed')
)
def mailchimp_get_endpoint(**kwargs):
"""Endpoint that the mailchimp webhook hits to check that the OSF is responding"""
return {}, http.OK
def sync_data_from_mailchimp(**kwargs):
"""Endpoint that the mailchimp webhook sends its data to"""
key = request.args.get('key')
if key == settings.MAILCHIMP_WEBHOOK_SECRET_KEY:
r = request
action = r.values['type']
list_name = mailchimp_utils.get_list_name_from_id(list_id=r.values['data[list_id]'])
username = r.values['data[email]']
try:
user = User.find_one(Q('username', 'eq', username))
except NoResultsFound:
sentry.log_exception()
sentry.log_message('A user with this username does not exist.')
raise HTTPError(404, data=dict(message_short='User not found',
message_long='A user with this username does not exist'))
if action == 'unsubscribe':
user.mailchimp_mailing_lists[list_name] = False
user.save()
elif action == 'subscribe':
user.mailchimp_mailing_lists[list_name] = True
user.save()
else:
# TODO: get tests to pass with sentry logging
# sentry.log_exception()
# sentry.log_message("Unauthorized request to the OSF.")
raise HTTPError(http.UNAUTHORIZED)
@must_be_logged_in
def impute_names(**kwargs):
name = request.args.get('name', '')
return auth_utils.impute_names(name)
def update_osf_help_mails_subscription(user, subscribe):
user.osf_mailing_lists[settings.OSF_HELP_LIST] = subscribe
user.save()
@must_be_logged_in
def serialize_names(**kwargs):
user = kwargs['auth'].user
return {
'full': user.fullname,
'given': user.given_name,
'middle': user.middle_names,
'family': user.family_name,
'suffix': user.suffix,
}
def get_target_user(auth, uid=None):
target = User.load(uid) if uid else auth.user
if target is None:
raise HTTPError(http.NOT_FOUND)
return target
def fmt_date_or_none(date, fmt='%Y-%m-%d'):
if date:
try:
return date.strftime(fmt)
except ValueError:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long='Year entered must be after 1900')
)
return None
def append_editable(data, auth, uid=None):
target = get_target_user(auth, uid)
data['editable'] = auth.user == target
def serialize_social_addons(user):
ret = {}
for user_settings in user.get_addons():
config = user_settings.config
if user_settings.public_id:
ret[config.short_name] = user_settings.public_id
return ret
@collect_auth
def serialize_social(auth, uid=None, **kwargs):
target = get_target_user(auth, uid)
ret = target.social
append_editable(ret, auth, uid)
if ret['editable']:
ret['addons'] = serialize_social_addons(target)
return ret
def serialize_job(job):
return {
'institution': job.get('institution'),
'department': job.get('department'),
'title': job.get('title'),
'startMonth': job.get('startMonth'),
'startYear': job.get('startYear'),
'endMonth': job.get('endMonth'),
'endYear': job.get('endYear'),
'ongoing': job.get('ongoing', False),
}
def serialize_school(school):
return {
'institution': school.get('institution'),
'department': school.get('department'),
'degree': school.get('degree'),
'startMonth': school.get('startMonth'),
'startYear': school.get('startYear'),
'endMonth': school.get('endMonth'),
'endYear': school.get('endYear'),
'ongoing': school.get('ongoing', False),
}
def serialize_contents(field, func, auth, uid=None):
target = get_target_user(auth, uid)
ret = {
'contents': [
func(content)
for content in getattr(target, field)
]
}
append_editable(ret, auth, uid)
return ret
@collect_auth
def serialize_jobs(auth, uid=None, **kwargs):
ret = serialize_contents('jobs', serialize_job, auth, uid)
append_editable(ret, auth, uid)
return ret
@collect_auth
def serialize_schools(auth, uid=None, **kwargs):
ret = serialize_contents('schools', serialize_school, auth, uid)
append_editable(ret, auth, uid)
return ret
@must_be_logged_in
def unserialize_names(**kwargs):
user = kwargs['auth'].user
json_data = escape_html(request.get_json())
# json get can return None, use `or` here to ensure we always strip a string
user.fullname = (json_data.get('full') or '').strip()
user.given_name = (json_data.get('given') or '').strip()
user.middle_names = (json_data.get('middle') or '').strip()
user.family_name = (json_data.get('family') or '').strip()
user.suffix = (json_data.get('suffix') or '').strip()
user.save()
def verify_user_match(auth, **kwargs):
uid = kwargs.get('uid')
if uid and uid != auth.user._id:
raise HTTPError(http.FORBIDDEN)
@must_be_logged_in
def unserialize_social(auth, **kwargs):
verify_user_match(auth, **kwargs)
user = auth.user
json_data = escape_html(request.get_json())
for soc in user.SOCIAL_FIELDS.keys():
user.social[soc] = json_data.get(soc)
try:
user.save()
except ValidationError as exc:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long=exc.args[0]
))
def unserialize_job(job):
return {
'institution': job.get('institution'),
'department': job.get('department'),
'title': job.get('title'),
'startMonth': job.get('startMonth'),
'startYear': job.get('startYear'),
'endMonth': job.get('endMonth'),
'endYear': job.get('endYear'),
'ongoing': job.get('ongoing'),
}
def unserialize_school(school):
return {
'institution': school.get('institution'),
'department': school.get('department'),
'degree': school.get('degree'),
'startMonth': school.get('startMonth'),
'startYear': school.get('startYear'),
'endMonth': school.get('endMonth'),
'endYear': school.get('endYear'),
'ongoing': school.get('ongoing'),
}
def unserialize_contents(field, func, auth):
user = auth.user
json_data = escape_html(request.get_json())
setattr(
user,
field,
[
func(content)
for content in json_data.get('contents', [])
]
)
user.save()
@must_be_logged_in
def unserialize_jobs(auth, **kwargs):
verify_user_match(auth, **kwargs)
unserialize_contents('jobs', unserialize_job, auth)
# TODO: Add return value
@must_be_logged_in
def unserialize_schools(auth, **kwargs):
verify_user_match(auth, **kwargs)
unserialize_contents('schools', unserialize_school, auth)
# TODO: Add return value
@must_be_logged_in
def request_export(auth):
user = auth.user
if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
raise HTTPError(httplib.BAD_REQUEST,
data={'message_long': 'Too many requests. Please wait a while before sending another account export request.',
'error_type': 'throttle_error'})
mails.send_mail(
to_addr=settings.SUPPORT_EMAIL,
mail=mails.REQUEST_EXPORT,
user=auth.user,
)
user.email_last_sent = datetime.datetime.utcnow()
user.save()
return {'message': 'Sent account export request'}
@must_be_logged_in
def request_deactivation(auth):
user = auth.user
if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
raise HTTPError(http.BAD_REQUEST,
data={
'message_long': 'Too many requests. Please wait a while before sending another account deactivation request.',
'error_type': 'throttle_error'
})
mails.send_mail(
to_addr=settings.SUPPORT_EMAIL,
mail=mails.REQUEST_DEACTIVATION,
user=auth.user,
)
user.email_last_sent = datetime.datetime.utcnow()
user.requested_deactivation = True
user.save()
return {'message': 'Sent account deactivation request'}
def redirect_to_twitter(twitter_handle):
"""Redirect GET requests for /@TwitterHandle/ to respective the OSF user
account if it associated with an active account
:param uid: uid for requested User
:return: Redirect to User's Twitter account page
"""
try:
user = User.find_one(Q('social.twitter', 'iexact', twitter_handle))
except NoResultsFound:
raise HTTPError(http.NOT_FOUND, data={
'message_short': 'User Not Found',
'message_long': 'There is no active user associated with the Twitter handle: {0}.'.format(twitter_handle)
})
except MultipleResultsFound:
users = User.find(Q('social.twitter', 'iexact', twitter_handle))
message_long = 'There are multiple OSF accounts associated with the ' \
'Twitter handle: <strong>{0}</strong>. <br /> Please ' \
'select from the accounts below. <br /><ul>'.format(markupsafe.escape(twitter_handle))
for user in users:
message_long += '<li><a href="{0}">{1}</a></li>'.format(user.url, markupsafe.escape(user.fullname))
message_long += '</ul>'
raise HTTPError(http.MULTIPLE_CHOICES, data={
'message_short': 'Multiple Users Found',
'message_long': message_long
})
return redirect(user.url)
|
|
import unicodedata
from django import forms
from django.contrib.auth import (
authenticate, get_user_model, password_validation,
)
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, identify_hasher,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.text import capfirst
from django.utils.translation import gettext, gettext_lazy as _
UserModel = get_user_model()
class ReadOnlyPasswordHashWidget(forms.Widget):
template_name = 'auth/widgets/read_only_password_hash.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
summary = []
if not value or value.startswith(UNUSABLE_PASSWORD_PREFIX):
summary.append({'label': gettext("No password set.")})
else:
try:
hasher = identify_hasher(value)
except ValueError:
summary.append({'label': gettext("Invalid password format or unknown hashing algorithm.")})
else:
for key, value_ in hasher.safe_summary(value).items():
summary.append({'label': gettext(key), 'value': value_})
context['summary'] = summary
return context
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super().__init__(*args, **kwargs)
def bound_data(self, data, initial):
# Always return initial because the widget doesn't
# render an input field.
return initial
def has_changed(self, initial, data):
return False
class UsernameField(forms.CharField):
def to_python(self, value):
return unicodedata.normalize('NFKC', super().to_python(value))
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput,
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
class Meta:
model = User
fields = ("username",)
field_classes = {'username': UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update({'autofocus': True})
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
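# A minimal usage sketch (hypothetical view code, not part of this module):
#
#   form = UserCreationForm(data={'username': 'alice',
#                                 'password1': 's3cret-passphrase',
#                                 'password2': 's3cret-passphrase'})
#   if form.is_valid():
#       user = form.save()  # password is hashed via set_password() above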
class UserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField(
label=_("Password"),
help_text=_(
"Raw passwords are not stored, so there is no way to see this "
"user's password, but you can change the password using "
"<a href=\"../password/\">this form</a>."
),
)
class Meta:
model = User
fields = '__all__'
field_classes = {'username': UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
f = self.fields.get('user_permissions')
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = UsernameField(
max_length=254,
widget=forms.TextInput(attrs={'autofocus': True}),
)
password = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput,
)
error_messages = {
'invalid_login': _(
"Please enter a correct %(username)s and password. Note that both "
"fields may be case-sensitive."
),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super().__init__(*args, **kwargs)
# Set the label for the "username" field.
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if self.fields['username'].label is None:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
self.user_cache = authenticate(self.request, username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
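# A minimal usage sketch (hypothetical view code; `login` would come from
# django.contrib.auth):
#
#   form = AuthenticationForm(request, data=request.POST)
#   if form.is_valid():
#       login(request, form.get_user())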
class PasswordResetForm(forms.Form):
email = forms.EmailField(label=_("Email"), max_length=254)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
active_users = UserModel._default_manager.filter(**{
'%s__iexact' % UserModel.get_email_field_name(): email,
'is_active': True,
})
return (u for u in active_users if u.has_usable_password())
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None,
extra_email_context=None):
"""
Generate a one-use only link for resetting password and send it to the
user.
"""
email = self.cleaned_data["email"]
for user in self.get_users(email):
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
context = {
'email': email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
}
if extra_email_context is not None:
context.update(extra_email_context)
self.send_mail(
subject_template_name, email_template_name, context, from_email,
email, html_email_template_name=html_email_template_name,
)
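# A minimal usage sketch (hypothetical view code, not part of this module):
#
#   form = PasswordResetForm(data={'email': 'user@example.com'})
#   if form.is_valid():
#       form.save(request=request, use_https=True)  # emails a one-use reset link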
class SetPasswordForm(forms.Form):
"""
    A form that lets a user set their password without entering the old
    password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput,
strip=False,
help_text=password_validation.password_validators_help_text_html(),
)
new_password2 = forms.CharField(
label=_("New password confirmation"),
strip=False,
widget=forms.PasswordInput,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
password = self.cleaned_data["new_password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
A form that lets a user change their password by entering their old
password.
"""
error_messages = dict(SetPasswordForm.error_messages, **{
'password_incorrect': _("Your old password was entered incorrectly. Please enter it again."),
})
old_password = forms.CharField(
label=_("Old password"),
strip=False,
widget=forms.PasswordInput(attrs={'autofocus': True}),
)
field_order = ['old_password', 'new_password1', 'new_password2']
def clean_old_password(self):
"""
Validate that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
required_css_class = 'required'
password1 = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(attrs={'autofocus': True}),
strip=False,
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput,
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
"""Save the new password."""
password = self.cleaned_data["password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
@property
def changed_data(self):
data = super().changed_data
for name in self.fields.keys():
if name not in data:
return []
return ['password']
|
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" General helper functions
for use in the configuration process
"""
import subprocess
import logging
import os.path
import re
import shutil
import sys
from netaddr import *
from pprint import pprint
PUBLIC_INTERFACES = {"router" : "eth2", "vpcrouter" : "eth1"}
STATE_COMMANDS = {"router" : "ip addr | grep eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'",
"vpcrouter" : "ip addr | grep eth1 | grep state | awk '{print $9;}' | xargs bash -c 'if [ $0 == \"UP\" ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'"}
def reconfigure_interfaces(router_config, interfaces):
for interface in interfaces:
cmd = "ip link show %s | grep 'state DOWN'" % interface.get_device()
for device in execute(cmd):
if " DOWN " in device:
cmd = "ip link set %s up" % interface.get_device()
# If redundant only bring up public interfaces that are not eth1.
# Reason: private gateways are public interfaces.
# master.py and keepalived will deal with eth1 public interface.
if router_config.is_redundant() and interface.is_public():
state_cmd = STATE_COMMANDS[router_config.get_type()]
logging.info("Check state command => %s" % state_cmd)
state = execute(state_cmd)[0]
logging.info("Route state => %s" % state)
if interface.get_device() != PUBLIC_INTERFACES[router_config.get_type()] and state == "MASTER":
execute(cmd)
else:
execute(cmd)
def is_mounted(name):
for i in execute("mount"):
vals = i.lstrip().split()
if vals[0] == "tmpfs" and vals[2] == name:
return True
return False
def mount_tmpfs(name):
if not is_mounted(name):
execute("mount tmpfs %s -t tmpfs" % name)
def umount_tmpfs(name):
if is_mounted(name):
execute("umount %s" % name)
def rm(name):
    if os.path.isfile(name):
        os.remove(name)
def rmdir(name):
if name:
shutil.rmtree(name, True)
def mkdir(name, mode, fatal):
try:
os.makedirs(name, mode)
except OSError as e:
if e.errno != 17:
print "failed to make directories " + name + " due to :" + e.strerror
if(fatal):
sys.exit(1)
def updatefile(filename, val, mode):
""" add val to file """
handle = open(filename, 'r')
    for line in handle.readlines():
if line.strip().lstrip() == val:
return
# set the value
handle.close()
handle = open(filename, mode)
handle.write(val)
handle.close()
def bool_to_yn(val):
if val:
return "yes"
return "no"
def get_device_info():
""" Returns all devices on system with their ipv4 ip netmask """
list = []
for i in execute("ip addr show"):
vals = i.strip().lstrip().rstrip().split()
if vals[0] == "inet":
to = {}
to['ip'] = vals[1]
to['dev'] = vals[-1]
to['network'] = IPNetwork(to['ip'])
to['dnsmasq'] = False
list.append(to)
return list
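# Example return value (a sketch; addresses are illustrative):
#
#   get_device_info() -> [{'ip': '10.1.2.3/24',
#                          'dev': 'eth0',
#                          'network': IPNetwork('10.1.2.3/24'),
#                          'dnsmasq': False}, ...]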
def get_domain():
for line in open("/etc/resolv.conf"):
vals = line.lstrip().split()
if vals[0] == "domain":
return vals[1]
return "cloudnine.internal"
def get_device(ip):
""" Returns the device which has a specific ip
If the ip is not found returns an empty string
"""
for i in execute("ip addr show"):
vals = i.strip().lstrip().rstrip().split()
if vals[0] == "inet":
if vals[1].split('/')[0] == ip:
return vals[-1]
return ""
def get_ip(device):
""" Return first ip on an interface """
cmd = "ip addr show dev %s" % device
for i in execute(cmd):
vals = i.lstrip().split()
if (vals[0] == 'inet'):
return vals[1]
return ""
def definedinfile(filename, val):
""" Check if val is defined in the file """
for line in open(filename):
if re.search(val, line):
return True
return False
def addifmissing(filename, val):
""" Add something to a file
if it is not already there """
if not os.path.isfile(filename):
logging.debug("File %s doesn't exist, so create" % filename)
open(filename, "w").close()
if not definedinfile(filename, val):
updatefile(filename, val + "\n", "a")
logging.debug("Added %s to file %s" % (val, filename))
return True
return False
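# Usage sketch (hypothetical file and value): ensure a line exists exactly once.
#
#   addifmissing("/etc/sysctl.conf", "net.ipv4.ip_forward = 1")
#   # returns True if the line was appended, False if it was already present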
def get_hostname():
for line in open("/etc/hostname"):
return line.strip()
def execute(command):
""" Execute command """
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
p.wait()
rc = p.returncode
logging.debug("Executed: %s - exitstatus=%s " % (command, rc))
result = p.communicate()[0]
return result.splitlines()
def save_iptables(command, iptables_file):
""" Execute command """
logging.debug("Saving iptables for %s" % command)
result = execute(command)
fIptables = open(iptables_file, "w+")
for line in result:
fIptables.write(line)
fIptables.write("\n")
fIptables.close()
def execute2(command):
""" Execute command """
logging.debug("Executing: %s" % command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
p.wait()
return p
def service(name, op):
execute("service %s %s" % (name, op))
logging.info("Service %s %s" % (name, op))
def start_if_stopped(name):
ret = execute2("service %s status" % name)
if ret.returncode:
execute2("service %s start" % name)
def hup_dnsmasq(name, user):
pid = ""
for i in execute("ps -ef | grep %s" % name):
vals = i.lstrip().split()
if (vals[0] == user):
pid = vals[1]
if pid:
logging.info("Sent hup to %s", name)
execute("kill -HUP %s" % pid)
else:
service("dnsmasq", "start")
def copy_if_needed(src, dest):
""" Copy a file if the destination does not already exist
"""
if os.path.isfile(dest):
return
copy(src, dest)
def copy(src, dest):
"""
copy source to destination.
"""
try:
shutil.copy2(src, dest)
except IOError:
        logging.error("Could not copy %s to %s" % (src, dest))
else:
logging.info("Copied %s to %s" % (src, dest))
|
|
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import urlparse
from collections import Callable, defaultdict
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
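# Example (a sketch): insertion order is preserved and popitem() pops from the
# right by default.
#
#   >>> d = OrderedDict([('a', 1), ('b', 2)])
#   >>> d['c'] = 3
#   >>> d.keys()
#   ['a', 'b', 'c']
#   >>> d.popitem()
#   ('c', 3)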
"""
<http://stackoverflow.com/questions/6190331/can-i-do-an-ordered-default-dict-in-python>
"""
class DefaultOrderedDict(OrderedDict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('first argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
        return 'DefaultOrderedDict(%s, %s)' % (self.default_factory,
                                               OrderedDict.__repr__(self))
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
"""
Kind of like urlparse.parse_qs, except returns an ordered dict.
Also avoids replicating that function's bad habit of overriding the
built-in 'dict' type.
Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
"""
od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
for name, value in urlparse.parse_qsl(qs, keep_blank_values, strict_parsing):
od[name].append(value)
return od
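# Example (a sketch): repeated parameters keep their first-seen key order and
# collect every value.
#
#   >>> od = parse_qs('a=1&b=2&a=3')
#   >>> od.keys()
#   ['a', 'b']
#   >>> od['a']
#   ['1', '3']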
"""
Recipe from <http://code.activestate.com/recipes/577197-sortedcollection/>.
"""
from bisect import bisect_left, bisect_right
class SortedCollection(object):
'''Sequence sorted by a key function.
SortedCollection() is much easier to work with than using bisect() directly.
    It supports key functions like those used in sorted(), min(), and max().
The result of the key function call is saved so that keys can be searched
efficiently.
Instead of returning an insertion-point which can be hard to interpret, the
five find-methods return a specific item in the sequence. They can scan for
exact matches, the last item less-than-or-equal to a key, or the first item
greater-than-or-equal to a key.
Once found, an item's ordinal position can be located with the index() method.
New items can be added with the insert() and insert_right() methods.
Old items can be deleted with the remove() method.
The usual sequence methods are provided to support indexing, slicing,
length lookup, clearing, copying, forward and reverse iteration, contains
checking, item counts, item removal, and a nice looking repr.
Finding and indexing are O(log n) operations while iteration and insertion
are O(n). The initial sort is O(n log n).
    The key function is stored in the 'key' attribute for easy introspection or
so that you can assign a new key function (triggering an automatic re-sort).
In short, the class was designed to handle all of the common use cases for
bisect but with a simpler API and support for key functions.
>>> from pprint import pprint
>>> from operator import itemgetter
>>> s = SortedCollection(key=itemgetter(2))
>>> for record in [
... ('roger', 'young', 30),
... ('angela', 'jones', 28),
... ('bill', 'smith', 22),
... ('david', 'thomas', 32)]:
... s.insert(record)
>>> pprint(list(s)) # show records sorted by age
[('bill', 'smith', 22),
('angela', 'jones', 28),
('roger', 'young', 30),
('david', 'thomas', 32)]
>>> s.find_le(29) # find oldest person aged 29 or younger
('angela', 'jones', 28)
>>> s.find_lt(28) # find oldest person under 28
('bill', 'smith', 22)
>>> s.find_gt(28) # find youngest person over 28
('roger', 'young', 30)
>>> r = s.find_ge(32) # find youngest person aged 32 or older
>>> s.index(r) # get the index of their record
3
>>> s[3] # fetch the record at that index
('david', 'thomas', 32)
>>> s.key = itemgetter(0) # now sort by first name
>>> pprint(list(s))
[('angela', 'jones', 28),
('bill', 'smith', 22),
('david', 'thomas', 32),
('roger', 'young', 30)]
'''
def __init__(self, iterable=(), key=None):
self._given_key = key
key = (lambda x: x) if key is None else key
decorated = sorted((key(item), item) for item in iterable)
self._keys = [k for k, item in decorated]
self._items = [item for k, item in decorated]
self._key = key
def _getkey(self):
return self._key
def _setkey(self, key):
if key is not self._key:
self.__init__(self._items, key=key)
def _delkey(self):
self._setkey(None)
key = property(_getkey, _setkey, _delkey, 'key function')
def clear(self):
self.__init__([], self._key)
def copy(self):
return self.__class__(self, self._key)
def __len__(self):
return len(self._items)
def __getitem__(self, i):
return self._items[i]
def __iter__(self):
return iter(self._items)
def __reversed__(self):
return reversed(self._items)
def __repr__(self):
return '%s(%r, key=%s)' % (
self.__class__.__name__,
self._items,
getattr(self._given_key, '__name__', repr(self._given_key))
)
def __reduce__(self):
return self.__class__, (self._items, self._given_key)
def __contains__(self, item):
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, item):
'Find the position of an item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].index(item) + i
def count(self, item):
'Return number of occurrences of item'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].count(item)
def insert(self, item):
'Insert a new item. If equal keys are found, add to the left'
k = self._key(item)
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def insert_right(self, item):
'Insert a new item. If equal keys are found, add to the right'
k = self._key(item)
i = bisect_right(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
        'Remove first occurrence of item. Raise ValueError if not found'
i = self.index(item)
del self._keys[i]
del self._items[i]
def find(self, item):
'Return first item with a key == item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
if i != len(self) and self._keys[i] == k:
return self._items[i]
raise ValueError('No item found with key equal to: %r' % (k,))
def find_le(self, item):
'Return last item with a key <= item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_right(self._keys, k)
if i:
return self._items[i - 1]
raise ValueError('No item found with key at or below: %r' % (k,))
def find_lt(self, item):
'Return last item with a key < item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
if i:
return self._items[i - 1]
raise ValueError('No item found with key below: %r' % (k,))
def find_ge(self, item):
        'Return first item with a key >= item. Raise ValueError if not found'
k = self._key(item)
i = bisect_left(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key at or above: %r' % (k,))
def find_gt(self, item):
'Return first item with a key > item. Raise ValueError if not found'
k = self._key(item)
i = bisect_right(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key above: %r' % (k,))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# TODO(b/65403806): Switch loss_reduction default to SUM_OVER_BATCH_SIZE.
def multi_class_head(n_classes,
weight_column=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
"""Creates a `_Head` for multi class classification.
Uses `sparse_softmax_cross_entropy` loss.
The head expects `logits` with shape `[D0, D1, ... DN, n_classes]`.
In many applications, the shape is `[batch_size, n_classes]`.
`labels` must be a dense `Tensor` with shape matching `logits`, namely
  `[D0, D1, ... DN, 1]`. If `label_vocabulary` is given, `labels` must be a string
`Tensor` with values from the vocabulary. If `label_vocabulary` is not given,
`labels` must be an integer `Tensor` with values specifying the class index.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
The loss is the weighted sum over the input dimensions. Namely, if the input
labels have shape `[batch_size, 1]`, the loss is the weighted sum over
`batch_size`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support integer `labels` with
shape `[D0, D1, ... DN, 1]`. Namely, the head applies `label_vocabulary` to
the input labels before passing them to `loss_fn`.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`binary_classification_head`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_vocabulary: A list or tuple of strings representing possible label
values. If it is not given, that means labels are already encoded as an
integer within [0, n_classes). If given, labels must be of string type and
have any value in `label_vocabulary`. Note that errors will be raised if
`label_vocabulary` is not provided but labels are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for multi class classification.
Raises:
ValueError: if `n_classes`, `label_vocabulary` or `loss_reduction` is
invalid.
"""
return head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint:disable=protected-access
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
name=name)
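# A minimal usage sketch (the label values are hypothetical):
#
#   head = multi_class_head(n_classes=3, label_vocabulary=['cat', 'dog', 'bird'])
#   # The returned _Head is then consumed by a canned estimator, or by a custom
#   # model_fn (typically via the head's create_estimator_spec method).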
def binary_classification_head(
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
"""Creates a `_Head` for single label binary classification.
This head uses `sigmoid_cross_entropy_with_logits` loss.
The head expects `logits` with shape `[D0, D1, ... DN, 1]`.
In many applications, the shape is `[batch_size, 1]`.
`labels` must be a dense `Tensor` with shape matching `logits`, namely
  `[D0, D1, ... DN, 1]`. If `label_vocabulary` is given, `labels` must be a string
`Tensor` with values from the vocabulary. If `label_vocabulary` is not given,
`labels` must be float `Tensor` with values in the interval `[0, 1]`.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
The loss is the weighted sum over the input dimensions. Namely, if the input
labels have shape `[batch_size, 1]`, the loss is the weighted sum over
`batch_size`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support float `labels` with
shape `[D0, D1, ... DN, 1]`. Namely, the head applies `label_vocabulary` to
the input labels before passing them to `loss_fn`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
thresholds: Iterable of floats in the range `(0, 1)`. For binary
classification metrics such as precision and recall, an eval metric is
generated for each threshold value. This threshold is applied to the
logistic values to determine the binary classification (i.e., above the
threshold is `true`, below is `false`).
label_vocabulary: A list or tuple of strings representing possible label
values. If it is not given, labels must be float `Tensor`s with values in
[0, 1]. If given, labels must be of string type and take values from
`label_vocabulary`. Note that errors will be raised if `label_vocabulary`
is not provided but labels are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for binary classification.
Raises:
ValueError: If `thresholds` contains a value outside of `(0, 1)`.
ValueError: If `loss_reduction` is invalid.
"""
return head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint:disable=protected-access
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
name=name)
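# Illustrative usage sketch (added for clarity, not part of the original
# module): a binary head that also reports precision/recall metrics at two
# assumed threshold values.
def _example_binary_classification_head():
  return binary_classification_head(
      thresholds=[0.25, 0.75],
      label_vocabulary=['negative', 'positive'],
      name='example_binary')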
def regression_head(weight_column=None,
label_dimension=1,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
inverse_link_fn=None,
name=None):
"""Creates a `_Head` for regression using the `mean_squared_error` loss.
The loss is the weighted sum over all input dimensions. Namely, if the input
labels have shape `[batch_size, label_dimension]`, the loss is the weighted
sum over both `batch_size` and `label_dimension`.
The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
In many applications, the shape is `[batch_size, label_dimension]`.
The `labels` shape must match `logits`, namely
`[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
`[D0, D1, ... DN]` is also supported.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
`[D0, D1, ... DN, label_dimension]`.
Supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, label_dimension]`.
Also supports custom `inverse_link_fn`, also known as 'mean function'.
`inverse_link_fn` takes `logits` as argument and returns predicted values.
This function is the inverse of the link function defined in
https://en.wikipedia.org/wiki/Generalized_linear_model#Link_function
Namely, for poisson regression, set `inverse_link_fn=tf.exp`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
loss_fn: Optional loss function. Defaults to `mean_squared_error`.
inverse_link_fn: Optional inverse link function, also known as 'mean
function'. Defaults to identity.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for linear regression.
Raises:
ValueError: If `label_dimension` or `loss_reduction` is invalid.
"""
return head_lib._regression_head_with_mean_squared_error_loss( # pylint:disable=protected-access
weight_column=weight_column,
label_dimension=label_dimension,
loss_reduction=loss_reduction,
loss_fn=loss_fn,
inverse_link_fn=inverse_link_fn,
name=name)
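# Illustrative usage sketch (added for clarity, not part of the original
# module): a regression head whose predictions are exponentiated, i.e. the
# inverse link for a log-linear (Poisson-style) model, as described above.
def _example_log_linear_regression_head():
  return regression_head(
      label_dimension=1,
      inverse_link_fn=math_ops.exp,
      name='example_log_linear')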
def poisson_regression_head(
weight_column=None,
label_dimension=1,
loss_reduction=losses.Reduction.SUM,
compute_full_loss=True,
name=None):
"""Creates a `_Head` for poisson regression using `tf.nn.log_poisson_loss`.
The loss is the weighted sum over all input dimensions. Namely, if the input
labels have shape `[batch_size, label_dimension]`, the loss is the weighted
sum over both `batch_size` and `label_dimension`.
The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
In many applications, the shape is `[batch_size, label_dimension]`.
The `labels` shape must match `logits`, namely
`[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
`[D0, D1, ... DN]` is also supported.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
`[D0, D1, ... DN, label_dimension]`.
This is implemented as a generalized linear model, see
https://en.wikipedia.org/wiki/Generalized_linear_model.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
compute_full_loss: Whether to include the constant `log(z!)` term in
computing the poisson loss. See `tf.nn.log_poisson_loss` for the full
documentation.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for poisson regression.
Raises:
ValueError: If `label_dimension` or `loss_reduction` is invalid.
"""
def _poisson_loss(labels, logits):
return nn.log_poisson_loss(
targets=labels, log_input=logits, compute_full_loss=compute_full_loss)
return head_lib._regression_head_with_mean_squared_error_loss( # pylint:disable=protected-access
weight_column=weight_column,
label_dimension=label_dimension,
loss_reduction=loss_reduction,
loss_fn=_poisson_loss,
inverse_link_fn=math_ops.exp,
name=name)
def multi_label_head(n_classes,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
"""Creates a `_Head` for multi-label classification.
Multi-label classification handles the case where each example may have zero
or more associated labels, from a discrete set. This is distinct from
`multi_class_head` which has exactly one label per example.
Uses `sigmoid_cross_entropy` loss averaged over classes and summed (with
weights) over the batch. Namely, if the input logits have shape
`[batch_size, n_classes]`, the loss is the average over `n_classes` and the
weighted sum over `batch_size`.
The head expects `logits` with shape `[D0, D1, ... DN, n_classes]`. In many
applications, the shape is `[batch_size, n_classes]`.
Labels can be:
* A multi-hot tensor of shape `[D0, D1, ... DN, n_classes]`
* An integer `SparseTensor` of class indices. The `dense_shape` must be
`[D0, D1, ... DN, ?]` and the values within `[0, n_classes)`.
* If `label_vocabulary` is given, a string `SparseTensor`. The `dense_shape`
must be `[D0, D1, ... DN, ?]` and the values within `label_vocabulary`.
If `weight_column` is specified, weights must be of shape
`[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
Also supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
`(labels, logits, features)` as arguments and returns unreduced loss with
shape `[D0, D1, ... DN, 1]`. `loss_fn` must support indicator `labels` with
shape `[D0, D1, ... DN, n_classes]`. Namely, the head applies
`label_vocabulary` to the input labels before passing them to `loss_fn`.
Args:
n_classes: Number of classes, must be greater than 1 (for 1 class, use
`binary_classification_head`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. Per-class weighting is
not supported.
thresholds: Iterable of floats in the range `(0, 1)`. Accuracy, precision
and recall metrics are evaluated for each threshold value. The threshold
is applied to the predicted probabilities, i.e. above the threshold is
`true`, below is `false`.
label_vocabulary: A list of strings representing possible label values. If it
is not given, labels must already be encoded as integers within
[0, n_classes) or as a multi-hot `Tensor`. If given, labels must be a string
`SparseTensor` with values in `label_vocabulary`. Note that errors will be
raised if `label_vocabulary` is not provided but labels are strings.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch. Defaults to `SUM`.
loss_fn: Optional loss function.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`. Also used as `name_scope` when creating ops.
Returns:
An instance of `_Head` for multi-label classification.
Raises:
ValueError: if `n_classes`, `thresholds`, `loss_reduction` or `loss_fn` is
invalid.
"""
thresholds = tuple(thresholds) if thresholds else tuple()
if n_classes is None or n_classes < 2:
raise ValueError(
'n_classes must be > 1 for multi-class classification. '
'Given: {}'.format(n_classes))
for threshold in thresholds:
if (threshold <= 0.0) or (threshold >= 1.0):
raise ValueError(
'thresholds must be in (0, 1) range. Given: {}'.format(threshold))
if label_vocabulary is not None:
if not isinstance(label_vocabulary, (list, tuple)):
raise ValueError(
'label_vocabulary must be a list or tuple. '
'Given type: {}'.format(type(label_vocabulary)))
if len(label_vocabulary) != n_classes:
raise ValueError(
'Length of label_vocabulary must be n_classes ({}). '
'Given: {}'.format(n_classes, len(label_vocabulary)))
if loss_fn:
head_lib._validate_loss_fn_args(loss_fn) # pylint:disable=protected-access
if (loss_reduction not in losses.Reduction.all() or
loss_reduction == losses.Reduction.NONE):
raise ValueError('Invalid loss_reduction: {}'.format(loss_reduction))
return _MultiLabelHead(
n_classes=n_classes, weight_column=weight_column, thresholds=thresholds,
label_vocabulary=label_vocabulary, loss_reduction=loss_reduction,
loss_fn=loss_fn, name=name)
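# Illustrative usage sketch (added for clarity, not part of the original
# module): a multi-label head where each example may carry any subset of the
# assumed tag vocabulary below.
def _example_multi_label_head():
  return multi_label_head(
      n_classes=4,
      label_vocabulary=['news', 'sports', 'weather', 'other'],
      thresholds=[0.5],
      name='example_multi_label')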
class _MultiLabelHead(head_lib._Head): # pylint:disable=protected-access
"""`_Head` for multi-label classification."""
def __init__(self,
n_classes,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=losses.Reduction.SUM,
loss_fn=None,
name=None):
self._n_classes = n_classes
self._weight_column = weight_column
self._thresholds = thresholds
self._label_vocabulary = label_vocabulary
self._loss_reduction = loss_reduction
self._loss_fn = loss_fn
self._name = name
@property
def name(self):
return self._name
@property
def logits_dimension(self):
return self._n_classes
def _process_labels(self, labels):
if labels is None:
raise ValueError(
'You must provide a labels Tensor. Given: None. '
'Suggested troubleshooting steps: Check that your data contain '
'your label feature. Check that your input_fn properly parses and '
'returns labels.')
if isinstance(labels, sparse_tensor.SparseTensor):
if labels.dtype == dtypes.string:
label_ids_values = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels.values)
label_ids = sparse_tensor.SparseTensor(
indices=labels.indices,
values=label_ids_values,
dense_shape=labels.dense_shape)
return math_ops.to_int64(
sparse_ops.sparse_to_indicator(label_ids, self._n_classes))
else:
err_msg = (
r'labels must be an integer SparseTensor with values in '
r'[0, {})'.format(self._n_classes))
assert_int = check_ops.assert_integer(
labels.values, message=err_msg)
assert_less = check_ops.assert_less(
labels.values,
ops.convert_to_tensor(self._n_classes, dtype=labels.dtype),
message=err_msg)
assert_greater = check_ops.assert_non_negative(
labels.values, message=err_msg)
with ops.control_dependencies(
[assert_int, assert_less, assert_greater]):
return math_ops.to_int64(
sparse_ops.sparse_to_indicator(labels, self._n_classes))
err_msg = (
r'labels must be an integer indicator Tensor with values in [0, 1]')
return head_lib._assert_range(labels, 2, message=err_msg) # pylint:disable=protected-access,
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode # Unused for this head.
logits = ops.convert_to_tensor(logits)
processed_labels = self._process_labels(labels)
processed_labels = head_lib._check_dense_labels_match_logits_and_reshape( # pylint:disable=protected-access
labels=processed_labels, logits=logits,
expected_labels_dimension=self.logits_dimension)
if self._loss_fn:
unweighted_loss = head_lib._call_loss_fn( # pylint:disable=protected-access
loss_fn=self._loss_fn, labels=processed_labels, logits=logits,
features=features, expected_loss_dim=1)
else:
unweighted_loss = losses.sigmoid_cross_entropy(
multi_class_labels=processed_labels, logits=logits,
reduction=losses.Reduction.NONE)
# Averages loss over classes.
unweighted_loss = math_ops.reduce_mean(
unweighted_loss, axis=-1, keep_dims=True)
weights = head_lib._get_weights_and_check_match_logits( # pylint:disable=protected-access,
features=features, weight_column=self._weight_column, logits=logits)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=self._loss_reduction)
return head_lib.LossSpec(
training_loss=training_loss,
unreduced_loss=unweighted_loss,
weights=weights,
processed_labels=processed_labels)
def create_estimator_spec(
self, features, mode, logits, labels=None, optimizer=None,
train_op_fn=None, regularization_losses=None):
"""Returns an `EstimatorSpec`.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` with shape `[D0, D1, ... DN, n_classes]`.
For many applications, the shape is `[batch_size, n_classes]`.
labels: Labels with shape matching `logits`. Can be multi-hot `Tensor`
with shape `[D0, D1, ... DN, n_classes]` or `SparseTensor` with
`dense_shape` `[D0, D1, ... DN, ?]`. `labels` is required argument when
`mode` equals `TRAIN` or `EVAL`.
optimizer: `Optimizer` instance to optimize the loss in TRAIN mode.
Namely, sets `train_op = optimizer.minimize(loss, global_step)`, which
updates variables and increments `global_step`.
train_op_fn: Function that takes a scalar loss `Tensor` and returns
`train_op`. Used if `optimizer` is `None`.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses. These losses are
usually expressed as a batch average, so for best results users need to
set `loss_reduction=SUM_OVER_BATCH_SIZE` or
`loss_reduction=SUM_OVER_NONZERO_WEIGHTS` when creating the head to
avoid scaling errors.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If both `train_op_fn` and `optimizer` are `None` in TRAIN
mode, or if both are set.
"""
with ops.name_scope(self._name, 'head'):
logits = head_lib._check_logits_final_dim(logits, self.logits_dimension) # pylint:disable=protected-access
# Predict.
pred_keys = prediction_keys.PredictionKeys
with ops.name_scope(None, 'predictions', (logits,)):
probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
predictions = {
pred_keys.LOGITS: logits,
pred_keys.PROBABILITIES: probabilities,
}
if mode == model_fn.ModeKeys.PREDICT:
classifier_output = head_lib._classification_output( # pylint:disable=protected-access
scores=probabilities, n_classes=self._n_classes,
label_vocabulary=self._label_vocabulary)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
_DEFAULT_SERVING_KEY: classifier_output,
head_lib._CLASSIFY_SERVING_KEY: classifier_output, # pylint:disable=protected-access
head_lib._PREDICT_SERVING_KEY: ( # pylint:disable=protected-access
export_output.PredictOutput(predictions))
})
(training_loss, unreduced_loss, weights,
processed_labels) = self.create_loss(
features=features, mode=mode, logits=logits, labels=labels)
if regularization_losses:
regularization_loss = math_ops.add_n(regularization_losses)
regularized_training_loss = math_ops.add_n(
[training_loss, regularization_loss])
else:
regularization_loss = None
regularized_training_loss = training_loss
# Eval.
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=regularized_training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=processed_labels,
probabilities=probabilities,
weights=weights,
unreduced_loss=unreduced_loss,
regularization_loss=regularization_loss))
# Train.
if optimizer is not None:
if train_op_fn is not None:
raise ValueError('train_op_fn and optimizer cannot both be set.')
train_op = optimizer.minimize(
regularized_training_loss,
global_step=training_util.get_global_step())
elif train_op_fn is not None:
train_op = train_op_fn(regularized_training_loss)
else:
raise ValueError('train_op_fn and optimizer cannot both be None.')
# Only summarize mean_loss for SUM reduction to preserve backwards
# compatibility. Otherwise skip it to avoid unnecessary computation.
if self._loss_reduction == losses.Reduction.SUM:
example_weight_sum = math_ops.reduce_sum(
weights * array_ops.ones_like(unreduced_loss))
mean_loss = training_loss / example_weight_sum
else:
mean_loss = None
with ops.name_scope(''):
keys = metric_keys.MetricKeys
summary.scalar(
head_lib._summary_key(self._name, keys.LOSS), # pylint:disable=protected-access
regularized_training_loss)
if mean_loss is not None:
summary.scalar(
head_lib._summary_key(self._name, keys.LOSS_MEAN), # pylint:disable=protected-access
mean_loss)
if regularization_loss is not None:
summary.scalar(
head_lib._summary_key(self._name, keys.LOSS_REGULARIZATION), # pylint:disable=protected-access
regularization_loss)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=regularized_training_loss,
train_op=train_op)
def _eval_metric_ops(
self, labels, probabilities, weights, unreduced_loss,
regularization_loss):
"""Returns a dict of metrics for eval_metric_ops."""
with ops.name_scope(
None, 'metrics',
[labels, probabilities, weights, unreduced_loss, regularization_loss]):
keys = metric_keys.MetricKeys
metric_ops = {
# Estimator already adds a metric for loss.
head_lib._summary_key(self._name, keys.LOSS_MEAN): # pylint:disable=protected-access
metrics_lib.mean(
values=unreduced_loss,
weights=weights,
name=keys.LOSS_MEAN),
head_lib._summary_key(self._name, keys.AUC): # pylint:disable=protected-access
metrics_lib.auc(labels=labels, predictions=probabilities,
weights=weights, name=keys.AUC),
head_lib._summary_key(self._name, keys.AUC_PR): # pylint:disable=protected-access
metrics_lib.auc(labels=labels, predictions=probabilities,
weights=weights, curve='PR',
name=keys.AUC_PR),
}
if regularization_loss is not None:
loss_regularization_key = head_lib._summary_key( # pylint:disable=protected-access
self._name, keys.LOSS_REGULARIZATION)
metric_ops[loss_regularization_key] = (
metrics_lib.mean(
values=regularization_loss,
name=keys.LOSS_REGULARIZATION))
for threshold in self._thresholds:
accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
metric_ops[head_lib._summary_key(self._name, accuracy_key)] = ( # pylint:disable=protected-access
head_lib._accuracy_at_threshold( # pylint:disable=protected-access
labels=labels,
predictions=probabilities,
weights=weights,
threshold=threshold,
name=accuracy_key))
# Precision for positive examples.
precision_key = keys.PRECISION_AT_THRESHOLD % threshold
metric_ops[head_lib._summary_key(self._name, precision_key)] = ( # pylint:disable=protected-access
head_lib._precision_at_threshold( # pylint:disable=protected-access
labels=labels,
predictions=probabilities,
weights=weights,
threshold=threshold,
name=precision_key))
# Recall for positive examples.
recall_key = keys.RECALL_AT_THRESHOLD % threshold
metric_ops[head_lib._summary_key(self._name, recall_key)] = ( # pylint:disable=protected-access
head_lib._recall_at_threshold( # pylint:disable=protected-access
labels=labels,
predictions=probabilities,
weights=weights,
threshold=threshold,
name=recall_key))
return metric_ops
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations:
"""DdosCustomPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs: Any
) -> "_models.DdosCustomPolicy":
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs: Any
) -> "_models.DdosCustomPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosCustomPolicy"]:
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.DdosCustomPolicy":
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
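# Illustrative usage sketch (added for clarity, not generated code). The
# management client, credential, and resource names below are assumptions
# about the calling environment rather than part of this file.
async def _example_delete_ddos_custom_policy(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential  # assumed dependency
    from azure.mgmt.network.aio import NetworkManagementClient  # assumed client
    async with NetworkManagementClient(
            DefaultAzureCredential(), subscription_id) as client:
        poller = await client.ddos_custom_policies.begin_delete(
            resource_group_name="example-rg",
            ddos_custom_policy_name="example-policy",
        )
        await poller.result()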
|
|
import unittest
from g1.asyncs import kernels
from g1.bases import contexts
from g1.webs import consts
from g1.webs import wsgi_apps
from g1.webs.handlers import composers
class TestCaseBase(unittest.TestCase):
ENVIRON = {
'REQUEST_METHOD': 'GET',
'PATH_INFO': '/foo/bar',
'QUERY_STRING': '',
}
def setUp(self):
super().setUp()
self.request = None
self.response = None
self.handler = None
self.calls = []
def assert_response(self, status, headers):
self.assertIs(self.response.status, status)
self.assertEqual(self.response.headers, headers)
def assert_http_error(self, exc, status, headers):
self.assertIs(exc.status, status)
self.assertEqual(exc.headers, headers)
def set_request(self, **kwargs):
environ = self.ENVIRON.copy()
environ.update(**kwargs)
self.request = wsgi_apps.Request(
environ=environ, context=contexts.Context()
)
def make_noop_handler(self, name):
async def noop(request, response):
del request, response # Unused.
self.calls.append(name)
return noop
class MethodRouterTest(TestCaseBase):
def run_handler(self, method):
self.set_request(REQUEST_METHOD=method)
self.response = wsgi_apps._Response(None, False)
self.calls.clear()
kernels.run(
self.handler(self.request, wsgi_apps.Response(self.response)),
timeout=0.01,
)
@kernels.with_kernel
def test_router(self):
self.handler = composers.MethodRouter({
consts.METHOD_HEAD:
self.make_noop_handler('HEAD'),
consts.METHOD_GET:
self.make_noop_handler('GET'),
})
self.assertEqual(self.calls, [])
self.run_handler(consts.METHOD_GET)
self.assertEqual(self.calls, ['GET'])
self.assert_response(consts.Statuses.OK, {})
self.run_handler(consts.METHOD_HEAD)
self.assertEqual(self.calls, ['HEAD'])
self.assert_response(consts.Statuses.OK, {})
self.run_handler(consts.METHOD_OPTIONS)
self.assertEqual(self.calls, [])
self.assert_response(
consts.Statuses.NO_CONTENT,
{consts.HEADER_ALLOW: 'GET, HEAD, OPTIONS'},
)
with self.assertRaisesRegex(
wsgi_apps.HttpError,
r'unsupported request method: PUT',
) as cm:
self.run_handler(consts.METHOD_PUT)
self.assertEqual(self.calls, [])
self.assert_http_error(
cm.exception,
consts.Statuses.METHOD_NOT_ALLOWED,
{consts.HEADER_ALLOW: 'GET, HEAD, OPTIONS'},
)
@kernels.with_kernel
def test_no_auto_options(self):
self.handler = composers.MethodRouter(
{
consts.METHOD_HEAD: self.make_noop_handler('HEAD'),
consts.METHOD_GET: self.make_noop_handler('GET'),
},
auto_options=False,
)
with self.assertRaisesRegex(
wsgi_apps.HttpError,
r'unsupported request method: OPTIONS',
) as cm:
self.run_handler(consts.METHOD_OPTIONS)
self.assert_http_error(
cm.exception,
consts.Statuses.METHOD_NOT_ALLOWED,
{consts.HEADER_ALLOW: 'GET, HEAD'},
)
@kernels.with_kernel
def test_user_defined_options(self):
self.handler = composers.MethodRouter({
consts.METHOD_GET:
self.make_noop_handler('GET'),
consts.METHOD_OPTIONS:
self.make_noop_handler('OPTIONS'),
})
self.assertEqual(self.calls, [])
self.run_handler(consts.METHOD_OPTIONS)
self.assertEqual(self.calls, ['OPTIONS'])
self.assert_response(consts.Statuses.OK, {})
with self.assertRaisesRegex(
wsgi_apps.HttpError,
r'unsupported request method: PUT',
) as cm:
self.run_handler(consts.METHOD_PUT)
self.assertEqual(self.calls, [])
self.assert_http_error(
cm.exception,
consts.Statuses.METHOD_NOT_ALLOWED,
{consts.HEADER_ALLOW: 'GET, OPTIONS'},
)
def test_invalid_args(self):
with self.assertRaisesRegex(AssertionError, r'expect non-empty'):
composers.MethodRouter({})
class PathPatternRouterTest(TestCaseBase):
def assert_context_keys(self, expect):
self.assertEqual(list(self.request.context.asdict()), expect)
def run_handler(self, path):
self.set_request(PATH_INFO=path)
self.response = wsgi_apps._Response(None, False)
self.calls.clear()
kernels.run(
self.handler(self.request, wsgi_apps.Response(self.response)),
timeout=0.01,
)
@kernels.with_kernel
def test_router(self):
self.handler = composers.PathPatternRouter([
(r'/a/p', self.make_noop_handler('/a/p')),
(r'/a/q', self.make_noop_handler('/a/q')),
(r'/a', self.make_noop_handler('/a')),
])
self.set_request()
self.assertEqual(self.request.context.asdict(), {})
self.assertIsNone(composers.group(self.request))
self.assertIsNone(self.request.context.get(composers.PATH_MATCH))
self.assertEqual(composers.get_path_str(self.request), '/foo/bar')
self.run_handler('/a/p/x')
self.assertEqual(self.calls, ['/a/p'])
self.assert_context_keys([composers.PATH_MATCH])
self.assertEqual(composers.get_path_str(self.request), '/x')
self.assert_response(consts.Statuses.OK, {})
self.run_handler('/a/q')
self.assertEqual(self.calls, ['/a/q'])
self.assert_context_keys([composers.PATH_MATCH])
self.assertEqual(composers.get_path_str(self.request), '')
self.assert_response(consts.Statuses.OK, {})
self.run_handler('/a/q/')
self.assertEqual(self.calls, ['/a/q'])
self.assert_context_keys([composers.PATH_MATCH])
self.assertEqual(composers.get_path_str(self.request), '/')
self.assert_response(consts.Statuses.OK, {})
self.run_handler('/a/r/foo/bar')
self.assertEqual(self.calls, ['/a'])
self.assert_context_keys([composers.PATH_MATCH])
self.assertEqual(composers.get_path_str(self.request), '/r/foo/bar')
self.assert_response(consts.Statuses.OK, {})
with self.assertRaisesRegex(
wsgi_apps.HttpError,
r'path does not match any pattern: /foo/bar',
) as cm:
self.run_handler('/foo/bar')
self.assertEqual(self.calls, [])
self.assert_http_error(cm.exception, consts.Statuses.NOT_FOUND, {})
# You cannot override a PATH_MATCH entry in context.
self.run_handler('/a/p/x')
self.assertIn(composers.PATH_MATCH, self.request.context.asdict())
with self.assertRaisesRegex(AssertionError, r'expect.*not in'):
kernels.run(
self.handler(self.request, wsgi_apps.Response(self.response)),
timeout=0.01,
)
@kernels.with_kernel
def test_user_defined_named_groups(self):
self.handler = composers.PathPatternRouter([
(r'/(?P<d>\d+)-suffix', self.make_noop_handler('digits')),
(r'/(?P<l>[a-z]+)xyz', self.make_noop_handler('letters')),
])
self.run_handler('/012-suffix/spam/egg')
self.assertEqual(self.calls, ['digits'])
self.assert_context_keys([composers.PATH_MATCH])
self.assertEqual(composers.get_path_str(self.request), '/spam/egg')
self.assertEqual(composers.group(self.request), '/012-suffix')
self.assertEqual(composers.group(self.request, 'd'), '012')
self.assert_response(consts.Statuses.OK, {})
self.run_handler('/abcxyz/spam/egg')
self.assertEqual(self.calls, ['letters'])
self.assert_context_keys([composers.PATH_MATCH])
self.assertEqual(composers.get_path_str(self.request), '/spam/egg')
self.assertEqual(composers.group(self.request), '/abcxyz')
self.assertEqual(composers.group(self.request, 'l'), 'abc')
self.assert_response(consts.Statuses.OK, {})
@kernels.with_kernel
def test_user_defined_groups(self):
self.handler = composers.PathPatternRouter([
(r'/(\d+)-suffix', self.make_noop_handler('digits')),
(r'/([a-z]+)-suffix', self.make_noop_handler('letters')),
])
self.run_handler('/012-suffix/spam/egg')
self.assertEqual(self.calls, ['digits'])
self.assert_context_keys([composers.PATH_MATCH])
self.assertEqual(composers.get_path_str(self.request), '/spam/egg')
self.assertEqual(composers.group(self.request), '/012-suffix')
self.assertEqual(
composers.group(self.request, 0, 1),
('/012-suffix', '012'),
)
self.assert_response(consts.Statuses.OK, {})
def test_invalid_args(self):
with self.assertRaisesRegex(AssertionError, r'expect non-empty'):
composers.PathPatternRouter([])
if __name__ == '__main__':
unittest.main()
|
|
import unittest
from unittest.mock import Mock, call
from pipeline_notifier.pipeline_model import Pipeline, BuildStep, Commit
from pipeline_notifier_test.test_utils import *
class PipelineTests(unittest.TestCase):
def test_callbacks_are_set_on_build_steps(self):
step, notifier = Mock(), Mock()
Pipeline("pipeline", [step], notifier)
self.assertEqual(1, len(step.add_success_listener.mock_calls))
self.assertEqual(1, len(step.add_failure_listener.mock_calls))
def test_an_intermediate_successful_step_is_told_to_call_the_next_step(self):
step1, step2, notifier = Mock(), Mock(), Mock()
Pipeline("pipeline", [step1, step2], notifier)
success_callback = step1.add_success_listener.call_args[0][0]
success_callback([1, 2, 3])
step2.add_commit.assert_has_calls([call(1), call(2), call(3)])
def test_a_final_successful_step_causes_a_notification(self):
step1, step2, commit, notifier = Mock(), Mock(), Mock(), Mock()
pipeline = Pipeline("pipeline", [step1, step2], notifier)
success_callback = step2.add_success_listener.call_args[0][0]
success_callback(commit)
notifier.announce_pipeline_success.assert_called_once_with(pipeline, commit)
def test_a_failing_step_causes_a_notification(self):
step1, step2, commit, notifier = Mock(), Mock(), Mock(), Mock()
pipeline = Pipeline("pipeline", [step1, step2], notifier)
failure_callback = step1.add_failure_listener.call_args[0][0]
failure_callback(commit)
notifier.announce_step_failure.assert_called_once_with(step1, commit)
def test_pipeline_status_describes_pipeline_name(self):
step1, notifier = Mock(**{"status": ""}), Mock()
pipeline = Pipeline("my first pipeline", [step1], notifier)
self.assertEqual(pipeline.status["name"], "my first pipeline")
def test_pipeline_status_describes_steps(self):
step1, step2, notifier = Mock(**{"status": "status 1"}), Mock(**{"status": "status 2"}), Mock()
pipeline = Pipeline("pipeline", [step1, step2], notifier)
self.assertEqual(len(pipeline.status["steps"]), 2)
self.assertEqual(pipeline.status["steps"][0], "status 1")
self.assertEqual(pipeline.status["steps"][1], "status 2")
def test_adding_commit_to_pipeline_adds_to_the_first_step(self):
step1, step2, notifier = Mock(), Mock(), Mock()
commit1 = MockCommit("commit1")
pipeline = Pipeline("pipeline", [step1, step2], notifier)
pipeline.add_commit(commit1)
step1.add_commit.assert_called_once_with(commit1)
self.assertEqual(0, step2.add_commit.call_count)
def test_starting_steps_starts_the_matching_step(self):
step1, step2, notifier = MockStep("step1"), MockStep("step2"), Mock()
pipeline = Pipeline("pipeline", [step1, step2], notifier)
pipeline.start_step("step1")
self.assertEqual(step1.start.call_count, 1)
self.assertEqual(step2.start.call_count, 0)
def test_passing_steps_passes_the_matching_step(self):
step1, step2, notifier = MockStep("step1"), MockStep("step2"), Mock()
pipeline = Pipeline("pipeline", [step1, step2], notifier)
pipeline.pass_step("step2")
self.assertEqual(step1.succeed.call_count, 0)
self.assertEqual(step2.succeed.call_count, 1)
def test_failing_steps_fails_the_matching_step(self):
step1, step2, notifier = MockStep("step1"), MockStep("step2"), Mock()
pipeline = Pipeline("pipeline", [step1, step2], notifier)
pipeline.fail_step("step1")
self.assertEqual(step1.fail.call_count, 1)
self.assertEqual(step2.fail.call_count, 0)
def test_starting_passing_and_failing_steps_do_nothing_if_no_step_is_matched(self):
step1, step2, notifier = MockStep("step1"), MockStep("step2"), Mock()
pipeline = Pipeline("pipeline", [step1, step2], notifier)
pipeline.start_step("step3")
self.assertEqual(step1.start.call_count, 0)
self.assertEqual(step1.succeed.call_count, 0)
self.assertEqual(step1.fail.call_count, 0)
self.assertEqual(step2.start.call_count, 0)
self.assertEqual(step2.succeed.call_count, 0)
self.assertEqual(step2.fail.call_count, 0)
class BuildStepTests(unittest.TestCase):
def test_build_step_passes_call_success_callbacks(self):
step, commit1, commit2, callback = BuildStep("step1"), MockCommit("1"), MockCommit("2"), Mock()
step.add_success_listener(callback)
step.add_commit(commit1)
step.add_commit(commit2)
step.start()
step.succeed()
callback.assert_called_once_with([commit1, commit2])
def test_build_step_failures_call_failure_callbacks(self):
step, commit1, commit2, callback = BuildStep("step1"), MockCommit("1"), MockCommit("2"), Mock()
step.add_failure_listener(callback)
step.add_commit(commit1)
step.add_commit(commit2)
step.start()
step.fail()
callback.assert_called_once_with([commit1, commit2])
def test_build_steps_only_passes_commits_present_when_the_step_was_started(self):
step, commit1, commit2, callback = BuildStep("step1"), MockCommit("1"), MockCommit("2"), Mock()
step.add_success_listener(callback)
step.add_commit(commit1)
step.start()
step.add_commit(commit2)
step.succeed()
callback.assert_called_once_with([commit1])
def test_build_steps_only_fails_commits_present_when_the_step_was_started(self):
step, commit1, commit2, callback = BuildStep("step1"), MockCommit("1"), MockCommit("2"), Mock()
step.add_failure_listener(callback)
step.add_commit(commit1)
step.start()
step.add_commit(commit2)
step.fail()
callback.assert_called_once_with([commit1])
def test_build_step_doesnt_call_wrong_callbacks(self):
step = BuildStep("step1")
commit1, commit2 = MockCommit("1"), MockCommit("2")
success_callback, failure_callback = Mock(), Mock()
step.add_success_listener(success_callback)
step.add_failure_listener(failure_callback)
step.add_commit(commit1)
step.start()
step.fail()
step.add_commit(commit2)
step.start()
step.succeed()
failure_callback.assert_called_once_with([commit1])
success_callback.assert_called_once_with([commit2])
def test_step_status_lists_waiting_commits(self):
step = BuildStep("a step")
commit1, commit2 = Mock(**{"status":"commit 1"}), Mock(**{"status":"commit 2"})
step.add_commit(commit1)
step.start()
step.succeed()
step.add_commit(commit2)
self.assertEqual(step.status["waiting"], ["commit 2"])
def test_step_status_lists_in_progress_commits(self):
step = BuildStep("a step")
commit1 = Mock(**{"status": "commit status"})
step.add_commit(commit1)
step.start()
self.assertEqual(step.status["waiting"], [])
self.assertEqual(step.status["in-progress"], ["commit status"])
def test_step_status_includes_step_name(self):
step = BuildStep("my build step")
self.assertEqual(step.status["name"], "my build step")
class CommitTests(unittest.TestCase):
def test_commit_name_is_saved(self):
commit = Commit("author", "branch", "message", "hash")
self.assertEqual(commit.author, "author")
self.assertEqual(commit.branch, "branch")
self.assertEqual(commit.message, "message")
self.assertEqual(commit.hash, "hash")
def test_commit_description_contains_user_branch_and_short_message(self):
commit = Commit("A User", "master", "My Commit Message\nSome more message", "123qwe")
self.assertEqual(commit.description, "A User on master: My Commit Message")
def test_commit_status_contains_user_and_hash(self):
commit = Commit("A User", "master", "My Commit Message\nSome more message", "123qwe")
self.assertEqual(commit.status, "123qwe by 'A User'")
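# Illustrative sketch (not part of the test suite): the Pipeline API as the
# tests above exercise it. The notifier argument is any object providing
# announce_pipeline_success / announce_step_failure; everything else below
# is an assumption for illustration.
def _example_pipeline_flow(notifier):
    build, deploy = BuildStep("build"), BuildStep("deploy")
    pipeline = Pipeline("example pipeline", [build, deploy], notifier)
    pipeline.add_commit(Commit("A User", "master", "Example message", "abc123"))
    pipeline.start_step("build")
    pipeline.pass_step("build")    # successful commits flow on to "deploy"
    pipeline.start_step("deploy")
    pipeline.pass_step("deploy")   # triggers notifier.announce_pipeline_success
    return pipeline.status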
|
|
"""
Author: Dr. John T. Hwang <[email protected]>
Dr. Mohamed A. Bouhlel <mbouhlel@umich>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, GENN, MGP
try:
from smt.surrogate_models import IDW, RBF, RMTC, RMTB
compiled_available = True
except:
compiled_available = False
print_output = False
def genn():
neural_net = GENN()
neural_net.options["alpha"] = 0.1 # learning rate that controls optimizer step size
neural_net.options["beta1"] = 0.9 # tuning parameter to control ADAM optimization
neural_net.options["beta2"] = 0.99 # tuning parameter to control ADAM optimization
neural_net.options[
"lambd"
] = 0.1 # lambd = 0. = no regularization, lambd > 0 = regularization
neural_net.options[
"gamma"
] = 1.0 # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
neural_net.options["deep"] = 2 # number of hidden layers
neural_net.options["wide"] = 12 # number of nodes per hidden layer
neural_net.options[
"mini_batch_size"
] = 10000 # used to divide data into training batches (use for large data sets)
neural_net.options["num_epochs"] = 25 # number of passes through data
neural_net.options[
"num_iterations"
] = 100 # number of optimizer iterations per mini-batch
neural_net.options["is_print"] = True
return neural_net
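# Illustrative sketch (not part of the test suite): training the GENN
# surrogate configured by genn() on synthetic gradient-enhanced data. The
# toy function, sample counts, and prediction call are assumptions.
def _example_train_genn():
    ndim = 3
    xt = np.random.rand(50, ndim)
    yt = np.sum(xt ** 2, axis=1, keepdims=True)
    sm = genn()
    sm.options["is_print"] = False
    sm.set_training_values(xt, yt)
    for i in range(ndim):
        # Analytic derivative of sum(x**2) with respect to x_i.
        sm.set_training_derivatives(xt, 2.0 * xt[:, [i]], i)
    sm.train()
    return sm.predict_values(np.random.rand(5, ndim))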
class Test(SMTestCase):
def setUp(self):
ndim = 3
nt = 100
ne = 100
ncomp = 1
problems = OrderedDict()
problems["exp"] = TensorProduct(ndim=ndim, func="exp")
problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
problems["cos"] = TensorProduct(ndim=ndim, func="cos")
sms = OrderedDict()
sms["LS"] = LS()
sms["QP"] = QP()
sms["KRG"] = KRG(theta0=[1e-2] * ndim)
sms["KPLS"] = KPLS(theta0=[1e-2] * ncomp, n_comp=ncomp)
sms["KPLSK"] = KPLSK(theta0=[1] * ncomp, n_comp=ncomp)
sms["MGP"] = KPLSK(theta0=[1e-2] * ncomp, n_comp=ncomp)
sms["GEKPLS"] = GEKPLS(theta0=[1e-2] * 2, n_comp=2, delta_x=1e-1)
sms["GENN"] = genn()
if compiled_available:
sms["IDW"] = IDW()
sms["RBF"] = RBF()
sms["RMTC"] = RMTC()
sms["RMTB"] = RMTB()
t_errors = {}
t_errors["LS"] = 1.0
t_errors["QP"] = 1.0
t_errors["KRG"] = 1.2
t_errors["MFK"] = 1e0
t_errors["KPLS"] = 1.2
t_errors["KPLSK"] = 1e0
t_errors["MGP"] = 1e0
t_errors["GEKPLS"] = 1.4
t_errors["GENN"] = 1.2
if compiled_available:
t_errors["IDW"] = 1e0
t_errors["RBF"] = 1e-2
t_errors["RMTC"] = 1e-1
t_errors["RMTB"] = 1e-1
e_errors = {}
e_errors["LS"] = 1.5
e_errors["QP"] = 1.5
e_errors["KRG"] = 1e-2
e_errors["MFK"] = 1e-2
e_errors["KPLS"] = 2e-2
e_errors["KPLSK"] = 1e-2
e_errors["MGP"] = 2e-2
e_errors["GEKPLS"] = 2e-2
e_errors["GENN"] = 2e-2
if compiled_available:
e_errors["IDW"] = 1e0
e_errors["RBF"] = 1e0
e_errors["RMTC"] = 2e-1
e_errors["RMTB"] = 2e-1
self.nt = nt
self.ne = ne
self.ndim = ndim
self.problems = problems
self.sms = sms
self.t_errors = t_errors
self.e_errors = e_errors
def run_test(self):
method_name = inspect.stack()[1][3]
pname = method_name.split("_")[1]
sname = method_name.split("_")[2]
prob = self.problems[pname]
sampling = LHS(xlimits=prob.xlimits, random_state=42)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
print(prob(xt, kx=0).shape)
for i in range(self.ndim):
yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
sm0 = self.sms[sname]
sm = sm0.__class__()
sm.options = sm0.options.clone()
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
if sname in ["KPLS", "KRG", "KPLSK", "GEKPLS"]:
optname = method_name.split("_")[3]
sm.options["hyper_opt"] = optname
sm.set_training_values(xt, yt[:, 0])
if sm.supports["training_derivatives"]:
for i in range(self.ndim):
sm.set_training_derivatives(xt, yt[:, i + 1], i)
with Silence():
sm.train()
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
if sm.supports["variances"]:
sm.predict_variances(xe)
if pname == "cos":
self.assertLessEqual(e_error, self.e_errors[sname] + 1.5)
else:
self.assertLessEqual(e_error, self.e_errors[sname] + 1e-4)
self.assertLessEqual(t_error, self.t_errors[sname] + 1e-4)
def test_exp_LS(self):
self.run_test()
def test_exp_QP(self):
self.run_test()
def test_exp_KRG_Cobyla(self):
self.run_test()
def test_exp_KRG_TNC(self):
self.run_test()
def test_exp_KPLS_Cobyla(self):
self.run_test()
def test_exp_KPLS_TNC(self):
self.run_test()
def test_exp_KPLSK_Cobyla(self):
self.run_test()
def test_exp_KPLSK_TNC(self):
self.run_test()
def test_exp_MGP(self):
self.run_test()
def test_exp_GEKPLS_Cobyla(self):
self.run_test()
def test_exp_GEKPLS_TNC(self):
self.run_test()
def test_exp_GENN(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RMTB(self):
self.run_test()
# --------------------------------------------------------------------
# Function: tanh
def test_tanh_LS(self):
self.run_test()
def test_tanh_QP(self):
self.run_test()
def test_tanh_KRG_Cobyla(self):
self.run_test()
def test_tanh_KRG_TNC(self):
self.run_test()
def test_tanh_KPLS_Cobyla(self):
self.run_test()
def test_tanh_KPLS_TNC(self):
self.run_test()
def test_tanh_KPLSK_Cobyla(self):
self.run_test()
def test_tanh_KPLSK_TNC(self):
self.run_test()
def test_tanh_MGP(self):
self.run_test()
def test_tanh_GEKPLS_Cobyla(self):
self.run_test()
def test_tanh_GEKPLS_TNC(self):
self.run_test()
def test_tanh_GENN(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RMTB(self):
self.run_test()
# --------------------------------------------------------------------
# Function: cos
def test_cos_LS(self):
self.run_test()
def test_cos_QP(self):
self.run_test()
def test_cos_KRG_Cobyla(self):
self.run_test()
def test_cos_KRG_TNC(self):
self.run_test()
def test_cos_KPLS_Cobyla(self):
self.run_test()
def test_cos_KPLS_TNC(self):
self.run_test()
def test_cos_KPLSK_Cobyla(self):
self.run_test()
def test_cos_KPLSK_TNC(self):
self.run_test()
def test_cos_MGP(self):
self.run_test()
def test_cos_GEKPLS_Cobyla(self):
self.run_test()
def test_cos_GEKPLS_TNC(self):
self.run_test()
def test_cos_GENN(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RMTB(self):
self.run_test()
if __name__ == "__main__":
print_output = True
print("%6s %8s %18s %18s" % ("SM", "Problem", "Train. pt. error", "Test pt. error"))
unittest.main()
|
|
"""
Author: Eric J. Ma
Affiliation: Massachusetts Institute of Technology
Purpose of this Python definitions file:
- To provide accuracy metric functions for use in simulations.
"""
# Prototype #2 for Accuracy Functions (created on 2 July 2014)
def fraction_accurate_reassortants(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in the simulator and reconstructor objects, and returns
the fraction of reassortants that are correct.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(overlaps) / len(simulation)
The fraction of simulation reassortants that were correctly
identified as such in the reconstruction.
"""
simulation = [str(item) for item in simulator.reassortants()]
reconstruction = reconstructor.reassortants(reconstruction_type=reconstruction_type)
overlaps = 0
for isolate in reconstruction:
if isolate in simulation:
overlaps += 1
if len(simulation) == 0:
return 0
else:
return float(overlaps) / len(simulation)
def fraction_inaccurate_reassortants(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in the simulator and reconstructor objects, and returns
the fraction of the reconstruction reassortants that were incorrect.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(incorrect) / len(reconstruction)
The fraction of reconstruction reassortants that were not present as
reassortants in the simulation.
"""
simulation = [str(item) for item in simulator.reassortants()]
reconstruction = reconstructor.reassortants(reconstruction_type=reconstruction_type)
incorrect = 0
for isolate in reconstruction:
if isolate not in simulation:
incorrect += 1
if len(reconstruction) == 0:
return 0
else:
return float(incorrect) / len(reconstruction)
def fraction_accurate_edges(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in a simulator and reconstructor object, and returns the
fraction of accurate edges that were identified in the Reconstructor's
reconstruction network.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(overlaps) / len(simulation)
The fraction of simulation edges that were correctly identified in
the specified reconstruction.
"""
simulation = simulator.relabeled_transmission_graph.edges(data=True)
if reconstruction_type == 'reconstruction':
reconstruction = reconstructor.pruned_condensed_graph.edges(data=True)
if reconstruction_type == 'reassigned_source':
reconstruction = reconstructor.reassigned_source_graph.edges(data=True)
overlaps = 0
for edge in reconstruction:
if edge in simulation:
overlaps += 1
if len(simulation) == 0:
return 0
else:
return float(overlaps) / len(simulation)
def fraction_inaccurate_edges(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in a simulator and reconstructor object, and returns the
fraction of edges in the reconstruction that were not present in the
simulation.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(incorrect) / len(reconstruction)
The fraction of reconstruction edges that were not present in the
simulation.
"""
simulation = simulator.relabeled_transmission_graph.edges(data=True)
if reconstruction_type == 'reconstruction':
reconstruction = reconstructor.pruned_condensed_graph.edges(data=True)
if reconstruction_type == 'reassigned_source':
reconstruction = reconstructor.reassigned_source_graph.edges(data=True)
incorrect = 0
for edge in reconstruction:
if edge not in simulation:
incorrect += 1
if len(reconstruction) == 0:
return 0
else:
return float(incorrect) / len(reconstruction)
def path_accuracy(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in a simulator and reconstructor object, and returns the
fraction of edges in the reconstruction that represented a path in the
original simulation. This becomes especially pertinent for the case where
sampling occurs.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(num_correct) / float(num_considered)
The fraction of edges in the reconstruction that represented an
accurate path in the simulation.
"""
# simulation = simulator.relabeled_transmission_graph.edges(data=True)
# simulation_full = simulator.full_transmission_graph.edges()
# simulation_reas = simulator.reassortant_edges
if reconstruction_type == 'reconstruction':
reconstruction = reconstructor.pruned_condensed_graph.edges(data=True)
if reconstruction_type == 'reassigned_source':
reconstruction = reconstructor.reassigned_source_graph.edges(data=True)
num_considered = 0
num_correct = 0
for edge in reconstruction:
num_considered += 1
if reconstructor.is_full_transmission_edge(edge):
if simulator.full_transmission_path_exists(edge[0], edge[1]):
num_correct += 1
else:
num_correct_segments = 0
for segment in edge[2]['segments']:
if simulator.segment_transmission_path_exists(edge[0], edge[1], int(segment)):
num_correct_segments += 1
if num_correct_segments == len(edge[2]['segments']):
num_correct += 1
if not reconstructor.is_full_transmission_edge(edge):
num_correct_segments = 0
for segment in edge[2]['segments']:
if simulator.segment_transmission_path_exists(edge[0], edge[1], int(segment)):
num_correct_segments += 1
if len(edge[2]['segments']) == num_correct_segments:
num_correct += 1
else:
pass
return float(num_correct) / float(num_considered) if num_considered else 0
def fraction_accurate_reassortant_edges(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in the simulator and reconstructor objects, and returns
the fraction of ground truth reassortant edges that were found in the
reconstruction.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(overlaps) / len(simulation)
The fraction of simulation reassortant edges that were correctly
identified as such in the reconstruction.
"""
simulation = simulator.reassortant_edges
reconstruction = reconstructor.reassortant_edges(reconstruction_type)
overlaps = 0
for edge in reconstruction:
if edge in simulation:
overlaps += 1
if len(simulation) == 0:
return 0
else:
return float(overlaps) / len(simulation)
def fraction_inaccurate_reassortant_edges(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in the simulator and reconstructor objects, and returns
the fraction of reconstruction reassortant edges that are incorrect.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(incorrect) / len(reconstruction)
The fraction of reconstruction reassortant edges that were
incorrectly identified as reassortant edges.
"""
simulation = simulator.reassortant_edges
reconstruction = reconstructor.reassortant_edges(reconstruction_type)
incorrect = 0
for edge in reconstruction:
if edge not in simulation:
incorrect += 1
if len(reconstruction) == 0:
return 0
else:
return float(incorrect) / len(reconstruction)
def reassortant_path_accuracy(simulator, reconstructor, reconstruction_type='reconstruction'):
"""
This method takes in a simulator and reconstructor object, and returns the
fraction of reassortant edges in the reconstruction that represented the
segment transmission path in the simulation.
INPUTS:
- OBJECTS: simulator, reconstructor
The simulator and reconstructor objects that hold the graphs.
- STRING: reconstruction_type
A string specifying the type of reconstruction that we want to
evaluate accuracy for.
Currently, we allow:
- 'reconstruction': the best reconstruction possible.
- 'reassigned_source': a shuffled version of the reconstruction,
in which the edges are shuffled by ignoring genetic similarity.
OUTPUTS:
- FLOAT: float(num_correct) / float(num_considered)
The fraction of segment transmission edges in the reconstruction
that represented an accurate path in the simulation.
"""
simulation = simulator.relabeled_transmission_graph.edges(data=True)
reconstruction = reconstructor.reassortant_edges(reconstruction_type)
num_considered = 0
num_correct = 0
for edge in reconstruction:
for segment in edge[2]['segments']:
num_considered += 1
if simulator.segment_transmission_path_exists(edge[0], edge[1], int(segment)):
num_correct += 1
if len(reconstruction) == 0:
return 0
else:
return float(num_correct) / float(num_considered)
|
|
import subprocess
import xmltodict
import os
import sys
import logging
import stat
from functools import lru_cache
import iepy
from iepy.utils import DIRS, unzip_from_url
logger = logging.getLogger(__name__)
def detect_java_version():
java_cmd = os.getenv('JAVAHOME')
if not java_cmd:
print('Environment variable JAVAHOME not defined.')
sys.exit(-1)
here = os.path.dirname(os.path.realpath(__file__))
jar = os.path.join(here, 'utils', 'get-java-version.jar')
jversion = subprocess.check_output([java_cmd, "-jar", jar], stderr=subprocess.PIPE)
return int(jversion.strip())
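# Note: JAVAHOME is expected to point at the java executable itself, since it is
# invoked directly above. A hypothetical invocation (the script name is illustrative only):
#
#     JAVAHOME=/usr/bin/java python run_preprocess.py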
JAVA_VERSION = detect_java_version()
_STANFORD_BASE_URL = "http://nlp.stanford.edu/software/"
if JAVA_VERSION < 8:
# Stanford Core NLP 3.4.1 - Last version to support Java 6 and Java 7
# Unfortunately, the public release name ("version") that the Stanford folks use isn't
# the one used in their download URLs. So, 3.4.1 is "stanford-corenlp-full-2014-08-27"
_CORENLP_VERSION = "stanford-corenlp-full-2014-08-27"
DOWNLOAD_URL = _STANFORD_BASE_URL + _CORENLP_VERSION + ".zip"
DOWNLOAD_URL_ES = _STANFORD_BASE_URL + 'stanford-spanish-corenlp-2014-08-26-models.jar'
DOWNLOAD_URL_DE = _STANFORD_BASE_URL + 'stanford-german-2016-01-19-models.jar'
_FOLDER_PATH = os.path.join(DIRS.user_data_dir, _CORENLP_VERSION)
COMMAND_PATH = os.path.join(_FOLDER_PATH, "corenlp.sh")
else:
# Stanford Core NLP 3.5.2
_CORENLP_VERSION = "stanford-corenlp-full-2015-04-20"
DOWNLOAD_URL_ES = _STANFORD_BASE_URL + 'stanford-spanish-corenlp-2015-01-08-models.jar'
DOWNLOAD_URL_DE = _STANFORD_BASE_URL + 'stanford-german-2016-01-19-models.jar'
DOWNLOAD_URL = _STANFORD_BASE_URL + _CORENLP_VERSION + ".zip"
_FOLDER_PATH = os.path.join(DIRS.user_data_dir, _CORENLP_VERSION)
COMMAND_PATH = os.path.join(_FOLDER_PATH, "corenlp.sh")
@lru_cache(maxsize=1)
def get_analizer(*args, **kwargs):
logger.info("Loading StanfordCoreNLP...")
return StanfordCoreNLP(*args, **kwargs)
class StanfordCoreNLP:
CMD_ARGS = "-outputFormat xml -threads 4"
PROMPT = b"\nNLP> "
def __init__(self, tokenize_with_whitespace=False, gazettes_filepath=None):
cmd_args = self.command_args(tokenize_with_whitespace, gazettes_filepath)
os.chdir(_FOLDER_PATH)
self.corenlp_cmd = [COMMAND_PATH] + cmd_args
self._start_proc()
def _start_proc(self):
self.proc = subprocess.Popen(
self.corenlp_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=_FOLDER_PATH
)
self.output = self.iter_output_segments()
self.receive() # Wait until the prompt is ready
def command_args(self, tokenize_with_whitespace, gazettes_filepath):
annotators = ["tokenize", "ssplit", "pos", "lemma", "ner", "parse", "dcoref"]
cmd_args = self.CMD_ARGS[:]
if tokenize_with_whitespace:
cmd_args += " -tokenize.whitespace=true"
if gazettes_filepath:
annotators.insert(annotators.index("ner") + 1, "regexner")
cmd_args += " -regexner.mapping {}".format(gazettes_filepath)
tkn_opts = self._tokenizer_options()
if tkn_opts:
cmd_args += " " + tkn_opts
lang = iepy.instance.settings.IEPY_LANG
edu_mods = "edu/stanford/nlp/models"
if lang == 'es':
annotators.remove('dcoref') # not supported for spanish on Stanford 3.4.1
cmd_args += " -tokenize.language es"
cmd_args += " -pos.model %s/pos-tagger/spanish/spanish-distsim.tagger" % edu_mods
cmd_args += " -ner.model %s/ner/spanish.ancora.distsim.s512.crf.ser.gz" % edu_mods
cmd_args += " -parse.model %s/lexparser/spanishPCFG.ser.gz" % edu_mods
if lang == 'de':
annotators.remove('dcoref') # not supported for german on Stanford 3.4.1
cmd_args += " -tokenize.language de"
cmd_args += " -pos.model %s/pos-tagger/german/german-dewac.tagger" % edu_mods
cmd_args += " -ner.model %s/ner/german.dewac_175m_600.crf.ser.gz" % edu_mods
cmd_args += " -parse.model %s/lexparser/germanPCFG.ser.gz" % edu_mods
cmd_args += " -annotators {}".format(",".join(annotators))
return cmd_args.split()
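# Illustration of the assembled arguments (derived from the code above, values assumed):
# for IEPY_LANG == 'es' with no gazettes file, 'dcoref' is dropped and the list ends with
#
#     -annotators tokenize,ssplit,pos,lemma,ner,parse
#
# plus the Spanish tokenizer/POS/NER/parser model flags added above.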
def _tokenizer_options(self):
"""As stated in
http://nlp.stanford.edu/nlp/javadoc/javanlp/edu/stanford/nlp/process/PTBTokenizer.html
there are several tokenizer options that can be changed.
We only send to the command line those that differ from the Stanford defaults.
"""
extra_keys = ['ptb3Escaping']
defaults = {
'invertible': False,
'tokenizeNLs': False,
'americanize': True,
'normalizeSpace': True,
'normalizeAmpersandEntity': True,
'normalizeCurrency': True,
'normalizeFractions': True,
'normalizeParentheses': True,
'normalizeOtherBrackets': True,
'asciiQuotes': False,
'latexQuotes': True,
'unicodeQuotes': False,
'ptb3Ellipsis': True,
'unicodeEllipsis': False,
'ptb3Dashes': True,
'keepAssimilations': True,
'escapeForwardSlashAsterisk': True,
'untokenizable': "firstDelete",
'strictTreebank3': False
}
allowed_keys = set(defaults.keys()).union(extra_keys)
customizations = getattr(iepy.instance.settings, 'CORENLP_TKN_OPTS', {})
opts = []
for k, v in customizations.items():
if k not in allowed_keys:
raise ValueError('Invalid key "%s". Valid options are %s' % (k, allowed_keys))
if k in defaults and defaults[k] == v:
# valid option, but it's the defaults, so no need to provide it.
continue
if isinstance(v, bool):
v = ("%s" % v).lower()
opts.append("%s=%s" % (k, v))
if opts:
return '-tokenize.options "{}"'.format(','.join(opts))
def iter_output_segments(self):
while True:
buf = b""
while self.PROMPT not in buf:
buf += self.proc.stdout.read1(1024)
if self.proc.poll() == 1:
logger.error("Error running '{}'".format(" ".join(self.corenlp_cmd)))
logger.error("Output was: '{}'".format(buf))
sys.exit(1)
segment, _, buf = buf.partition(self.PROMPT)
yield segment.decode("utf8")
def receive(self):
return next(self.output)
def send(self, data):
data = data.replace("\n", " ") + "\n"
self.proc.stdin.write(data.encode("utf8"))
self.proc.stdin.flush()
def quit(self):
self.proc.stdin.write("q\n".encode("utf8"))
self.proc.stdin.flush()
@lru_cache(maxsize=1)
def analyse(self, text):
self.send(text)
text = self.receive()
i = text.index("<?xml version")
text = text[i:]
return xmltodict.parse(text)["root"]["document"]
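# A minimal usage sketch (not part of the original file): the analyzer is obtained
# through the cached factory above and fed plain text; the result is the parsed
# <document> element of CoreNLP's XML output (the key names below are assumptions
# based on that XML layout):
#
#     analizer = get_analizer()
#     doc = analizer.analyse("Stanford parses this sentence.")
#     sentences = doc["sentences"]["sentence"]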
def download(lang='en'):
base = os.path.dirname(COMMAND_PATH)
if os.path.isfile(COMMAND_PATH):
print("Stanford CoreNLP is already downloaded at {}.".format(base))
else:
print("Downloading Stanford CoreNLP...")
unzip_from_url(DOWNLOAD_URL, DIRS.user_data_dir)
# Zip acquired. Make sure the right Java is used and the runner script is executable
for directory in os.listdir(DIRS.user_data_dir):
if directory.startswith("stanford-corenlp-full"):
stanford_directory = os.path.join(DIRS.user_data_dir, directory)
if os.path.isdir(stanford_directory):
runner_path = os.path.join(stanford_directory, "corenlp.sh")
st = os.stat(runner_path)
_content = open(runner_path).read()
_content = _content.replace('java', '$JAVAHOME')
with open(runner_path, 'w') as runner_file:
runner_file.write(_content)
os.chmod(runner_path, st.st_mode | stat.S_IEXEC)
break
# Download extra data for specific language
download_urls = dict(es=DOWNLOAD_URL_ES, de=DOWNLOAD_URL_DE)
if lang.lower() in download_urls.keys():
print("Downloading Stanford CoreNLP extra data for lang '{}'...".format(lang))
unzip_from_url(download_urls[lang.lower()], _FOLDER_PATH)
elif lang.lower() != 'en':
print("There are no extra data to download for lang '{}'.".format(lang))
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from collections import namedtuple
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.udf import UserDefinedFunction
from pyspark.sql.types import IntegerType, StringType, StructType
Database = namedtuple("Database", "name description locationUri")
Table = namedtuple("Table", "name database description tableType isTemporary")
Column = namedtuple("Column", "name description dataType nullable isPartition isBucket")
Function = namedtuple("Function", "name description className isTemporary")
class Catalog(object):
"""User-facing catalog API, accessible through `SparkSession.catalog`.
This is a thin wrapper around its Scala implementation org.apache.spark.sql.catalog.Catalog.
"""
def __init__(self, sparkSession):
"""Create a new Catalog that wraps the underlying JVM object."""
self._sparkSession = sparkSession
self._jsparkSession = sparkSession._jsparkSession
self._jcatalog = sparkSession._jsparkSession.catalog()
@ignore_unicode_prefix
@since(2.0)
def currentDatabase(self):
"""Returns the current default database in this session."""
return self._jcatalog.currentDatabase()
@ignore_unicode_prefix
@since(2.0)
def setCurrentDatabase(self, dbName):
"""Sets the current default database in this session."""
return self._jcatalog.setCurrentDatabase(dbName)
@ignore_unicode_prefix
@since(2.0)
def listDatabases(self):
"""Returns a list of databases available across all sessions."""
iter = self._jcatalog.listDatabases().toLocalIterator()
databases = []
while iter.hasNext():
jdb = iter.next()
databases.append(Database(
name=jdb.name(),
description=jdb.description(),
locationUri=jdb.locationUri()))
return databases
@ignore_unicode_prefix
@since(2.0)
def listTables(self, dbName=None):
"""Returns a list of tables/views in the specified database.
If no database is specified, the current database is used.
This includes all temporary views.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listTables(dbName).toLocalIterator()
tables = []
while iter.hasNext():
jtable = iter.next()
tables.append(Table(
name=jtable.name(),
database=jtable.database(),
description=jtable.description(),
tableType=jtable.tableType(),
isTemporary=jtable.isTemporary()))
return tables
@ignore_unicode_prefix
@since(2.0)
def listFunctions(self, dbName=None):
"""Returns a list of functions registered in the specified database.
If no database is specified, the current database is used.
This includes all temporary functions.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listFunctions(dbName).toLocalIterator()
functions = []
while iter.hasNext():
jfunction = iter.next()
functions.append(Function(
name=jfunction.name(),
description=jfunction.description(),
className=jfunction.className(),
isTemporary=jfunction.isTemporary()))
return functions
@ignore_unicode_prefix
@since(2.0)
def listColumns(self, tableName, dbName=None):
"""Returns a list of columns for the given table/view in the specified database.
If no database is specified, the current database is used.
Note: the order of arguments here is different from that of its JVM counterpart
because Python does not support method overloading.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listColumns(dbName, tableName).toLocalIterator()
columns = []
while iter.hasNext():
jcolumn = iter.next()
columns.append(Column(
name=jcolumn.name(),
description=jcolumn.description(),
dataType=jcolumn.dataType(),
nullable=jcolumn.nullable(),
isPartition=jcolumn.isPartition(),
isBucket=jcolumn.isBucket()))
return columns
@since(2.0)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
warnings.warn(
"createExternalTable is deprecated since Spark 2.2, please use createTable instead.",
DeprecationWarning)
return self.createTable(tableName, path, source, schema, **options)
@since(2.2)
def createTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used. When ``path`` is specified, an external table is
created from the data at the given path. Otherwise a managed table is created.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created table.
:return: :class:`DataFrame`
"""
if path is not None:
options["path"] = path
if source is None:
source = self._sparkSession._wrapped._conf.defaultDataSourceName()
if schema is None:
df = self._jcatalog.createTable(tableName, source, options)
else:
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
scala_datatype = self._jsparkSession.parseDataType(schema.json())
df = self._jcatalog.createTable(tableName, source, scala_datatype, options)
return DataFrame(df, self._sparkSession._wrapped)
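# A hedged sketch of the two createTable modes described above (the table names, path,
# and `my_schema` are hypothetical):
#
#     df_ext = spark.catalog.createTable("events", source="parquet", path="/tmp/events")
#     df_mng = spark.catalog.createTable("events_managed", source="parquet", schema=my_schema)
#
# Passing a path creates an external table over existing data; omitting it creates a
# managed table.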
@since(2.0)
def dropTempView(self, viewName):
"""Drops the local temporary view with the given view name in the catalog.
If the view has been cached before, then it will also be uncached.
Returns true if this view is dropped successfully, false otherwise.
Note that the return type of this method was None in Spark 2.0, but changed to Boolean
in Spark 2.1.
>>> spark.createDataFrame([(1, 1)]).createTempView("my_table")
>>> spark.table("my_table").collect()
[Row(_1=1, _2=1)]
>>> spark.catalog.dropTempView("my_table")
>>> spark.table("my_table") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: ...
"""
self._jcatalog.dropTempView(viewName)
@since(2.1)
def dropGlobalTempView(self, viewName):
"""Drops the global temporary view with the given view name in the catalog.
If the view has been cached before, then it will also be uncached.
Returns true if this view is dropped successfully, false otherwise.
>>> spark.createDataFrame([(1, 1)]).createGlobalTempView("my_table")
>>> spark.table("global_temp.my_table").collect()
[Row(_1=1, _2=1)]
>>> spark.catalog.dropGlobalTempView("my_table")
>>> spark.table("global_temp.my_table") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: ...
"""
self._jcatalog.dropGlobalTempView(viewName)
@since(2.0)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self._sparkSession.udf.register(name, f, returnType)
@since(2.0)
def isCached(self, tableName):
"""Returns true if the table is currently cached in-memory."""
return self._jcatalog.isCached(tableName)
@since(2.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._jcatalog.cacheTable(tableName)
@since(2.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._jcatalog.uncacheTable(tableName)
@since(2.0)
def clearCache(self):
"""Removes all cached tables from the in-memory cache."""
self._jcatalog.clearCache()
@since(2.0)
def refreshTable(self, tableName):
"""Invalidates and refreshes all the cached data and metadata of the given table."""
self._jcatalog.refreshTable(tableName)
@since('2.1.1')
def recoverPartitions(self, tableName):
"""Recovers all the partitions of the given table and update the catalog.
Only works with a partitioned table, and not a view.
"""
self._jcatalog.recoverPartitions(tableName)
@since('2.2.0')
def refreshByPath(self, path):
"""Invalidates and refreshes all the cached data (and the associated metadata) for any
DataFrame that contains the given data source path.
"""
self._jcatalog.refreshByPath(path)
def _reset(self):
"""(Internal use only) Drop all existing databases (except "default"), tables,
partitions and functions, and set the current database to "default".
This is mainly used for tests.
"""
self._jsparkSession.sessionState().catalog().reset()
def _test():
import os
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.catalog
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.catalog.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.catalog tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.catalog,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
|
import uuid
import threading
import time
import mock
from nose.tools import eq_
from kazoo.exceptions import LockTimeout
from kazoo.testing import KazooTestCase
from kazoo.recipe.partitioner import PartitionState
class SlowLockMock():
"""Emulates a slow ZooKeeper lock."""
default_delay_time = 3
def __init__(self, client, lock, delay_time=None):
self._client = client
self._lock = lock
self.delay_time = self.default_delay_time \
if delay_time is None else delay_time
def acquire(self, timeout=None):
sleep = self._client.handler.sleep_func
sleep(self.delay_time)
if timeout is None:
return self._lock.acquire()
start_time = time.time()
while time.time() - start_time < timeout:
if self._lock.acquire(False):
return True
sleep(0.1)
raise LockTimeout("Mocked slow lock has timed out.")
def release(self):
self._lock.release()
class KazooPartitionerTests(KazooTestCase):
@staticmethod
def make_event():
return threading.Event()
def setUp(self):
super(KazooPartitionerTests, self).setUp()
self.path = "/" + uuid.uuid4().hex
self.__partitioners = []
def test_party_of_one(self):
self.__create_partitioner(size=3)
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0, 1, 2])
self.__finish()
def test_party_of_two(self):
for i in range(2):
self.__create_partitioner(size=2, identifier=str(i))
self.__wait_for_acquire()
self.__assert_partitions([0], [1])
self.__partitioners[0].finish()
self.__wait()
eq_(self.__partitioners[1].release, True)
self.__partitioners[1].finish()
def test_party_expansion(self):
for i in range(2):
self.__create_partitioner(size=3, identifier=str(i))
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0, 2], [1])
for partitioner in self.__partitioners:
partitioner.state_change_event.clear()
# Add another partition, wait till they settle
self.__create_partitioner(size=3, identifier="2")
self.__wait()
self.__assert_state(PartitionState.RELEASE,
partitioners=self.__partitioners[:-1])
for partitioner in self.__partitioners[:-1]:
eq_(partitioner.state_change_event.is_set(), True)
self.__release(self.__partitioners[:-1])
self.__wait_for_acquire()
self.__assert_partitions([0], [1], [2])
self.__finish()
def test_more_members_than_set_items(self):
for i in range(2):
self.__create_partitioner(size=1, identifier=str(i))
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0], [])
self.__finish()
def test_party_session_failure(self):
partitioner = self.__create_partitioner(size=3)
self.__wait_for_acquire()
eq_(partitioner.state, PartitionState.ACQUIRED)
# simulate session failure
partitioner._fail_out()
partitioner.release_set()
self.assertTrue(partitioner.failed)
def test_connection_loss(self):
self.__create_partitioner(identifier="0", size=3)
self.__create_partitioner(identifier="1", size=3)
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0, 2], [1])
# Emulate connection loss
self.lose_connection(self.make_event)
self.__assert_state(PartitionState.RELEASE)
self.__release()
# Check that partitioners settle after connection loss
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0, 2], [1])
# Check that partitioners react on new events after connection loss
self.__create_partitioner(identifier="2", size=3)
self.__wait()
self.__assert_state(PartitionState.RELEASE,
partitioners=self.__partitioners[:-1])
self.__release(partitioners=self.__partitioners[:-1])
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0], [1], [2])
def test_race_condition_new_partitioner_during_the_lock(self):
locks = {}
def get_lock(path):
lock = locks.setdefault(path, self.client.handler.lock_object())
return SlowLockMock(self.client, lock)
with mock.patch.object(self.client, "Lock", side_effect=get_lock):
# Create first partitioner. It will start to acquire the set members.
self.__create_partitioner(identifier="0", size=2)
# Wait until the first partitioner has acquired first lock and
# started to acquire the second lock.
self.client.handler.sleep_func(SlowLockMock.default_delay_time + 1)
# Create the second partitioner at the time when the first
# partitioner is in the process of acquiring the lock that should
# belong to the second partitioner.
self.__create_partitioner(identifier="1", size=2)
# The first partitioner should acquire the both locks but then it
# must notice that the party has changed and it must reacquire
# the set. No deadlocks must happen.
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0], [1])
def test_race_condition_new_partitioner_steals_the_lock(self):
locks = {}
def get_lock(path):
new_lock = self.client.handler.lock_object()
lock = locks.setdefault(path, new_lock)
if lock is new_lock:
# The first partitioner will be delayed
delay_time = SlowLockMock.default_delay_time
else:
# The second partitioner won't be delayed
delay_time = 0
return SlowLockMock(self.client, lock, delay_time=delay_time)
with mock.patch.object(self.client, "Lock", side_effect=get_lock):
# Create first partitioner. It will start to acquire the set members.
self.__create_partitioner(identifier="0", size=2)
# Wait until the first partitioner has acquired first lock and
# started to acquire the second lock.
self.client.handler.sleep_func(SlowLockMock.default_delay_time + 1)
# Create the second partitioner at the time when the first
# partitioner is in the process of acquiring the lock that should
# belong to the second partitioner. The second partitioner should
# steal the lock because it won't be delayed.
self.__create_partitioner(identifier="1", size=2)
# The first partitioner should fail to acquire the second lock and
# must notice that the party has changed and it must reacquire the
# set. No deadlocks must happen.
self.__wait_for_acquire()
self.__assert_state(PartitionState.ACQUIRED)
self.__assert_partitions([0], [1])
def __create_partitioner(self, size, identifier=None):
partitioner = self.client.SetPartitioner(
self.path, set=range(size), time_boundary=0.2, identifier=identifier)
self.__partitioners.append(partitioner)
return partitioner
def __wait_for_acquire(self):
for partitioner in self.__partitioners:
partitioner.wait_for_acquire(14)
def __assert_state(self, state, partitioners=None):
if partitioners is None:
partitioners = self.__partitioners
for partitioner in partitioners:
eq_(partitioner.state, state)
def __assert_partitions(self, *partitions):
eq_(len(partitions), len(self.__partitioners))
for partitioner, own_partitions in zip(self.__partitioners, partitions):
eq_(list(partitioner), own_partitions)
def __wait(self):
time.sleep(0.1)
def __release(self, partitioners=None):
if partitioners is None:
partitioners = self.__partitioners
for partitioner in partitioners:
partitioner.release_set()
def __finish(self):
for partitioner in self.__partitioners:
partitioner.finish()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import pickle
import shutil
from StringIO import StringIO
import sys
import tempfile
import unittest
from genshi.core import Markup
from genshi.input import XML
from genshi.template.base import BadDirectiveError, TemplateSyntaxError
from genshi.template.loader import TemplateLoader, TemplateNotFound
from genshi.template.markup import MarkupTemplate
class MarkupTemplateTestCase(unittest.TestCase):
"""Tests for markup template processing."""
def test_parse_fileobj(self):
fileobj = StringIO('<root> ${var} $var</root>')
tmpl = MarkupTemplate(fileobj)
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_parse_stream(self):
stream = XML('<root> ${var} $var</root>')
tmpl = MarkupTemplate(stream)
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_pickle(self):
stream = XML('<root>$var</root>')
tmpl = MarkupTemplate(stream)
buf = StringIO()
pickle.dump(tmpl, buf, 2)
buf.seek(0)
unpickled = pickle.load(buf)
self.assertEqual('<root>42</root>', str(unpickled.generate(var=42)))
def test_interpolate_mixed3(self):
tmpl = MarkupTemplate('<root> ${var} $var</root>')
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_interpolate_leading_trailing_space(self):
tmpl = MarkupTemplate('<root>${ foo }</root>')
self.assertEqual('<root>bar</root>', str(tmpl.generate(foo='bar')))
def test_interpolate_multiline(self):
tmpl = MarkupTemplate("""<root>${dict(
bar = 'baz'
)[foo]}</root>""")
self.assertEqual('<root>baz</root>', str(tmpl.generate(foo='bar')))
def test_interpolate_non_string_attrs(self):
tmpl = MarkupTemplate('<root attr="${1}"/>')
self.assertEqual('<root attr="1"/>', str(tmpl.generate()))
def test_interpolate_list_result(self):
tmpl = MarkupTemplate('<root>$foo</root>')
self.assertEqual('<root>buzz</root>', str(tmpl.generate(foo=('buzz',))))
def test_empty_attr(self):
tmpl = MarkupTemplate('<root attr=""/>')
self.assertEqual('<root attr=""/>', str(tmpl.generate()))
def test_bad_directive_error(self):
xml = '<p xmlns:py="http://genshi.edgewall.org/" py:do="nothing" />'
try:
tmpl = MarkupTemplate(xml, filename='test.html')
except BadDirectiveError, e:
self.assertEqual('test.html', e.filename)
if sys.version_info[:2] >= (2, 4):
self.assertEqual(1, e.lineno)
def test_directive_value_syntax_error(self):
xml = """<p xmlns:py="http://genshi.edgewall.org/" py:if="bar'" />"""
try:
tmpl = MarkupTemplate(xml, filename='test.html')
self.fail('Expected SyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
if sys.version_info[:2] >= (2, 4):
self.assertEqual(1, e.lineno)
def test_expression_syntax_error(self):
xml = """<p>
Foo <em>${bar"}</em>
</p>"""
try:
tmpl = MarkupTemplate(xml, filename='test.html')
self.fail('Expected SyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
if sys.version_info[:2] >= (2, 4):
self.assertEqual(2, e.lineno)
def test_expression_syntax_error_multi_line(self):
xml = """<p><em></em>
${bar"}
</p>"""
try:
tmpl = MarkupTemplate(xml, filename='test.html')
self.fail('Expected SyntaxError')
except TemplateSyntaxError, e:
self.assertEqual('test.html', e.filename)
if sys.version_info[:2] >= (2, 4):
self.assertEqual(3, e.lineno)
def test_markup_noescape(self):
"""
Verify that outputting context data that is a `Markup` instance is not
escaped.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
$myvar
</div>""")
self.assertEqual("""<div>
<b>foo</b>
</div>""", str(tmpl.generate(myvar=Markup('<b>foo</b>'))))
def test_text_noescape_quotes(self):
"""
Verify that outputting context data in text nodes doesn't escape quotes.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
$myvar
</div>""")
self.assertEqual("""<div>
"foo"
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_attr_escape_quotes(self):
"""
Verify that outputting context data in attributes escapes quotes.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<elem class="$myvar"/>
</div>""")
self.assertEqual("""<div>
<elem class=""foo""/>
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_directive_element(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:if test="myvar">bar</py:if>
</div>""")
self.assertEqual("""<div>
bar
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_normal_comment(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<!-- foo bar -->
</div>""")
self.assertEqual("""<div>
<!-- foo bar -->
</div>""", str(tmpl.generate()))
def test_template_comment(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<!-- !foo -->
<!--!bar-->
</div>""")
self.assertEqual("""<div>
</div>""", str(tmpl.generate()))
def test_parse_with_same_namespace_nested(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<span xmlns:py="http://genshi.edgewall.org/">
</span>
</div>""")
self.assertEqual("""<div>
<span>
</span>
</div>""", str(tmpl.generate()))
def test_latin1_encoded_with_xmldecl(self):
tmpl = MarkupTemplate(u"""<?xml version="1.0" encoding="iso-8859-1" ?>
<div xmlns:py="http://genshi.edgewall.org/">
\xf6
</div>""".encode('iso-8859-1'), encoding='iso-8859-1')
self.assertEqual(u"""<?xml version="1.0" encoding="iso-8859-1"?>\n<div>
\xf6
</div>""", unicode(tmpl.generate()))
def test_latin1_encoded_explicit_encoding(self):
tmpl = MarkupTemplate(u"""<div xmlns:py="http://genshi.edgewall.org/">
\xf6
</div>""".encode('iso-8859-1'), encoding='iso-8859-1')
self.assertEqual(u"""<div>
\xf6
</div>""", unicode(tmpl.generate()))
def test_exec_with_trailing_space(self):
"""
Verify that a code block processing instruction with trailing space
does not cause a syntax error (see ticket #127).
"""
MarkupTemplate(u"""<foo>
<?python
bar = 42
?>
</foo>""")
def test_exec_import(self):
tmpl = MarkupTemplate(u"""<?python from datetime import timedelta ?>
<div xmlns:py="http://genshi.edgewall.org/">
${timedelta(days=2)}
</div>""")
self.assertEqual(u"""<div>
2 days, 0:00:00
</div>""", str(tmpl.generate()))
def test_exec_def(self):
tmpl = MarkupTemplate(u"""
<?python
def foo():
return 42
?>
<div xmlns:py="http://genshi.edgewall.org/">
${foo()}
</div>""")
self.assertEqual(u"""<div>
42
</div>""", str(tmpl.generate()))
def test_namespace_on_removed_elem(self):
"""
Verify that a namespace declaration on an element that is removed from
the generated stream does not get pushed up to the next non-stripped
element (see ticket #107).
"""
tmpl = MarkupTemplate("""<?xml version="1.0"?>
<Test xmlns:py="http://genshi.edgewall.org/">
<Size py:if="0" xmlns:t="test">Size</Size>
<Item/>
</Test>""")
self.assertEqual("""<?xml version="1.0"?>\n<Test>
<Item/>
</Test>""", str(tmpl.generate()))
def test_include_in_loop(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included $idx</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="${name}.html" py:for="idx in range(3)" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included 0</div><div>Included 1</div><div>Included 2</div>
</html>""", tmpl.generate(name='tmpl1').render())
finally:
shutil.rmtree(dirname)
def test_dynamic_include_href(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="${name}.html" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate(name='tmpl1').render())
finally:
shutil.rmtree(dirname)
def test_select_included_elements(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<li>$item</li>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<ul py:match="ul">${select('li')}</ul>
<ul py:with="items=(1, 2, 3)">
<xi:include href="tmpl1.html" py:for="item in items" />
</ul>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<ul><li>1</li><li>2</li><li>3</li></ul>
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_fallback_when_include_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_error_when_include_not_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"/>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=True)
tmpl = loader.load('tmpl2.html')
self.assertRaises(TemplateNotFound, tmpl.generate().render)
finally:
shutil.rmtree(dirname)
def test_fallback_when_include_not_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_fallback_when_auto_reload_true(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=True)
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_include_in_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html">
<xi:fallback>Missing</xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_nested_include_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html">
<xi:fallback>Missing</xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_nested_include_in_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html" />
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_include_fallback_with_directive(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html"><xi:fallback>
<py:if test="True">tmpl1.html not found</py:if>
</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
tmpl1.html not found
</html>""", tmpl.generate(debug=True).render())
finally:
shutil.rmtree(dirname)
def test_include_inlined(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=False)
tmpl = loader.load('tmpl2.html')
# if not inlined the following would be 5
self.assertEqual(7, len(tmpl.stream))
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_include_inlined_in_loop(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included $idx</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html" py:for="idx in range(3)" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=False)
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included 0</div><div>Included 1</div><div>Included 2</div>
</html>""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_allow_exec_false(self):
xml = ("""<?python
title = "A Genshi Template"
?>
<html xmlns:py="http://genshi.edgewall.org/">
<head>
<title py:content="title">This is replaced.</title>
</head>
</html>""")
try:
tmpl = MarkupTemplate(xml, filename='test.html',
allow_exec=False)
self.fail('Expected SyntaxError')
except TemplateSyntaxError, e:
pass
def test_allow_exec_true(self):
xml = ("""<?python
title = "A Genshi Template"
?>
<html xmlns:py="http://genshi.edgewall.org/">
<head>
<title py:content="title">This is replaced.</title>
</head>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html', allow_exec=True)
def test_exec_in_match(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body/p">
<?python title="wakka wakka wakka" ?>
${title}
</py:match>
<body><p>moot text</p></body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html', allow_exec=True)
self.assertEqual("""<html>
<body>
wakka wakka wakka
</body>
</html>""", tmpl.generate().render())
def test_with_in_match(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body/p">
<h1>${select('text()')}</h1>
${select('.')}
</py:match>
<body><p py:with="foo='bar'">${foo}</p></body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
<h1>bar</h1>
<p>bar</p>
</body>
</html>""", tmpl.generate().render())
def test_nested_include_matches(self):
# See ticket #157
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<html xmlns:py="http://genshi.edgewall.org/" py:strip="">
<div class="target">Some content.</div>
</html>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude">
<body>
<h1>Some full html document that includes file1.html</h1>
<xi:include href="tmpl1.html" />
</body>
</html>""")
finally:
file2.close()
file3 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file3.write("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude" py:strip="">
<div py:match="div[@class='target']" py:attrs="select('@*')">
Some added stuff.
${select('*|text()')}
</div>
<xi:include href="tmpl2.html" />
</html>
""")
finally:
file3.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""
<html>
<body>
<h1>Some full html document that includes file1.html</h1>
<div class="target">
Some added stuff.
Some content.
</div>
</body>
</html>
""", tmpl.generate().render())
finally:
shutil.rmtree(dirname)
def test_nested_matches_without_buffering(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body" once="true" buffer="false">
<body>
${select('*|text')}
And some other stuff...
</body>
</py:match>
<body>
<span py:match="span">Foo</span>
<span>Bar</span>
</body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
<span>Foo</span>
And some other stuff...
</body>
</html>""", tmpl.generate().render())
def test_match_without_select(self):
# See <http://genshi.edgewall.org/ticket/243>
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body" buffer="false">
<body>
This replaces the other text.
</body>
</py:match>
<body>
This gets replaced.
</body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
This replaces the other text.
</body>
</html>""", tmpl.generate().render())
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(MarkupTemplate.__module__))
suite.addTest(unittest.makeSuite(MarkupTemplateTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# -*- coding: utf-8 -*-
# (C) 2017 Tampere University of Technology
# MIT License
# Pauli Losoi
"""
Evaluate compound demands and prices.
Constants
---------
RELATIONS_DEMAND
Set of ChEBI relation ID strings used in evaluating demand.
RELATIONS_PRICE
Set of ChEBI relation ID strings used in evaluating price.
CHEBIS_DEMAND
Mapping from CHEBI ID strings to demand integer values.
CHEBIS_PRICE
Mapping from CHEBI ID strings to price integer values.
Functions
---------
evaluate_compound
Return compound value.
evaluate_ontology
Return ontology-derived value.
initialize_graph
    Initialize networkx.DiGraph for ontology analysis.
"""
from collections import Counter
from math import sqrt
import networkx as nx
import numpy as np
from exceptions import ReactionIdError
RELATIONS_DEMAND = set([
# 'has_functional_parent',
# 'has_parent_hydride',
# 'has_part',
'has_role',
'is_a',
'is_conjugate_acid_of',
'is_conjugate_base_of',
'is_enantiomer_of',
# 'is_substituent_group_from',
'is_tautomer_of',
])
RELATIONS_PRICE = set([
# 'has_functional_parent',
# 'has_parent_hydride',
'has_part',
'has_role',
'is_a',
'is_conjugate_acid_of',
'is_conjugate_base_of',
'is_enantiomer_of',
'is_substituent_group_from',
'is_tautomer_of',
])
CHEBIS_DEMAND = {
'50906': 1, # role
# Roles (ChEBI 50906)
'33232': 5, # application
'24432': 1, # biological role
'51086': 1, # chemical role
# Applications (ChEBI 33232)
'33286': 1, # agrochemical
'67079': 1, # anti-inflammatory agent
'77964': 1, # anticaking agent
'77973': 1, # antifoaming agent
'64857': 5, # cosmetic
'75358': 1, # curing agent
'27780': 1, # detergent
'37958': 1, # dye
'64047': 1, # food additive
    '48318': 5, # fragrance
'33292': 5, # fuel
'77968': 1, # humectant
'47867': 1, # indicator
'35209': 1, # label
'64345': 1, # MALDI matrix material
'25944': 1, # pesticide
'79056': 1, # plasticiser
'50406': 1, # probe
'76414': 1, # propellant
'33893': 3, # reagent
'78433': 1, # refrigerant
'46787': 3, # solvent
'35204': 1, # tracer
'52217': 3, # pharmaceutical
# Biological roles (ChEBI 24432)
'52210': 3, # pharmacological role
'50188': 1, # provitamin
'50913': 1, # fixative
'50846': 1, # immunomodulator
'52206': 1, # biochemical role
'24850': 1, # insect attractant
'73190': 1, # antimutagen
'35222': 1, # inhibitor
'35703': 1, # xenobiotic
# Chemical roles (ChEBI 51086)
'37527': 1, # acid
'22695': 1, # base
'74236': 3, # polymerisation monomer
'62803': 3, # fuel additive
'63046': 1, # emulsifier
'22586': 1, # antioxidant
'63490': 1, # explosive
'46787': 3, # solvent
'35225': 1, # buffer
'35223': 1, # catalyst
'52215': 1, # photochemical role
}
CHEBIS_PRICE = {
# Groups
'33249': 1, # organyl group
'23019': 1, # carbonyl group
'46883': 1, # carboxy group
'51422': 1, # organodiyl group
'79073': 1, # CHOH group
'43176': 1, # hydroxy group
'50860': 1, # organic molecular entity
# Organic molecular entities (ChEBI 50860)
'18059': 1, # lipid
'78840': 1, # olefinic compound
'64709': 1, # organic acid
'50047': 1, # organic amino compound
'33245': 1, # organic fundamental parent
'33822': 1, # organic hydroxy compound
'33635': 1, # organic polycyclic compound
# Lipids (ChEBI 18059)
'35366': 1, # fatty acid
'28868': 1, # fatty acid anion
'35748': 1, # fatty acid ester
'24026': 1, # fatty alcohol
'29348': 1, # fatty amide
'35741': 1, # glycerolipid
'131727': 1, # hydroxylipid
'24913': 1, # isoprenoid
# Olefinic compounds (ChEBI 78840)
'33641': 1, # olefin
# Acyclic olefins (ChEBI 33645)
'32878': 1, # alkene
# Organic fundamental parents (ChEBI 33245)
'24632': 2, # hydrocarbon
# Organic hydroxy compounds (ChEBI 33822)
'30879': 2, # alcohol
'33823': 1, # enol
'33853': 1, # phenols
# Metabolites
'75763': 1, # eukaryotic metabolite
'76924': 1, # plant metabolite
}
class ParseCharacterError(ValueError):
pass
def evaluate_compound(demand, price):
"""
Evaluate compound.
Parameters
----------
demand : number
Compound demand.
price : number
Compound price.
Returns
-------
number
Value of the compound.
Raises
------
TypeError
        If demand or price is not numeric.
ValueError
        If demand or price is negative.
"""
if not isinstance(demand, (float, int)):
raise TypeError('Input demand type must be float or int.')
elif not isinstance(price, (float, int)):
raise TypeError('Input price type must be float or int.')
elif demand < 0 or price < 0:
raise ValueError('Demand and price must be non-negative.')
return demand * price
def evaluate_ontology(graph, compound, target_values):
"""
Evaluate ontology.
Parameters
----------
graph : networkx.DiGraph
Use initialize_graph to create graph.
compound : string
ChEBI ID.
    target_values : dict
Mapping from ChEBI IDs to value numbers.
Returns
-------
int
Sum of found target values.
"""
values = []
for target, value in target_values.items():
try:
if nx.has_path(graph, compound, target):
values.append(value)
except nx.NetworkXError:
continue
return sum(values)
def initialize_graph(compound_relations, relation_types):
"""
Initialize a compound-centric graph for ChEBI ontology analysis.
Parameters
----------
compound_relations : dict
Mapping from ChEBI ID strings to dicts that map target ChEBI ID
strings to relation type strings.
relation_types : iterable
        Relation type strings to be included in creating edges between
        ChEBI ID string nodes.
Returns
-------
networkx.DiGraph object
"""
graph = nx.DiGraph()
for compound, targets in compound_relations.items():
for target, relation in targets.items():
if relation in relation_types:
graph.add_edge(compound, target)
return graph
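# Usage sketch (illustrative only): 'C1' and its relations below are made-up
# ChEBI-style entries, and the target-value dicts are restricted to IDs that
# occur in this toy graph. The values mirror CHEBIS_DEMAND['33292'] (fuel, 5)
# and CHEBIS_PRICE['30879'] (alcohol, 2).
if __name__ == '__main__':
    example_relations = {
        'C1': {'33292': 'has_role', '30879': 'is_a'},
    }
    demand_graph = initialize_graph(example_relations, RELATIONS_DEMAND)
    price_graph = initialize_graph(example_relations, RELATIONS_PRICE)
    demand = evaluate_ontology(demand_graph, 'C1', {'33292': 5})  # -> 5
    price = evaluate_ontology(price_graph, 'C1', {'30879': 2})  # -> 2
    print(evaluate_compound(demand, price))  # -> 10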
|
|
# Copyright (c) 2014 NetApp, Inc. All Rights Reserved.
# Copyright (c) 2015 Alex Meade. All Rights Reserved.
# Copyright (c) 2015 Rushil Chugh. All Rights Reserved.
# Copyright (c) 2015 Navneet Singh. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI driver for NetApp E-series storage systems.
"""
import copy
import math
import socket
import time
import uuid
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(na_opts.netapp_basicauth_opts)
CONF.register_opts(na_opts.netapp_connection_opts)
CONF.register_opts(na_opts.netapp_eseries_opts)
CONF.register_opts(na_opts.netapp_transport_opts)
class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
"""Executes commands relating to Volumes."""
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips',
'netapp_login', 'netapp_password',
'netapp_storage_pools']
SLEEP_SECS = 5
MAX_LUNS_PER_HOST = 255
HOST_TYPES = {'aix': 'AIX MPIO',
'avt': 'AVT_4M',
'factoryDefault': 'FactoryDefault',
'hpux': 'HP-UX TPGS',
'linux_atto': 'LnxTPGSALUA',
'linux_dm_mp': 'LnxALUA',
'linux_mpp_rdac': 'Linux',
'linux_pathmanager': 'LnxTPGSALUA_PM',
'macos': 'MacTPGSALUA',
'ontap': 'ONTAP',
'svc': 'SVC',
'solaris_v11': 'SolTPGSALUA',
'solaris_v10': 'Solaris',
'vmware': 'VmwTPGSALUA',
'windows':
'Windows 2000/Server 2003/Server 2008 Non-Clustered',
'windows_atto': 'WinTPGSALUA',
'windows_clustered':
'Windows 2000/Server 2003/Server 2008 Clustered'
}
# NOTE(ameade): This maps what is reported by the e-series api to a
# consistent set of values that are reported by all NetApp drivers
# to the cinder scheduler.
SSC_DISK_TYPE_MAPPING = {
'scsi': 'SCSI',
'fibre': 'FCAL',
'sas': 'SAS',
'sata': 'SATA',
}
SSC_UPDATE_INTERVAL = 60 # seconds
WORLDWIDENAME = 'worldWideName'
def __init__(self, *args, **kwargs):
super(NetAppEseriesISCSIDriver, self).__init__(*args, **kwargs)
na_utils.validate_instantiation(**kwargs)
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(
na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_eseries_opts)
self._backend_name = self.configuration.safe_get(
"volume_backend_name") or "NetApp_ESeries"
self._objects = {'disk_pool_refs': [], 'pools': [],
'volumes': {'label_ref': {}, 'ref_vol': {}},
'snapshots': {'label_ref': {}, 'ref_snap': {}}}
self._ssc_stats = {}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
port = self.configuration.netapp_server_port
scheme = self.configuration.netapp_transport_type.lower()
if port is None:
if scheme == 'http':
port = 8080
elif scheme == 'https':
port = 8443
self._client = client.RestClient(
scheme=scheme,
host=self.configuration.netapp_server_hostname,
port=port,
service_path=self.configuration.netapp_webservice_path,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password)
self._check_mode_get_or_register_storage_system()
def _start_periodic_tasks(self):
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc_info)
ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL)
def check_for_setup_error(self):
self._check_host_type()
self._check_multipath()
self._check_storage_system()
self._populate_system_objects()
self._start_periodic_tasks()
def _check_host_type(self):
self.host_type =\
self.HOST_TYPES.get(self.configuration.netapp_eseries_host_type,
None)
if not self.host_type:
raise exception.NetAppDriverException(
_('Configured host type is not supported.'))
def _check_multipath(self):
if not self.configuration.use_multipath_for_image_xfer:
msg = _LW('Production use of "%(backend)s" backend requires the '
'Cinder controller to have multipathing properly set up '
'and the configuration option "%(mpflag)s" to be set to '
'"True".') % {'backend': self._backend_name,
'mpflag': 'use_multipath_for_image_xfer'}
LOG.warning(msg)
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
def _resolve_host(host):
try:
ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
% {'host': host, 'e': e})
raise exception.NoValidHost(
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
% {'host': host, 'e': e})
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
ips = [x for x in ips if _resolve_host(x)]
host = na_utils.resolve_hostname(
self.configuration.netapp_server_hostname)
if host in ips:
LOG.info(_LI('Embedded mode detected.'))
system = self._client.list_storage_systems()[0]
else:
LOG.info(_LI('Proxy mode detected.'))
system = self._client.register_storage_system(
ips, password=self.configuration.netapp_sa_password)
self._client.set_system_id(system.get('id'))
def _check_storage_system(self):
"""Checks whether system is registered and has good status."""
try:
system = self._client.list_storage_system()
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
msg = _LI("System with controller addresses [%s] is not"
" registered with web service.")
LOG.info(msg % self.configuration.netapp_controller_ips)
password_not_in_sync = False
if system.get('status', '').lower() == 'passwordoutofsync':
password_not_in_sync = True
new_pwd = self.configuration.netapp_sa_password
self._client.update_stored_system_password(new_pwd)
time.sleep(self.SLEEP_SECS)
sa_comm_timeout = 60
comm_time = 0
while True:
system = self._client.list_storage_system()
status = system.get('status', '').lower()
# wait if array not contacted or
# password was not in sync previously.
if ((status == 'nevercontacted') or
(password_not_in_sync and status == 'passwordoutofsync')):
LOG.info(_LI('Waiting for web service array communication.'))
time.sleep(self.SLEEP_SECS)
comm_time = comm_time + self.SLEEP_SECS
if comm_time >= sa_comm_timeout:
msg = _("Failure in communication between web service and"
" array. Waited %s seconds. Verify array"
" configuration parameters.")
raise exception.NetAppDriverException(msg %
sa_comm_timeout)
else:
break
msg_dict = {'id': system.get('id'), 'status': status}
if (status == 'passwordoutofsync' or status == 'notsupported' or
status == 'offline'):
msg = _("System %(id)s found with bad status - %(status)s.")
raise exception.NetAppDriverException(msg % msg_dict)
LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
return True
def _populate_system_objects(self):
"""Get all system objects into cache."""
self._cache_allowed_disk_pool_refs()
for vol in self._client.list_volumes():
self._cache_volume(vol)
for sn in self._client.list_snapshot_groups():
self._cache_snap_grp(sn)
for image in self._client.list_snapshot_images():
self._cache_snap_img(image)
def _cache_allowed_disk_pool_refs(self):
"""Caches disk pools refs as per pools configured by user."""
d_pools = self.configuration.netapp_storage_pools
LOG.info(_LI('Configured storage pools %s.'), d_pools)
pools = [x.strip().lower() if x else None for x in d_pools.split(',')]
for pool in self._client.list_storage_pools():
if (pool.get('raidLevel') == 'raidDiskPool'
and pool['label'].lower() in pools):
self._objects['disk_pool_refs'].append(pool['volumeGroupRef'])
self._objects['pools'].append(pool)
def _cache_volume(self, obj):
"""Caches volumes for further reference."""
if (obj.get('volumeUse') == 'standardVolume' and obj.get('label')
and obj.get('volumeRef')
and obj.get('volumeGroupRef') in
self._objects['disk_pool_refs']):
self._objects['volumes']['label_ref'][obj['label']]\
= obj['volumeRef']
self._objects['volumes']['ref_vol'][obj['volumeRef']] = obj
def _cache_snap_grp(self, obj):
"""Caches snapshot groups."""
if (obj.get('label') and obj.get('pitGroupRef') and
obj.get('baseVolume') in self._objects['volumes']['ref_vol']):
self._objects['snapshots']['label_ref'][obj['label']] =\
obj['pitGroupRef']
self._objects['snapshots']['ref_snap'][obj['pitGroupRef']] = obj
def _cache_snap_img(self, image):
"""Caches snapshot image under corresponding snapshot group."""
group_id = image.get('pitGroupRef')
sn_gp = self._objects['snapshots']['ref_snap']
if group_id in sn_gp:
sn_gp[group_id]['images'] = sn_gp[group_id].get('images') or []
sn_gp[group_id]['images'].append(image)
def _cache_vol_mapping(self, mapping):
"""Caches volume mapping in volume object."""
vol_id = mapping['volumeRef']
volume = self._objects['volumes']['ref_vol'][vol_id]
volume['listOfMappings'] = volume.get('listOfMappings') or []
for mapp in volume['listOfMappings']:
if mapp['lunMappingRef'] == mapping['lunMappingRef']:
return
volume['listOfMappings'].append(mapping)
def _del_volume_frm_cache(self, label):
"""Deletes volume from cache."""
vol_id = self._objects['volumes']['label_ref'].get(label)
if vol_id:
self._objects['volumes']['ref_vol'].pop(vol_id, True)
self._objects['volumes']['label_ref'].pop(label)
else:
LOG.debug("Volume %s not cached.", label)
def _del_snapshot_frm_cache(self, obj_name):
"""Deletes snapshot group from cache."""
snap_id = self._objects['snapshots']['label_ref'].get(obj_name)
if snap_id:
self._objects['snapshots']['ref_snap'].pop(snap_id, True)
self._objects['snapshots']['label_ref'].pop(obj_name)
else:
LOG.debug("Snapshot %s not cached.", obj_name)
def _del_vol_mapping_frm_cache(self, mapping):
"""Deletes volume mapping under cached volume."""
vol_id = mapping['volumeRef']
volume = self._objects['volumes']['ref_vol'].get(vol_id) or {}
mappings = volume.get('listOfMappings') or []
try:
mappings.remove(mapping)
except ValueError:
LOG.debug("Mapping with id %s already removed.",
mapping['lunMappingRef'])
def _get_volume(self, uid):
label = utils.convert_uuid_to_es_fmt(uid)
return self._get_volume_with_label_wwn(label)
def _get_volume_with_label_wwn(self, label=None, wwn=None):
"""Searches volume with label or wwn or both."""
if not (label or wwn):
raise exception.InvalidInput(_('Either volume label or wwn'
' is required as input.'))
try:
return self._get_cached_volume(label)
except KeyError:
wwn = wwn.replace(':', '').upper() if wwn else None
for vol in self._client.list_volumes():
if label and vol.get('label') != label:
continue
if wwn and vol.get(self.WORLDWIDENAME).upper() != wwn:
continue
self._cache_volume(vol)
label = vol.get('label')
break
return self._get_cached_volume(label)
def _get_cached_volume(self, label):
vol_id = self._objects['volumes']['label_ref'][label]
return self._objects['volumes']['ref_vol'][vol_id]
def _get_cached_snapshot_grp(self, uid):
label = utils.convert_uuid_to_es_fmt(uid)
snap_id = self._objects['snapshots']['label_ref'][label]
return self._objects['snapshots']['ref_snap'][snap_id]
def _get_cached_snap_grp_image(self, uid):
group = self._get_cached_snapshot_grp(uid)
images = group.get('images')
if images:
sorted_imgs = sorted(images, key=lambda x: x['pitTimestamp'])
return sorted_imgs[0]
msg = _("No pit image found in snapshot group %s.") % group['label']
raise exception.NotFound(msg)
def _is_volume_containing_snaps(self, label):
"""Checks if volume contains snapshot groups."""
vol_id = self._objects['volumes']['label_ref'].get(label)
snp_grps = self._objects['snapshots']['ref_snap'].values()
for snap in snp_grps:
if snap['baseVolume'] == vol_id:
return True
return False
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
eseries_volume = self._get_volume(volume['name_id'])
for pool in self._objects['pools']:
if pool['volumeGroupRef'] == eseries_volume['volumeGroupRef']:
return pool['label']
return None
def create_volume(self, volume):
"""Creates a volume."""
LOG.debug('create_volume on %s' % volume['host'])
# get E-series pool label as pool name
eseries_pool_label = volume_utils.extract_host(volume['host'],
level='pool')
if eseries_pool_label is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id'])
# get size of the requested volume creation
size_gb = int(volume['size'])
vol = self._create_volume(eseries_pool_label, eseries_volume_label,
size_gb)
self._cache_volume(vol)
def _create_volume(self, eseries_pool_label, eseries_volume_label,
size_gb):
"""Creates volume with given label and size."""
target_pool = None
pools = self._client.list_storage_pools()
for pool in pools:
if pool["label"] == eseries_pool_label:
target_pool = pool
break
if not target_pool:
msg = _("Pools %s does not exist")
raise exception.NetAppDriverException(msg % eseries_pool_label)
try:
vol = self._client.create_volume(target_pool['volumeGroupRef'],
eseries_volume_label, size_gb)
LOG.info(_LI("Created volume with "
"label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating volume. Msg - %s."),
six.text_type(e))
return vol
def _schedule_and_create_volume(self, label, size_gb):
"""Creates volume with given label and size."""
avl_pools = self._get_sorted_avl_storage_pools(size_gb)
for pool in avl_pools:
try:
vol = self._client.create_volume(pool['volumeGroupRef'],
label, size_gb)
LOG.info(_LI("Created volume with label %s."), label)
return vol
except exception.NetAppDriverException as e:
LOG.error(_LE("Error creating volume. Msg - %s."), e)
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
label = utils.convert_uuid_to_es_fmt(volume['id'])
size = volume['size']
dst_vol = self._schedule_and_create_volume(label, size)
try:
src_vol = None
src_vol = self._create_snapshot_volume(snapshot['id'])
self._copy_volume_high_prior_readonly(src_vol, dst_vol)
self._cache_volume(dst_vol)
LOG.info(_LI("Created volume with label %s."), label)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
self._client.delete_volume(dst_vol['volumeRef'])
finally:
if src_vol:
try:
self._client.delete_snapshot_volume(src_vol['id'])
except exception.NetAppDriverException as e:
LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
else:
LOG.warning(_LW("Snapshot volume not found."))
def _create_snapshot_volume(self, snapshot_id):
"""Creates snapshot volume for given group with snapshot_id."""
group = self._get_cached_snapshot_grp(snapshot_id)
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_cached_snap_grp_image(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
capacity = int(image['pitCapacity']) / units.Gi
storage_pools = self._get_sorted_avl_storage_pools(capacity)
s_id = storage_pools[0]['volumeGroupRef']
return self._client.create_snapshot_volume(image['pitRef'], label,
group['baseVolume'], s_id)
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
% {'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
job = self._client.create_volume_copy_job(src_vol['id'],
dst_vol['volumeRef'])
while True:
j_st = self._client.list_vol_copy_job(job['volcopyRef'])
if (j_st['status'] == 'inProgress' or j_st['status'] ==
'pending' or j_st['status'] == 'unknown'):
time.sleep(self.SLEEP_SECS)
continue
if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
msg = _("Vol copy job for dest %s failed.")\
% dst_vol['label']
raise exception.NetAppDriverException(msg)
LOG.info(_LI("Vol copy job completed for dest %s.")
% dst_vol['label'])
break
finally:
if job:
try:
self._client.delete_vol_copy_job(job['volcopyRef'])
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting "
"job %s."), job['volcopyRef'])
else:
LOG.warning(_LW('Volume copy job for src vol %s not found.'),
src_vol['id'])
LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
snapshot = {'id': uuid.uuid4(), 'volume_id': src_vref['id']}
self.create_snapshot(snapshot)
try:
self.create_volume_from_snapshot(volume, snapshot)
finally:
try:
self.delete_snapshot(snapshot)
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting temp snapshot %s."),
snapshot['id'])
def delete_volume(self, volume):
"""Deletes a volume."""
try:
vol = self._get_volume(volume['name_id'])
self._delete_volume(vol['label'])
except KeyError:
LOG.info(_LI("Volume %s already deleted."), volume['id'])
return
def _delete_volume(self, label):
"""Deletes an array volume."""
vol_id = self._objects['volumes']['label_ref'].get(label)
if vol_id:
self._client.delete_volume(vol_id)
self._del_volume_frm_cache(label)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snap_grp, snap_image = None, None
snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
os_vol = self.db.volume_get(self.context, snapshot['volume_id'])
vol = self._get_volume(os_vol['name_id'])
vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_avl_storage_pools(vol_size_gb)
try:
snap_grp = self._client.create_snapshot_group(
snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
self._cache_snap_grp(snap_grp)
snap_image = self._client.create_snapshot_image(
snap_grp['pitGroupRef'])
self._cache_snap_img(snap_image)
LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
if snap_image is None and snap_grp:
self.delete_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
except KeyError:
LOG.warning(_LW("Snapshot %s already deleted.") % snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
snapshot_name = snap_grp['label']
self._del_snapshot_frm_cache(snapshot_name)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
initiator_name = connector['initiator']
vol = self._get_volume(volume['name_id'])
iscsi_details = self._get_iscsi_service_details()
iscsi_portal = self._get_iscsi_portal_for_vol(vol, iscsi_details)
mapping = self._map_volume_to_host(vol, initiator_name)
lun_id = mapping['lun']
self._cache_vol_mapping(mapping)
msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
msg = _("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.")
LOG.debug(msg % msg_fmt)
iqn = iscsi_portal['iqn']
address = iscsi_portal['ip']
port = iscsi_portal['tcp_port']
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def _get_iscsi_service_details(self):
"""Gets iscsi iqn, ip and port information."""
ports = []
hw_inventory = self._client.list_hardware_inventory()
iscsi_ports = hw_inventory.get('iscsiPorts')
if iscsi_ports:
for port in iscsi_ports:
if (port.get('ipv4Enabled') and port.get('iqn') and
port.get('ipv4Data') and
port['ipv4Data'].get('ipv4AddressData') and
port['ipv4Data']['ipv4AddressData']
.get('ipv4Address') and port['ipv4Data']
['ipv4AddressData'].get('configState')
== 'configured'):
iscsi_det = {}
iscsi_det['ip'] =\
port['ipv4Data']['ipv4AddressData']['ipv4Address']
iscsi_det['iqn'] = port['iqn']
iscsi_det['tcp_port'] = port.get('tcpListenPort')
iscsi_det['controller'] = port.get('controllerId')
ports.append(iscsi_det)
if not ports:
msg = _('No good iscsi portals found for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
return ports
def _get_iscsi_portal_for_vol(self, volume, portals, anyController=True):
"""Get the iscsi portal info relevant to volume."""
for portal in portals:
if portal.get('controller') == volume.get('currentManager'):
return portal
if anyController and portals:
return portals[0]
msg = _('No good iscsi portal found in supplied list for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
@cinder_utils.synchronized('map_es_volume')
def _map_volume_to_host(self, vol, initiator):
"""Maps the e-series volume to host with initiator."""
host = self._get_or_create_host(initiator, self.host_type)
vol_maps = self._get_host_mapping_for_vol_frm_array(vol)
for vol_map in vol_maps:
if vol_map.get('mapRef') == host['hostRef']:
return vol_map
else:
self._client.delete_volume_mapping(vol_map['lunMappingRef'])
self._del_vol_mapping_frm_cache(vol_map)
mappings = self._get_vol_mapping_for_host_frm_array(host['hostRef'])
lun = self._get_free_lun(host, mappings)
return self._client.create_volume_mapping(vol['volumeRef'],
host['hostRef'], lun)
def _get_or_create_host(self, port_id, host_type):
"""Fetch or create a host by given port."""
try:
host = self._get_host_with_port(port_id)
ht_def = self._get_host_type_definition(host_type)
if host.get('hostTypeIndex') == ht_def.get('index'):
return host
else:
try:
return self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
msg = _LW("Unable to update host type for host with "
"label %(l)s. %(e)s")
LOG.warning(msg % {'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
return self._create_host(port_id, host_type)
def _get_host_with_port(self, port_id):
"""Gets or creates a host with given port id."""
hosts = self._client.list_hosts()
for host in hosts:
if host.get('hostSidePorts'):
ports = host.get('hostSidePorts')
for port in ports:
if (port.get('type') == 'iscsi'
and port.get('address') == port_id):
return host
msg = _("Host with port %(port)s not found.")
raise exception.NotFound(msg % {'port': port_id})
def _create_host(self, port_id, host_type):
"""Creates host on system with given initiator as port_id."""
LOG.info(_LI("Creating host with port %s."), port_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
port_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
host_type = self._get_host_type_definition(host_type)
return self._client.create_host_with_port(label, host_type,
port_id, port_label)
def _get_host_type_definition(self, host_type):
"""Gets supported host type if available on storage system."""
host_types = self._client.list_host_types()
for ht in host_types:
if ht.get('name', 'unknown').lower() == host_type.lower():
return ht
raise exception.NotFound(_("Host type %s not supported.") % host_type)
def _get_free_lun(self, host, maps=None):
"""Gets free LUN for given host."""
ref = host['hostRef']
luns = maps or self._get_vol_mapping_for_host_frm_array(ref)
used_luns = set(map(lambda lun: int(lun['lun']), luns))
for lun in xrange(self.MAX_LUNS_PER_HOST):
if lun not in used_luns:
return lun
msg = _("No free LUNs. Host might exceeded max LUNs.")
raise exception.NetAppDriverException(msg)
def _get_vol_mapping_for_host_frm_array(self, host_ref):
"""Gets all volume mappings for given host from array."""
mappings = self._client.get_volume_mappings() or []
host_maps = filter(lambda x: x.get('mapRef') == host_ref, mappings)
return host_maps
def _get_host_mapping_for_vol_frm_array(self, volume):
"""Gets all host mappings for given volume from array."""
mappings = self._client.get_volume_mappings() or []
host_maps = filter(lambda x: x.get('volumeRef') == volume['volumeRef'],
mappings)
return host_maps
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
vol = self._get_volume(volume['name_id'])
host = self._get_host_with_port(connector['initiator'])
mapping = self._get_cached_vol_mapping_for_host(vol, host)
self._client.delete_volume_mapping(mapping['lunMappingRef'])
self._del_vol_mapping_frm_cache(mapping)
def _get_cached_vol_mapping_for_host(self, volume, host):
"""Gets cached volume mapping for given host."""
mappings = volume.get('listOfMappings') or []
for mapping in mappings:
if mapping.get('mapRef') == host['hostRef']:
return mapping
msg = _("Mapping not found for %(vol)s to host %(ht)s.")
raise exception.NotFound(msg % {'vol': volume['volumeRef'],
'ht': host['hostRef']})
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service."""
if refresh:
if not self._ssc_stats:
self._update_ssc_info()
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Update volume statistics."""
LOG.debug("Updating volume stats.")
data = dict()
data["volume_backend_name"] = self._backend_name
data["vendor_name"] = "NetApp"
data["driver_version"] = self.VERSION
data["storage_protocol"] = "iSCSI"
data["pools"] = []
pools = self._client.list_storage_pools()
for pool in pools:
cinder_pool = {}
cinder_pool["pool_name"] = pool.get("label", 0)
cinder_pool["QoS_support"] = False
cinder_pool["reserved_percentage"] = 0
if pool["volumeGroupRef"] in self._objects["disk_pool_refs"]:
tot_bytes = int(pool.get("totalRaidedSpace", 0))
used_bytes = int(pool.get("usedSpace", 0))
cinder_pool["free_capacity_gb"] = ((tot_bytes - used_bytes) /
units.Gi)
cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi
pool_ssc_stats = self._ssc_stats.get(pool["volumeGroupRef"])
if pool_ssc_stats:
cinder_pool.update(pool_ssc_stats)
data["pools"].append(cinder_pool)
self._stats = data
self._garbage_collect_tmp_vols()
@cinder_utils.synchronized("netapp_update_ssc_info", external=False)
def _update_ssc_info(self):
"""Periodically runs to update ssc information from the backend.
The self._ssc_stats attribute is updated with the following format.
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'") % self._backend_name)
self._ssc_stats = \
self._update_ssc_disk_encryption(self._objects["disk_pool_refs"])
self._ssc_stats = \
self._update_ssc_disk_types(self._objects["disk_pool_refs"])
def _update_ssc_disk_types(self, volume_groups):
"""Updates the given ssc dictionary with new disk type information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_disks = self._client.list_drives()
relevant_disks = filter(lambda x: x.get('currentVolumeGroupRef') in
volume_groups, all_disks)
for drive in relevant_disks:
current_vol_group = drive.get('currentVolumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
if drive.get("driveMediaType") == 'ssd':
ssc_stats[current_vol_group]['netapp_disk_type'] = 'SSD'
else:
disk_type = drive.get('interfaceType').get('driveType')
ssc_stats[current_vol_group]['netapp_disk_type'] = \
self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
return ssc_stats
def _update_ssc_disk_encryption(self, volume_groups):
"""Updates the given ssc dictionary with new disk encryption information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_pools = self._client.list_storage_pools()
relevant_pools = filter(lambda x: x.get('volumeGroupRef') in
volume_groups, all_pools)
for pool in relevant_pools:
current_vol_group = pool.get('volumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
ssc_stats[current_vol_group]['netapp_disk_encryption'] = 'true' \
if pool['securityType'] == 'enabled' else 'false'
return ssc_stats
def _get_sorted_avl_storage_pools(self, size_gb):
"""Returns storage pools sorted on available capacity."""
size = size_gb * units.Gi
pools = self._client.list_storage_pools()
sorted_pools = sorted(pools, key=lambda x:
(int(x.get('totalRaidedSpace', 0))
- int(x.get('usedSpace', 0))), reverse=True)
avl_pools = [x for x in sorted_pools
if (x['volumeGroupRef'] in
self._objects['disk_pool_refs']) and
(int(x.get('totalRaidedSpace', 0)) -
                      int(x.get('usedSpace', 0)) >= size)]
if not avl_pools:
msg = _LW("No storage pool found with available capacity %s.")
LOG.warning(msg % size_gb)
return avl_pools
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
stage_1, stage_2 = 0, 0
src_vol = self._get_volume(volume['name_id'])
src_label = src_vol['label']
stage_label = 'tmp-%s' % utils.convert_uuid_to_es_fmt(uuid.uuid4())
extend_vol = {'id': uuid.uuid4(), 'size': new_size}
self.create_cloned_volume(extend_vol, volume)
new_vol = self._get_volume(extend_vol['id'])
try:
stage_1 = self._client.update_volume(src_vol['id'], stage_label)
stage_2 = self._client.update_volume(new_vol['id'], src_label)
new_vol = stage_2
self._cache_volume(new_vol)
self._cache_volume(stage_1)
LOG.info(_LI('Extended volume with label %s.'), src_label)
except exception.NetAppDriverException:
if stage_1 == 0:
with excutils.save_and_reraise_exception():
self._client.delete_volume(new_vol['id'])
if stage_2 == 0:
with excutils.save_and_reraise_exception():
self._client.update_volume(src_vol['id'], src_label)
self._client.delete_volume(new_vol['id'])
def _garbage_collect_tmp_vols(self):
"""Removes tmp vols with no snapshots."""
try:
if not na_utils.set_safe_attr(self, 'clean_job_running', True):
LOG.warning(_LW('Returning as clean tmp '
'vol job already running.'))
return
for label in self._objects['volumes']['label_ref'].keys():
if (label.startswith('tmp-') and
not self._is_volume_containing_snaps(label)):
try:
self._delete_volume(label)
except exception.NetAppDriverException:
LOG.debug("Error deleting vol with label %s.",
label)
finally:
na_utils.set_safe_attr(self, 'clean_job_running', False)
@cinder_utils.synchronized('manage_existing')
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinder management."""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
label = utils.convert_uuid_to_es_fmt(volume['id'])
if label == vol['label']:
LOG.info(_LI("Volume with given ref %s need not be renamed during"
" manage operation."), existing_ref)
managed_vol = vol
else:
managed_vol = self._client.update_volume(vol['id'], label)
self._del_volume_frm_cache(vol['label'])
self._cache_volume(managed_vol)
LOG.info(_LI("Manage operation completed for volume with new label"
" %(label)s and wwn %(wwn)s."),
{'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
return int(math.ceil(float(vol['capacity']) / units.Gi))
def _get_existing_vol_with_manage_ref(self, volume, existing_ref):
try:
return self._get_volume_with_label_wwn(
existing_ref.get('source-name'), existing_ref.get('source-id'))
except exception.InvalidInput:
reason = _('Reference must contain either source-name'
' or source-id element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
except KeyError:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_('Volume not found on configured storage pools.'))
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. Logs a
message to indicate the volume is no longer under Cinder's control.
"""
managed_vol = self._get_volume(volume['id'])
LOG.info(_LI("Unmanaged volume with current label %(label)s and wwn "
"%(wwn)s."), {'label': managed_vol['label'],
'wwn': managed_vol[self.WORLDWIDENAME]})
|
|
# Natural Language Toolkit: Discourse Processing
#
# Author: Ewan Klein <[email protected]>
#
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# $Id: discourse.py 5783 2008-02-27 14:51:45Z ehk $
from nltk.sem import root_semrep, Expression
from nltk import parse
from nltk.inference import Mace, spacer, get_prover
from nltk.data import show_cfg
import os
"""
Module for incrementally developing simple discourses, and checking for semantic ambiguity,
consistency and informativeness.
Many of the ideas are based on the CURT family of programs of Blackburn and Bos
(see U{http://homepages.inf.ed.ac.uk/jbos/comsem/book1.html}).
Consistency checking is carried out by using the L{mace} module to call the Mace4 model builder.
Informativeness checking is carried out with a call to C{get_prover()} from
the L{inference} module.
C{DiscourseTester} is a constructor for discourses.
The basic data structure is a list of sentences, stored as C{self._sentences}. Each sentence in the list
is assigned a I{sentence ID} (C{sid}) of the form C{s}I{i}. For example::
s0: A boxer walks
s1: Every boxer chases a girl
Each sentence can be ambiguous between a number of readings, each of which receives a
I{reading ID} (C{rid}) of the form C{s}I{i}-C{r}I{j}. For example::
s0 readings:
------------------------------
s0-r1: some x.((boxer x) and (walk x))
s0-r0: some x.((boxerdog x) and (walk x))
A I{thread} is a list of readings, represented
as a list of C{rid}s. Each thread receives a I{thread ID} (C{tid}) of the form C{d}I{i}.
For example::
d0: ['s0-r0', 's1-r0']
The set of all threads for a discourse is the Cartesian product of all the readings of the sequences of sentences.
(This is not intended to scale beyond very short discourses!) The method L{readings(filter=True)} will only show
those threads which are consistent (taking into account any background assumptions).
"""
class DiscourseTester(object):
"""
Check properties of an ongoing discourse.
"""
def __init__(self, input, gramfile=None, background=None):
"""
Initialize a C{DiscourseTester}.
@parameter input: the discourse sentences
@type input: C{list} of C{str}
@parameter gramfile: name of file where grammar can be loaded
@type gramfile: C{str}
@parameter background: Formulas which express background assumptions
@type background: C{list} of L{logic.Expression}.
"""
self._input = input
self._sentences = dict([('s%s' % i, sent) for i, sent in enumerate(input)])
self._models = None
self._readings = {}
if gramfile is None:
self._gramfile = 'grammars/sem4.fcfg'
else:
self._gramfile = gramfile
self._threads = {}
self._filtered_threads = {}
self._parser = parse.load_earley(self._gramfile)
if background is not None:
for e in background:
assert isinstance(e, Expression)
self._background = background
else:
self._background = []
###############################
# Sentences
###############################
def sentences(self):
"""
Display the list of sentences in the current discourse.
"""
for id in sorted(self._sentences.keys()):
print "%s: %s" % (id, self._sentences[id])
def add_sentence(self, sentence, informchk=False, consistchk=False,):
"""
Add a sentence to the current discourse.
Updates C{self._input} and C{self._sentences}.
@parameter sentence: An input sentence
@type sentence: C{str}
@parameter informchk: if C{True}, check that the result of adding the sentence is thread-informative. Updates C{self._readings}.
@parameter consistchk: if C{True}, check that the result of adding the sentence is thread-consistent. Updates C{self._readings}.
"""
# check whether the new sentence is informative (i.e. not entailed by the previous discourse)
if informchk:
self.readings(quiet=True)
for tid in sorted(self._threads.keys()):
assumptions = [reading for (rid, reading) in self.expand_threads(tid)]
assumptions += self._background
for sent_reading in self._get_readings(sentence):
tp = get_prover(goal=sent_reading, assumptions=assumptions)
if tp.prove():
print "Sentence '%s' under reading '%s':" % (sentence, str(sent_reading.infixify()))
print "Not informative relative to thread '%s'" % tid
self._input.append(sentence)
self._sentences = dict([('s%s' % i, sent) for i, sent in enumerate(self._input)])
        # check whether adding the new sentence to the discourse preserves
        # consistency (i.e. a model can be found for the combined set of assumptions)
if consistchk:
self.readings(quiet=True)
self.models(show=False)
def retract_sentence(self, sentence, quiet=False):
"""
Remove a sentence from the current discourse.
Updates C{self._input}, C{self._sentences} and C{self._readings}.
@parameter sentence: An input sentence
@type sentence: C{str}
@parameter quiet: If C{False}, report on the updated list of sentences.
"""
self._input.remove(sentence)
self._sentences = dict([('s%s' % i, sent) for i, sent in enumerate(self._input)])
self.readings(quiet=True)
if not quiet:
print "Current sentences are "
for sent in self._sentences:
print sent
def grammar(self):
"""
Print out the grammar in use for parsing input sentences
"""
show_cfg(self._gramfile)
###############################
# Readings and Threads
###############################
def _get_readings(self, sentence):
"""
Build a list of semantic readings for a sentence.
@rtype: C{list} of L{logic.Expression}.
"""
tokens = sentence.split()
trees = self._parser.nbest_parse(tokens)
return [root_semrep(tree) for tree in trees]
def _construct_readings(self):
"""
Use C{self._sentences} to construct a value for C{self._readings}.
"""
for sid in self._sentences:
readings = self._get_readings(self._sentences[sid])
self._readings[sid] = dict([("%s-r%s" % (sid, rid), reading)
for rid, reading in enumerate(readings)])
def _construct_threads(self):
"""
Use C{self._readings} to construct a value for C{self._threads}
and use the model builder to construct a value for C{self._filtered_threads}
"""
thread_list = [[]]
for sid in sorted(self._readings.keys()):
thread_list = self.multiply(thread_list, sorted(self._readings[sid].keys()))
self._threads = dict([("d%s" % tid, thread) for tid, thread in enumerate(thread_list)])
# re-initialize the filtered threads
self._filtered_threads = {}
# keep the same ids, but only include threads which get models
for (tid, thread) in self._threads.items():
if (tid, True) in self._check_consistency(self._threads):
self._filtered_threads[tid] = thread
def _show_readings(self, sentence=None):
"""
Print out the readings for the discourse (or a single sentence).
"""
if sentence is not None:
print "The sentence '%s' has these readings:" % sentence
for r in [str(reading.infixify()) for reading in (self._get_readings(sentence))]:
print " %s" % r
else:
for sid in sorted(self._readings.keys()):
print
print '%s readings:' % sid
print '-' * 30
for rid in self._readings[sid]:
lf = str(self._readings[sid][rid].infixify())
print "%s: %s" % (rid, lf)
def _show_threads(self, filter=False):
"""
        Print out the value of C{self._threads} or C{self._filtered_threads}
"""
if filter:
threads = self._filtered_threads
else:
threads = self._threads
for tid in sorted(threads.keys()):
print "%s:" % tid, self._threads[tid]
def readings(self, sentence=None, threaded=False, quiet=False, filter=False):
"""
Construct and show the readings of the discourse (or of a single sentence).
@parameter sentence: test just this sentence
@type sentence: C{str}
@parameter threaded: if C{True}, print out each thread ID and the corresponding thread.
@parameter filter: if C{True}, only print out consistent thread IDs and threads.
"""
self._construct_readings()
self._construct_threads()
# if we are filtering, just show threads
if filter: threaded=True
if not quiet:
if not threaded:
self._show_readings(sentence=sentence)
else:
self._show_threads(filter=filter)
def expand_threads(self, thread_id, threads=None):
"""
Given a thread ID, find the list of L{logic.Expression}s corresponding to the reading IDs in that thread.
@parameter thread_id: thread ID
@type thread_id: C{str}
@parameter threads: a mapping from thread IDs to lists of reading IDs
@type threads: C{dict}
@return: A list of pairs (C{rid}, I{reading}) where I{reading} is the L{logic.Expression} associated with a reading ID
@rtype: C{list} of C{tuple}
"""
if threads is None:
threads = self._threads
return [(rid, self._readings[sid][rid]) for rid in threads[thread_id] for sid in rid.split('-')[:1]]
###############################
# Models and Background
###############################
def _check_consistency(self, threads, show=False, quiet=True):
results = []
for tid in sorted(threads.keys()):
assumptions = [reading for (rid, reading) in self.expand_threads(tid, threads=threads)]
assumptions += self._background
# if Mace4 finds a model, it always seems to find it quickly
mb = Mace('', assumptions, timeout=2)
modelfound = mb.build_model()
results.append((tid, modelfound))
if show:
spacer(80)
print "Model for Discourse Thread %s" % tid
spacer(80)
if not quiet:
for a in assumptions:
print a.infixify()
spacer(80)
if modelfound:
mb.show_model(format='cooked')
else:
print "No model found!\n"
return results
def models(self, thread_id=None, show=True, quiet=True):
"""
Call Mace4 to build a model for each current discourse thread.
@parameter thread_id: thread ID
@type thread_id: C{str}
@parameter show: If C{True}, display the model that has been found.
"""
self._construct_readings()
self._construct_threads()
if thread_id is None:
threads = self._threads
else:
threads = {thread_id: self._threads[thread_id]}
for (tid, modelfound) in self._check_consistency(threads, show=show, quiet=quiet):
idlist = [rid for rid in threads[tid]]
if not modelfound:
print "Inconsistent discourse %s %s:" % (tid, idlist)
for rid, reading in [(rid, str(reading.infixify())) for (rid, reading) in self.expand_threads(tid)]:
print " %s: %s" % (rid, reading)
print
else:
print "Consistent discourse: %s %s:" % (tid, idlist)
for rid, reading in [(rid, str(reading.infixify())) for (rid, reading) in self.expand_threads(tid)]:
print " %s: %s" % (rid, reading)
print
def add_background(self, background, quiet=False):
"""
Add a list of background assumptions for reasoning about the discourse.
When called, this method also updates the discourse model's set of readings and threads.
@parameter background: Formulas which contain background information
@type background: C{list} of L{logic.Expression}.
"""
for (count, e) in enumerate(background):
assert isinstance(e, Expression)
if not quiet:
print "Adding assumption %s to background" % count
self._background.append(e)
#update the state
self._construct_readings()
self._construct_threads()
def background(self):
"""
Show the current background assumptions.
"""
for e in self._background:
print str(e.infixify())
###############################
# Misc
###############################
@staticmethod
def multiply(discourse, readings):
"""
Multiply every thread in C{discourse} by every reading in C{readings}.
Given discourse = [['A'], ['B']], readings = ['a', 'b', 'c'] , returns
[['A', 'a'], ['A', 'b'], ['A', 'c'], ['B', 'a'], ['B', 'b'], ['B', 'c']]
@parameter discourse: the current list of readings
@type discourse: C{list} of C{list}s
@parameter readings: an additional list of readings
@type readings: C{list} of C{logic.Expression}s
@rtype: A C{list} of C{list}s
"""
result = []
for sublist in discourse:
for r in readings:
new = []
new += sublist
new.append(r)
result.append(new)
return result
#multiply = DiscourseTester.multiply
#L1 = [['A'], ['B']]
#L2 = ['a', 'b', 'c']
#print multiply(L1,L2)
def parse_fol(s):
"""
Temporarily duplicated from L{nltk.sem.util}.
Convert a file of First Order Formulas into a list of C{Expression}s.
@parameter s: the contents of the file
@type s: C{str}
@return: a list of parsed formulas.
@rtype: C{list} of L{Expression}
"""
from nltk.sem import LogicParser
statements = []
lp = LogicParser()
for linenum, line in enumerate(s.splitlines()):
line = line.strip()
if line.startswith('#') or line=='': continue
try:
statements.append(lp.parse(line))
        except Exception:
raise ValueError, 'Unable to parse line %s: %s' % (linenum, line)
return statements
###############################
# Demo
###############################
def discourse_demo():
"""
Illustrate the various methods of C{DiscourseTester}
"""
dt = DiscourseTester(['A boxer walks', 'Every boxer chases a girl'])
dt.models()
print
#dt.grammar()
print
dt.sentences()
print
dt.readings()
print
dt.readings(threaded=True)
print
dt.models('d1')
dt.add_sentence('John is a boxer')
print
dt.sentences()
print
dt.readings(threaded=True)
print
dt = DiscourseTester(['A student dances', 'Every student is a person'])
print
dt.add_sentence('No person dances', consistchk=True)
print
dt.readings()
print
dt.retract_sentence('No person dances', quiet=False)
print
dt.models()
print
dt.readings('A person dances')
print
dt.add_sentence('A person dances', informchk=True)
dt = DiscourseTester(['Vincent is a boxer', 'Fido is a boxer', 'Vincent is married', 'Fido barks'])
dt.readings(filter=True)
import nltk.data
world = nltk.data.load('/grammars/world.fol')
print
dt.add_background(world, quiet=True)
dt.background()
print
dt.readings(filter=True)
print
dt.models()
if __name__ == '__main__':
discourse_demo()
|
|
import msgpack
import json
import pickle
import os.path
from Queue import PriorityQueue
import re
import doench_score
import azimuth.model_comparison
import numpy as np
import pandas as pd
import csv
from intervaltree import IntervalTree
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, score, exon_ranking, ensembl_gene, gene_name, functional_domain, has_exome_repeat):
self.start = start
self.seq = seq
self.PAM = PAM
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
self.functional_domain = functional_domain
if functional_domain:
self.has_functional_domain = True
else:
self.has_functional_domain = False
self.has_exome_repeat = has_exome_repeat
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
serialization = {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"selected": self.selected,
"has_exome_repeat": self.has_exome_repeat
}
if self.functional_domain != None:
serialization["functional_domain"] = self.functional_domain
return serialization
def __cmp__(self, other):
def cmp_scheme(g):
return (-g.has_exome_repeat, g.has_functional_domain, g.score)
return cmp(cmp_scheme(self), cmp_scheme(other))
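# Ranking note: the comparison above treats guides without an exome repeat as
# better than guides with one, then prefers guides that overlap a functional
# domain, then a higher activity score. The PriorityQueue used below pops the
# lowest-ranked (worst) candidate first, so the queue retains the best guides.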
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"scoring": "Azimuth",
"quantity": 100,
"functional_domains": True,
"mer_len": 10
}
# azimuth model
print "loading azimuth models"
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
azimuth_model = pickle.load(f)
azimuth_scores_file = 'azimuth_scores.p'
with open(azimuth_scores_file, 'rb') as inp:
azimuth_scores = pickle.load(inp)
def get_azimuth_score(mer30):
if mer30 in azimuth_scores:
return azimuth_scores[mer30]
else:
score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
print "generating Azimuth", mer30, score
azimuth_scores[mer30] = score
return score
# load in exome
exome_path_hum = 'exome_hum.txt'
mer_len = params['mer_len']
# process kmers
# consider all kmers which are followed by NGG
print "preparing hum kmers"
with open(exome_path_hum, 'r') as input:
exome = input.read()
exome_mers = {}
for i in range(len(exome) - mer_len - 3):
if exome[i + mer_len + 1 : i + mer_len + 3] == "GG":
s = exome[i:i + mer_len]
if s in exome_mers:
exome_mers[s] += 1
else:
exome_mers[s] = 1
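# exome_mers now maps every mer_len-mer that is followed by NGG somewhere in
# the exome to its number of such occurrences, e.g. {'ACGTACGTAC': 3, ...}
# (entry shown is illustrative only).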
# takes in a protospacer sequence string
# returns whether the PAM-proximal seed occurs more than once in the exome (followed by NGG)
def hasExomeRepeat(protospacer):
  guide_seq = protospacer[-mer_len:] # get PAM-proximal mer_len bases
  hits = exome_mers.get(guide_seq, 0) # how many times does it occur in the exome followed by NGG?
  return hits >= 2
# Create interval tree for functional domains
print "constructing interval tuples"
interval_tuples_dict = {}
ucsc_pfam_f = '../functional_domains/ucsc_pfam.txt'
with open(ucsc_pfam_f, 'r') as pfam_csv:
csvreader = csv.reader(pfam_csv, delimiter='\t')
next(csvreader) # skip header
for row in csvreader:
chrom = row[1]
start = row[2]
end = row[3]
name = row[4]
if chrom not in interval_tuples_dict:
interval_tuples_dict[chrom] = []
new_tuple = (int(start), int(end), name)
interval_tuples_dict[chrom].append(new_tuple)
print "constructing interval trees"
interval_trees_dict = {}
for k, v in interval_tuples_dict.iteritems():
interval_trees_dict[k] = IntervalTree.from_tuples(v)
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
print "constructing refGene"
refGeneFilename = '../gtex/refGene.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
def gene_exon_coords(gene, exon):
try:
start = list(refGene.loc[refGene['name'] == gene]['exonStarts'])[0][exon]
end = list(refGene.loc[refGene['name'] == gene]['exonEnds'])[0][exon]
chrom = list(refGene.loc[refGene['name'] == gene]['chrom'])[0]
return {
'start': int(start),
'end': int(end),
'chrom': str(chrom)
}
except IndexError:
return None
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCh37_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read()
else:
return None
print "beginning gene by gene processing"
with open('genes_list.json') as genes_list_file:
genes_list = json.load(genes_list_file)
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
for gene in genes_list:
exon = 0
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
while seq:
# Check if we haven't done this in a previous run of the program
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../GRCh37_guides_msgpack_' + params["scoring"] + '/'
if params['functional_domains']:
folder = '../GRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
if os.path.isfile(output_path):
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
continue
q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq, domain):
if 'N' in seq:
return
PAM_start = m.start()
score = 0
if params["scoring"] == "Doench":
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
elif params["scoring"] == "Azimuth":
# Azimuth requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = get_azimuth_score(mer30)
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
has_exome_repeat = hasExomeRepeat(protospacer)
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, score, exon, gene["ensembl_id"], gene["name"], domain, has_exome_repeat)
# If there's enough room, add it, no question.
if q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if cmp(potential_gRNA, lowest_gRNA) == 1: # if potential_gRNA > lowest_gRNA
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": # spCas9
cut_site = coords['start'] + m.start() - 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq, domain)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": #spCas9
cut_site = coords['end'] - m.start() + 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq_rc, domain)
# Pop gRNAs into our 'permanent' storage
gRNAs = []
while not q.empty():
gRNA = q.get()
gRNAs.append(gRNA.serialize_for_display())
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../GRCh37_guides_msgpack_' + params['scoring'] + '/'
if params['functional_domains']:
folder = '../GRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
        with open(output_path, 'wb') as outfile:
# Reverse gRNAs list.
# Want highest on-target first.
msgpack.dump(gRNAs[::-1], outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
with open('azimuth_scores.p', 'wb') as output:
pickle.dump(azimuth_scores, output)
|
|
"""
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined
otherwise.
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
"""
import re
import pydoc
import inspect
import collections
import hashlib
from docutils.nodes import citation, Text
import sphinx
from sphinx.addnodes import pending_xref, desc_content
if sphinx.__version__ < '1.0.1':
raise RuntimeError("Sphinx 1.0.1 or newer is required")
from .docscrape_sphinx import get_doc_object, SphinxDocString
from . import __version__
HASH_LEN = 12
def rename_references(app, what, name, obj, options, lines):
# decorate reference numbers so that there are no duplicates
# these are later undecorated in the doctree, in relabel_references
references = set()
for line in lines:
line = line.strip()
m = re.match('^.. \\[(%s)\\]' % app.config.numpydoc_citation_re,
line, re.I)
if m:
references.add(m.group(1))
if references:
# we use a hash to mangle the reference name to avoid invalid names
sha = hashlib.sha256()
sha.update(name.encode('utf8'))
prefix = 'R' + sha.hexdigest()[:HASH_LEN]
for r in references:
new_r = prefix + '-' + r
for i, line in enumerate(lines):
lines[i] = lines[i].replace('[%s]_' % r,
'[%s]_' % new_r)
lines[i] = lines[i].replace('.. [%s]' % r,
'.. [%s]' % new_r)
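# Example of the decoration above: a citation "[1]_" with label ".. [1]" in an
# object named e.g. "pkg.func" becomes "[R<12-hex-chars>-1]_" / ".. [R<12-hex-chars>-1]",
# where the hex prefix is sha256("pkg.func") truncated to HASH_LEN digits;
# relabel_references() below strips the prefix again for display.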
def _ascend(node, cls):
while node and not isinstance(node, cls):
node = node.parent
return node
def relabel_references(app, doc):
# Change 'hash-ref' to 'ref' in label text
for citation_node in doc.traverse(citation):
if _ascend(citation_node, desc_content) is None:
# no desc node in ancestry -> not in a docstring
# XXX: should we also somehow check it's in a References section?
continue
label_node = citation_node[0]
prefix, _, new_label = label_node[0].astext().partition('-')
assert len(prefix) == HASH_LEN + 1
new_text = Text(new_label)
label_node.replace(label_node[0], new_text)
for id in citation_node['backrefs']:
ref = doc.ids[id]
ref_text = ref[0]
# Sphinx has created pending_xref nodes with [reftext] text.
def matching_pending_xref(node):
return (isinstance(node, pending_xref) and
node[0].astext() == '[%s]' % ref_text)
for xref_node in ref.parent.traverse(matching_pending_xref):
xref_node.replace(xref_node[0], Text('[%s]' % new_text))
ref.replace(ref_text, new_text.copy())
DEDUPLICATION_TAG = ' !! processed by numpydoc !!'
def mangle_docstrings(app, what, name, obj, options, lines):
if DEDUPLICATION_TAG in lines:
return
cfg = {'use_plots': app.config.numpydoc_use_plots,
'use_blockquotes': app.config.numpydoc_use_blockquotes,
'show_class_members': app.config.numpydoc_show_class_members,
'show_inherited_class_members':
app.config.numpydoc_show_inherited_class_members,
'class_members_toctree': app.config.numpydoc_class_members_toctree}
u_NL = '\n'
if what == 'module':
# Strip top title
pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'
title_re = re.compile(pattern, re.I | re.S)
lines[:] = title_re.sub('', u_NL.join(lines)).split(u_NL)
else:
doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg,
builder=app.builder)
doc = str(doc)
lines[:] = doc.split(u_NL)
if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and
obj.__name__):
if hasattr(obj, '__module__'):
v = dict(full_name="{}.{}".format(obj.__module__, obj.__name__))
else:
v = dict(full_name=obj.__name__)
lines += ['', '.. htmlonly::', '']
lines += [' %s' % x for x in
(app.config.numpydoc_edit_link % v).split("\n")]
# call function to replace reference numbers so that there are no
# duplicates
rename_references(app, what, name, obj, options, lines)
lines += ['..', DEDUPLICATION_TAG]
def mangle_signature(app, what, name, obj, options, sig, retann):
# Do not try to inspect classes that don't define `__init__`
if (inspect.isclass(obj) and
(not hasattr(obj, '__init__') or
'initializes x; see ' in pydoc.getdoc(obj.__init__))):
return '', ''
if not (isinstance(obj, collections.Callable) or
hasattr(obj, '__argspec_is_invalid_')):
return
if not hasattr(obj, '__doc__'):
return
doc = SphinxDocString(pydoc.getdoc(obj))
sig = doc['Signature'] or getattr(obj, '__text_signature__', None)
if sig:
sig = re.sub("^[^(]*", "", sig)
return sig, ''
def setup(app, get_doc_object_=get_doc_object):
if not hasattr(app, 'add_config_value'):
return # probably called by nose, better bail out
global get_doc_object
get_doc_object = get_doc_object_
app.connect('autodoc-process-docstring', mangle_docstrings)
app.connect('autodoc-process-signature', mangle_signature)
app.connect('doctree-read', relabel_references)
app.add_config_value('numpydoc_edit_link', None, False)
app.add_config_value('numpydoc_use_plots', None, False)
app.add_config_value('numpydoc_use_blockquotes', None, False)
app.add_config_value('numpydoc_show_class_members', True, True)
app.add_config_value('numpydoc_show_inherited_class_members', True, True)
app.add_config_value('numpydoc_class_members_toctree', True, True)
app.add_config_value('numpydoc_citation_re', '[a-z0-9_.-]+', True)
# Extra mangling domains
app.add_domain(NumpyPythonDomain)
app.add_domain(NumpyCDomain)
app.setup_extension('sphinx.ext.autosummary')
metadata = {'version': __version__,
'parallel_read_safe': True}
return metadata
# ------------------------------------------------------------------------------
# Docstring-mangling domains
# ------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase:
directive_mangling_map = {}
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
self.wrap_mangling_directives()
def wrap_mangling_directives(self):
for name, objtype in list(self.directive_mangling_map.items()):
self.directives[name] = wrap_mangling_directive(
self.directives[name], objtype)
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
name = 'np'
directive_mangling_map = {
'function': 'function',
'class': 'class',
'exception': 'class',
'method': 'function',
'classmethod': 'function',
'staticmethod': 'function',
'attribute': 'attribute',
}
indices = []
class NumpyCDomain(ManglingDomainBase, CDomain):
name = 'np-c'
directive_mangling_map = {
'function': 'function',
'member': 'attribute',
'macro': 'function',
'type': 'class',
'var': 'object',
}
def match_items(lines, content_old):
"""Create items for mangled lines.
This function tries to match the lines in ``lines`` with the items (source
file references and line numbers) in ``content_old``. The
``mangle_docstrings`` function changes the actual docstrings, but doesn't
    keep track of where each line came from. The mangling does many operations
on the original lines, which are hard to track afterwards.
Many of the line changes come from deleting or inserting blank lines. This
function tries to match lines by ignoring blank lines. All other changes
(such as inserting figures or changes in the references) are completely
ignored, so the generated line numbers will be off if ``mangle_docstrings``
does anything non-trivial.
This is a best-effort function and the real fix would be to make
``mangle_docstrings`` actually keep track of the ``items`` together with
the ``lines``.
Examples
--------
>>> lines = ['', 'A', '', 'B', ' ', '', 'C', 'D']
>>> lines_old = ['a', '', '', 'b', '', 'c']
>>> items_old = [('file1.py', 0), ('file1.py', 1), ('file1.py', 2),
... ('file2.py', 0), ('file2.py', 1), ('file2.py', 2)]
>>> content_old = ViewList(lines_old, items=items_old)
>>> match_items(lines, content_old) # doctest: +NORMALIZE_WHITESPACE
[('file1.py', 0), ('file1.py', 0), ('file2.py', 0), ('file2.py', 0),
('file2.py', 2), ('file2.py', 2), ('file2.py', 2), ('file2.py', 2)]
>>> # first 2 ``lines`` are matched to 'a', second 2 to 'b', rest to 'c'
>>> # actual content is completely ignored.
Notes
-----
The algorithm tries to match any line in ``lines`` with one in
``lines_old``. It skips over all empty lines in ``lines_old`` and assigns
this line number to all lines in ``lines``, unless a non-empty line is
found in ``lines`` in which case it goes to the next line in ``lines_old``.
"""
items_new = []
lines_old = content_old.data
items_old = content_old.items
j = 0
for i, line in enumerate(lines):
# go to next non-empty line in old:
# line.strip() checks whether the string is all whitespace
while j < len(lines_old) - 1 and not lines_old[j].strip():
j += 1
items_new.append(items_old[j])
if line.strip() and j < len(lines_old) - 1:
j += 1
assert(len(items_new) == len(lines))
return items_new
def wrap_mangling_directive(base_directive, objtype):
class directive(base_directive):
def run(self):
env = self.state.document.settings.env
name = None
if self.arguments:
m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
name = m.group(2).strip()
if not name:
name = self.arguments[0]
lines = list(self.content)
mangle_docstrings(env.app, objtype, name, None, None, lines)
if self.content:
items = match_items(lines, self.content)
self.content = ViewList(lines, items=items,
parent=self.content.parent)
return base_directive.run(self)
return directive
|
|
import os
import subprocess
import readline
import sys
from contextlib import redirect_stdout
#def __init__(self, cmd_output,dir= 'blank', dev='blank'):
# if dir is 'blank':
# print("Directory is blank")
# self.dir=get_dir()
#
# if dev is 'blank':
# print("Device is blank")
# self.dev=self.get_dev()
#
# if cmd_output is None:
# print("Need cmd")
#
total_list= ['']
file_list= ['']
directory_list= ['']
def get_dir(dir_path):
if dir_path is None:
return input("Please enter the directory: ")
else:
return dir_path
def get_dev(dev_path):
    if dev_path is None:
        return input("Please enter the device: ")
    else:
        return dev_path
def list_directories(input):
# make a list out of the input from running the ntfsls command
#print(input)
with open('out.log', 'w') as file:
with redirect_stdout(file):
print(input)
with open('out.log', 'r') as file:
list = [line.rstrip('\n') for line in file]
#print(list)
#
return list
def print_out_list(input_list):
for line in input_list:
print( str(input_list.index(line)) + " : " + line )
def get_file_path():
return input("Enter the directory for the NTFS drive : ")
def select_option(input_list):
print_out_list(input_list)
cur_list = input_list
option = input("Select your option: ")
if option == 'back':
return option
if option == 'grab_all':
return option
if option == 'grab_all_quiet':
return option
else:
        try:
            return input_list[int(option)]
        except Exception as e:
            print("Something happened: %s\nSelect an option again." % str(e))
            return select_option(input_list)
def file_or_directory_curdir(input, f_list, d_list):
    for item in list(input):  # iterate over a copy so removals don't skip items
if '"' in item:
input.remove(item)
elif '.' in item:
#print(item + " is a file")
f_list.append(item)
elif '.' not in item:
#print(item + " is a directory")
d_list.append(item)
else:
print("Cannot determine if its a file or directory")
return f_list, d_list, input
def run_ls(dev,path=''):
output = subprocess.getoutput("sudo ntfsls -f %s -p '%s'" % (dev, path))
return output
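# Illustrative example: run_ls("/dev/sdd4", "Documents") shells out to
#   sudo ntfsls -f /dev/sdd4 -p 'Documents'
# ("Documents" is just a placeholder path; the device and path normally come
# from the prompts in run_main below).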
def grab_file(file_path, file_name, device_path, auto_yes = 'no'):
if auto_yes =='yes':
print("Grabbing %s...." % file_path)
subprocess.getoutput("sudo ntfscat -f %s %s | dd of='%s' " % (device_path, file_path, file_name))
else:
response = input("""Do you want me to grab : """ + file_path + "[yes/no]? ")
if response == 'yes' or response == 'y':
print("Grabbing file.......")
subprocess.getoutput("sudo ntfscat -f %s %s | dd of='%s' " % (device_path, file_path, file_name))
else:
print("Restarting.....")
def if_a_file(object):
if "." in object:
return True
else:
return False
def grab_all_file(input_list, device_path, directory_path , auto_yes = 'no'):
for item in input_list:
target = (" '%s/%s' " % (directory_path , item))
grab_file(target,item,device_path, 'yes')
def run_main(device_path="", directory_path="" , last_directory="/"):
stop = False
while stop == False:
total_list= ['']
file_list= ['']
directory_list= ['']
current_selection = ''
print(device_path)
print(directory_path)
print(last_directory)
raw_output = run_ls(device_path,directory_path)
total_list =list_directories(raw_output)
file_or_directory_curdir(total_list, file_list, directory_list)
#print(file_list)
print("****")
#print(directory_list)
print("****")
print("****")
print("****")
#print_out_list(total_list)
selection_name = select_option(total_list)
print("*******/n/n******")
if selection_name == 'back':
directory_path = last_directory
run_main(device_path, directory_path)
        for item in list(file_list):  # copy: removing while iterating would skip entries
if ' ' in item:
file_list.remove(item)
else:
print("%s passed file" % item)
result = directory_path+ '/'+ selection_name
if selection_name == 'grab_all':
grab_all_file(file_list, device_path, directory_path)
print("$$$$$$")
run_main(device_path,directory_path)
elif selection_name == 'grab_all_quiet':
print("grabbing all files****!!@@@")
grab_all_file(file_list, device_path, directory_path, 'yes')
            run_main(device_path, directory_path)
if if_a_file(result):
print("%s is a file" % result)
grab_file(result, selection_name, device_path)
else:
print("%s is a directory" % result)
ans = input("Do you want me to open [yes/no/back]? ")
if ans == 'yes' or ans =='y':
last_directory = directory_path
directory_path = result
run_main(device_path,directory_path)
elif ans == 'back':
print("Running again....")
run_main(device_path,directory_path)
else:
print("Unknow option running again...")
run_main(device_path, directory_path)
quit = input("Continue running [Yes/No]: ")
if quit == 'no' or quit == 'n':
stop = True
else:
print("Run again.....")
if __name__ == "__main__":
dir = input("Please enter the directory: ")
    if dir == '':
        print("DIR: /Users/Owner/Pictures")
        dir = "/Users/Owner/Pictures"
    dev = input("Please enter the device (/dev/sda#): ")
    if dev == '':
        print("DEV: /dev/sdd4")
        dev = "/dev/sdd4"
last_directory = "/"
run_main(dev,dir, last_directory)
|
|
# -*- coding: utf-8 -*-
#
# Created on 7/28/13
"""Common settings and globals."""
from os import listdir, mkdir
from os.path import abspath, basename, dirname, join, normpath, isdir, exists
from sys import path, stderr
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
ADMIN_URL = '/admin/'
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# Module name to format dates and hours with specific locales
FORMAT_MODULE_PATH = 'project.formats'
# Default formatting for time and date
DATE_FORMAT = 'd.m.Y'
DATETIME_FORMAT = 'd.m.Y H:i'
TIME_FORMAT = 'H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
LOCALE_PATHS = tuple(join(join(SITE_ROOT, subdir), 'locale') for subdir in
listdir(SITE_ROOT) if isdir(join(SITE_ROOT, subdir)))
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
def define_assets_path(local_root_path):
"""Get or create system path to resources"""
# Absolute filesystem path to the directory that will hold user-uploaded
# files. Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = join(local_root_path, 'media')
try:
if exists(local_root_path) and not exists(MEDIA_ROOT):
mkdir(MEDIA_ROOT)
except OSError:
# Need this to log into stderr for tracking problems.
        # On Apache, this will be redirected to the ErrorLog.
print >>stderr, 'Cannot create {0} folder'.format(MEDIA_ROOT)
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = join(local_root_path, 'assets')
try:
if exists(local_root_path) and not exists(STATIC_ROOT):
mkdir(STATIC_ROOT)
except OSError:
print >>stderr, 'Cannot create {0} folder'.format(STATIC_ROOT)
return MEDIA_ROOT, STATIC_ROOT
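# Example usage (illustrative sketch; the path is a placeholder): an
# environment-specific settings module could relocate uploads and collected
# static files with
#   MEDIA_ROOT, STATIC_ROOT = define_assets_path('/srv/www/project_data')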
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = r"{{ secret_key }}"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'sekizai.context_processors.sekizai',
'{{ project_name }}.context_processors.debug_local',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'johnny.middleware.LocalStoreClearMiddleware',
# 'johnny.middleware.QueryCacheMiddleware',
]
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
'south',
'raven',
'sekizai',
'django_extensions',
'compressor',
)
# Apps specific for this project go here.
LOCAL_APPS = (
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
def define_logger(log_level, logs_path_sys=None):
"""Create the dict of parameters for logging."""
if not logs_path_sys or not exists(logs_path_sys):
logs_path = join(SITE_ROOT, 'logs')
if not exists(logs_path):
try:
mkdir(logs_path)
except OSError:
# Need this to log into stderr for tracking problems.
                # On Apache, this will be redirected to the ErrorLog.
print >>stderr, 'Cannot create {0} folder'.format(logs_path)
else:
logs_path = logs_path_sys
logging_dict = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'standard': {
'format': '%(levelname)s %(asctime)s %(name)s.%(module)s.'
'%(funcName)s:L%(lineno)d ProcessNo:%(process)d/'
'ThreadNo:%(thread)d "%(message)s"',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'normative': {
'format': '%(levelname)s %(asctime)s %(module)s.'
'%(funcName)s:L%(lineno)d "%(message)s"',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
'errors': {
'level': 'WARNING',
'class': '{{ project_name }}.utils.print_helpers'
'.SplitStreamHandler',
'formatter': 'normative'
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.handlers.SentryHandler',
},
'default_file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': join(logs_path, '{{ project_name }}.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'tests_file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': join(logs_path, '{{ project_name }}-tests.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'include_html': True,
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['sentry'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['sentry', 'mail_admins', 'errors'],
'propagate': False,
'level': log_level,
},
'django.db.backends': {
'level': 'ERROR',
'handlers': ['sentry', 'console', 'errors'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['sentry', 'console'],
'propagate': True,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['sentry', 'console'],
'propagate': False,
},
'default': {
'handlers': ['sentry', 'default_file', 'errors', 'console'],
'propagate': True,
'level': 'INFO',
},
'test': {
'handlers': ['tests_file', 'errors', 'console'],
'propagate': True,
'level': 'DEBUG',
},
}
}
if log_level == 'DEBUG':
# make all loggers use the console.
for logger in logging_dict['loggers']:
logging_dict['loggers'][logger]['handlers'] = ['console']
return logging_dict
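# Example usage (illustrative sketch): an environment-specific settings module
# is expected to build Django's LOGGING setting from this helper, e.g.
#   LOGGING = define_logger('INFO')
# or, pointing at a system log directory (placeholder path):
#   LOGGING = define_logger('DEBUG', '/var/log/{{ project_name }}')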
########## END LOGGING CONFIGURATION
########## CSS / JS / LESS COMPRESSOR (require dep: lessc)
COMPRESS_ENABLED = False
COMPRESS_OUTPUT_DIR = 'c'
STATICFILES_FINDERS += ('compressor.finders.CompressorFinder',)
#COMPRESS_CACHE_BACKEND = 'compressor'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
]
COMPRESS_JS_FILTERS = ['compressor.filters.jsmin.JSMinFilter']
COMPRESS_PRECOMPILERS = (
('text/less', '/usr/local/bin/lessc {infile} {outfile}'),
)
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
########## END COMPRESSION CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
########## END WSGI CONFIGURATION
########## TOOLBAR CONFIGURATION
ALLOWED_DEBUGGERS = []
def set_toolbar():
"""Set toolbar options"""
    def benchmark_callback(request):
        """Show the debug toolbar only for users listed in
        ALLOWED_DEBUGGERS."""
        return request.user.username in ALLOWED_DEBUGGERS
extra_installed_apps = (
'debug_toolbar',
)
MIDDLEWARE_CLASSES.append(
'debug_toolbar.middleware.DebugToolbarMiddleware',)
# IPs allowed to see django-debug-toolbar output.
INTERNAL_IPS = ('127.0.0.1', '0.0.0.0', '192.168.*.*',)
DEBUG_TOOLBAR_CONFIG = {
# If set to True (default), the debug toolbar will show an
# intermediate page upon redirect so you can view any debug
# information prior to redirecting. This page will provide a link
# to the redirect destination you can follow when ready. If set to
# False, redirects will proceed as normal.
'INTERCEPT_REDIRECTS': False,
# If not set or set to None, the debug_toolbar middleware will use
# its built-in show_toolbar method for determining whether the
# toolbar should show or not. The default checks are that DEBUG
# must be set to True and the IP of the request must be in
# INTERNAL_IPS. You can provide your own method for displaying the
# toolbar which contains your custom logic.
# method should return True or False.
'SHOW_TOOLBAR_CALLBACK': benchmark_callback,
# An array of custom signals that might be in your project, defined
# as the python path to the signal.
'EXTRA_SIGNALS': [],
# If set to True (the default) then code in Django itself won't be
# shown in SQL stacktraces.
'HIDE_DJANGO_SQL': True,
# If set to True (the default) then a template's context will be
# included with it in the Template debug panel. Turning this off is
# useful when you have large template contexts, or you have template
# contexts with lazy data structures that you don't want to be
# evaluated.
'SHOW_TEMPLATE_CONTEXT': True,
# If set, this will be the tag to which debug_toolbar will attach
# the debug toolbar. Defaults to 'body'.
'TAG': 'body',
# If set, this will show stacktraces for SQL queries and cache calls.
# Enabling stacktraces can increase the CPU time used when executing
# queries. Defaults to True.
'ENABLE_STACKTRACES': True,
}
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.cache.CacheDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
return extra_installed_apps
########## END TOOLBAR CONFIGURATION
|
|
from __future__ import division
import math
import os
import sys
import re
import shutil
import tempfile
import codecs
import subprocess
import atexit
import weakref
import matplotlib as mpl
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib import _png, rcParams
from matplotlib import font_manager
from matplotlib.ft2font import FT2Font
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.cbook import check_output
###############################################################################
# create a list of system fonts, all of these should work with xe/lua-latex
system_fonts = []
for f in font_manager.findSystemFonts():
try:
system_fonts.append(FT2Font(str(f)).family_name)
except RuntimeError:
pass # some fonts on osx are known to fail, print?
except:
pass # unknown error, skip this font
def get_texcommand():
"""Get chosen TeX system from rc."""
texsystem_options = ["xelatex", "lualatex", "pdflatex"]
texsystem = rcParams.get("pgf.texsystem", "xelatex")
return texsystem if texsystem in texsystem_options else "xelatex"
def get_fontspec():
"""Build fontspec preamble from rc."""
latex_fontspec = []
texcommand = get_texcommand()
    if texcommand != "pdflatex":
        latex_fontspec.append(r"\usepackage{fontspec}")
    if texcommand != "pdflatex" and rcParams.get("pgf.rcfonts", True):
# try to find fonts from rc parameters
families = ["serif", "sans-serif", "monospace"]
fontspecs = [r"\setmainfont{%s}", r"\setsansfont{%s}",
r"\setmonofont{%s}"]
for family, fontspec in zip(families, fontspecs):
matches = [f for f in rcParams["font." + family]
if f in system_fonts]
if matches:
latex_fontspec.append(fontspec % matches[0])
else:
                pass  # no fonts found, fall back to the LaTeX default
return "\n".join(latex_fontspec)
def get_preamble():
"""Get LaTeX preamble from rc."""
latex_preamble = rcParams.get("pgf.preamble", "")
if type(latex_preamble) == list:
latex_preamble = "\n".join(latex_preamble)
return latex_preamble
###############################################################################
# This almost made me cry!!!
# In the end, it's better to use only one unit for all coordinates, since the
# arithmetic in latex seems to produce inaccurate conversions.
latex_pt_to_in = 1. / 72.27
latex_in_to_pt = 1. / latex_pt_to_in
mpl_pt_to_in = 1. / 72.
mpl_in_to_pt = 1. / mpl_pt_to_in
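# For example, a matplotlib linewidth lw given in matplotlib points (1/72 in)
# is converted to TeX points as lw * mpl_pt_to_in * latex_in_to_pt, which is
# exactly what RendererPgf._print_pgf_path_styles does below.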
###############################################################################
# helper functions
NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
re_mathsep = re.compile(NO_ESCAPE + r"\$")
re_escapetext = re.compile(NO_ESCAPE + "([_^$%])")
repl_escapetext = lambda m: "\\" + m.group(1)
re_mathdefault = re.compile(NO_ESCAPE + r"(\\mathdefault)")
repl_mathdefault = lambda m: m.group(0)[:-len(m.group(1))]
def common_texification(text):
"""
Do some necessary and/or useful substitutions for texts to be included in
LaTeX documents.
"""
# Sometimes, matplotlib adds the unknown command \mathdefault.
# Not using \mathnormal instead since this looks odd for the latex cm font.
text = re_mathdefault.sub(repl_mathdefault, text)
# split text into normaltext and inline math parts
parts = re_mathsep.split(text)
for i, s in enumerate(parts):
if not i % 2:
# textmode replacements
s = re_escapetext.sub(repl_escapetext, s)
else:
# mathmode replacements
s = r"\(\displaystyle %s\)" % s
parts[i] = s
return "".join(parts)
def writeln(fh, line):
# every line of a file included with \input must be terminated with %
# if not, latex will create additional vertical spaces for some reason
fh.write(line)
fh.write("%\n")
def _font_properties_str(prop):
# translate font properties to latex commands, return as string
commands = []
families = {"serif": r"\rmfamily", "sans": r"\sffamily",
"sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
family = prop.get_family()[0]
if family in families:
commands.append(families[family])
    elif family in system_fonts and get_texcommand() != "pdflatex":
commands.append(r"\setmainfont{%s}\rmfamily" % family)
else:
pass # print warning?
size = prop.get_size_in_points()
commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
commands.append(styles[prop.get_style()])
boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
"extra bold", "black"]
if prop.get_weight() in boldstyles:
commands.append(r"\bfseries")
commands.append(r"\selectfont")
return "".join(commands)
def make_pdf_to_png_converter():
"""
Returns a function that converts a pdf file to a png file.
"""
tools_available = []
# check for pdftocairo
try:
check_output(["pdftocairo", "-v"], stderr=subprocess.STDOUT)
tools_available.append("pdftocairo")
except:
pass
# check for ghostscript
try:
gs = "gs" if sys.platform is not "win32" else "gswin32c"
check_output([gs, "-v"], stderr=subprocess.STDOUT)
tools_available.append("gs")
except:
pass
# pick converter
if "pdftocairo" in tools_available:
def cairo_convert(pdffile, pngfile, dpi):
cmd = ["pdftocairo", "-singlefile", "-png",
"-r %d" % dpi, pdffile, os.path.splitext(pngfile)[0]]
# for some reason this doesn't work without shell
check_output(" ".join(cmd), shell=True, stderr=subprocess.STDOUT)
return cairo_convert
elif "gs" in tools_available:
def gs_convert(pdffile, pngfile, dpi):
cmd = [gs, '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
'-sDEVICE=png16m', '-dUseCIEColor', '-dTextAlphaBits=4',
'-dGraphicsAlphaBits=4', '-dDOINTERPOLATE', '-sOutputFile=%s' % pngfile,
'-r%d' % dpi, pdffile]
check_output(cmd, stderr=subprocess.STDOUT)
return gs_convert
else:
raise RuntimeError("No suitable pdf to png renderer found.")
class LatexError(Exception):
def __init__(self, message, latex_output=""):
Exception.__init__(self, message)
self.latex_output = latex_output
class LatexManagerFactory:
previous_instance = None
@staticmethod
def get_latex_manager():
texcommand = get_texcommand()
latex_header = LatexManager._build_latex_header()
prev = LatexManagerFactory.previous_instance
# check if the previous instance of LatexManager can be reused
if prev and prev.latex_header == latex_header and prev.texcommand == texcommand:
if rcParams.get("pgf.debug", False):
print "reusing LatexManager"
return prev
else:
if rcParams.get("pgf.debug", False):
print "creating LatexManager"
new_inst = LatexManager()
LatexManagerFactory.previous_instance = new_inst
return new_inst
class WeakSet:
# TODO: Poor man's weakref.WeakSet.
# Remove this once python 2.6 support is dropped from matplotlib.
def __init__(self):
self.weak_key_dict = weakref.WeakKeyDictionary()
def add(self, item):
self.weak_key_dict[item] = None
def discard(self, item):
if item in self.weak_key_dict:
del self.weak_key_dict[item]
def __iter__(self):
return self.weak_key_dict.iterkeys()
class LatexManager:
"""
The LatexManager opens an instance of the LaTeX application for
determining the metrics of text elements. The LaTeX environment can be
    modified by setting fonts and/or a custom preamble in the rc parameters.
"""
_unclean_instances = WeakSet()
@staticmethod
def _build_latex_header():
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
# Create LaTeX header with some content, else LaTeX will load some
# math fonts later when we don't expect the additional output on stdout.
# TODO: is this sufficient?
latex_header = [r"\documentclass{minimal}",
latex_preamble,
latex_fontspec,
r"\begin{document}",
r"text $math \mu$", # force latex to load fonts now
r"\typeout{pgf_backend_query_start}"]
return "\n".join(latex_header)
@staticmethod
def _cleanup_remaining_instances():
unclean_instances = list(LatexManager._unclean_instances)
for latex_manager in unclean_instances:
latex_manager._cleanup()
def _stdin_writeln(self, s):
self.latex_stdin_utf8.write(s)
self.latex_stdin_utf8.write("\n")
self.latex_stdin_utf8.flush()
def _expect(self, s):
exp = s.encode("utf8")
buf = bytearray()
while True:
b = self.latex.stdout.read(1)
buf += b
if buf[-len(exp):] == exp:
break
if not len(b):
raise LatexError("LaTeX process halted", buf.decode("utf8"))
return buf.decode("utf8")
def _expect_prompt(self):
return self._expect("\n*")
def __init__(self):
# create a tmp directory for running latex, remember to cleanup
self.tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_lm_")
LatexManager._unclean_instances.add(self)
# test the LaTeX setup to ensure a clean startup of the subprocess
self.texcommand = get_texcommand()
self.latex_header = LatexManager._build_latex_header()
latex_end = "\n\\makeatletter\n\\@@end\n"
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=self.tmpdir)
test_input = self.latex_header + latex_end
stdout, stderr = latex.communicate(test_input.encode("utf-8"))
if latex.returncode != 0:
raise LatexError("LaTeX returned an error, probably missing font or error in preamble:\n%s" % stdout)
# open LaTeX process for real work
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=self.tmpdir)
self.latex = latex
self.latex_stdin_utf8 = codecs.getwriter("utf8")(self.latex.stdin)
# write header with 'pgf_backend_query_start' token
self._stdin_writeln(self._build_latex_header())
# read all lines until our 'pgf_backend_query_start' token appears
self._expect("*pgf_backend_query_start")
self._expect_prompt()
# cache for strings already processed
self.str_cache = {}
def _cleanup(self):
if not os.path.isdir(self.tmpdir):
return
try:
self.latex_stdin_utf8.close()
self.latex.communicate()
self.latex.wait()
except:
pass
try:
shutil.rmtree(self.tmpdir)
LatexManager._unclean_instances.discard(self)
except:
sys.stderr.write("error deleting tmp directory %s\n" % self.tmpdir)
def __del__(self):
if rcParams.get("pgf.debug", False):
print "deleting LatexManager"
self._cleanup()
def get_width_height_descent(self, text, prop):
"""
Get the width, total height and descent for a text typesetted by the
current LaTeX environment.
"""
# apply font properties and define textbox
prop_cmds = _font_properties_str(prop)
textbox = "\\sbox0{%s %s}" % (prop_cmds, text)
# check cache
if textbox in self.str_cache:
return self.str_cache[textbox]
# send textbox to LaTeX and wait for prompt
self._stdin_writeln(textbox)
try:
self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# typeout width, height and text offset of the last textbox
self._stdin_writeln(r"\typeout{\the\wd0,\the\ht0,\the\dp0}")
# read answer from latex and advance to the next prompt
try:
answer = self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# parse metrics from the answer string
try:
width, height, offset = answer.splitlines()[0].split(",")
except:
msg = "Error processing '%s'\nLaTeX Output:\n%s" % (text, answer)
raise ValueError(msg)
w, h, o = float(width[:-2]), float(height[:-2]), float(offset[:-2])
# the height returned from LaTeX goes from base to top.
# the height matplotlib expects goes from bottom to top.
self.str_cache[textbox] = (w, h + o, o)
return w, h + o, o
class RendererPgf(RendererBase):
def __init__(self, figure, fh):
"""
Creates a new PGF renderer that translates any drawing instruction
into text commands to be interpreted in a latex pgfpicture environment.
Attributes:
* figure: Matplotlib figure to initialize height, width and dpi from.
* fh: File handle for the output of the drawing commands.
"""
RendererBase.__init__(self)
self.dpi = figure.dpi
self.fh = fh
self.figure = figure
self.image_counter = 0
# get LatexManager instance
self.latexManager = LatexManagerFactory.get_latex_manager()
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# convert from display units to in
f = 1. / self.dpi
# set style and clip
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
# build marker definition
bl, tr = marker_path.get_extents(marker_trans).get_points()
coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
writeln(self.fh, r"\pgfsys@defobject{currentmarker}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
self._print_pgf_path(marker_path, marker_trans)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"}")
# draw marker for each vertex
for point, code in path.iter_segments(trans, simplify=False):
x, y = point[0] * f, point[1] * f
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"\end{pgfscope}")
def draw_path(self, gc, path, transform, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# draw the path
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
self._print_pgf_path(path, transform)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"\end{pgfscope}")
# if present, draw pattern on top
if gc.get_hatch():
writeln(self.fh, r"\begin{pgfscope}")
# combine clip and path for clipping
self._print_pgf_clip(gc)
self._print_pgf_path(path, transform)
writeln(self.fh, r"\pgfusepath{clip}")
# build pattern definition
writeln(self.fh, r"\pgfsys@defobject{currentpattern}{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
writeln(self.fh, r"\pgfusepath{clip}")
scale = mpl.transforms.Affine2D().scale(self.dpi)
self._print_pgf_path(gc.get_hatch_path(), scale)
self._pgf_path_draw(stroke=True)
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"}")
# repeat pattern, filling the bounding rect of the path
f = 1. / self.dpi
(xmin, ymin), (xmax, ymax) = path.get_extents(transform).get_points()
xmin, xmax = f * xmin, f * xmax
ymin, ymax = f * ymin, f * ymax
repx, repy = int(math.ceil(xmax-xmin)), int(math.ceil(ymax-ymin))
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
for iy in range(repy):
for ix in range(repx):
writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
writeln(self.fh, r"\end{pgfscope}")
def _print_pgf_clip(self, gc):
f = 1. / self.dpi
# check for clip box
bbox = gc.get_clip_rectangle()
if bbox:
p1, p2 = bbox.get_points()
w, h = p2 - p1
coords = p1[0] * f, p1[1] * f, w * f, h * f
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}} " % coords)
writeln(self.fh, r"\pgfusepath{clip}")
# check for clip path
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
self._print_pgf_path(clippath, clippath_trans)
writeln(self.fh, r"\pgfusepath{clip}")
def _print_pgf_path_styles(self, gc, rgbFace):
# cap style
capstyles = {"butt": r"\pgfsetbuttcap",
"round": r"\pgfsetroundcap",
"projecting": r"\pgfsetrectcap"}
writeln(self.fh, capstyles[gc.get_capstyle()])
# join style
joinstyles = {"miter": r"\pgfsetmiterjoin",
"round": r"\pgfsetroundjoin",
"bevel": r"\pgfsetbeveljoin"}
writeln(self.fh, joinstyles[gc.get_joinstyle()])
# filling
has_fill = rgbFace is not None
path_is_transparent = gc.get_alpha() != 1.0
fill_is_transparent = has_fill and (len(rgbFace) > 3) and (rgbFace[3] != 1.0)
if has_fill:
writeln(self.fh, r"\definecolor{currentfill}{rgb}{%f,%f,%f}" % tuple(rgbFace[:3]))
writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
if has_fill and (path_is_transparent or fill_is_transparent):
opacity = gc.get_alpha() * 1.0 if not fill_is_transparent else rgbFace[3]
writeln(self.fh, r"\pgfsetfillopacity{%f}" % opacity)
# linewidth and color
lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
stroke_rgba = gc.get_rgb()
writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
writeln(self.fh, r"\definecolor{currentstroke}{rgb}{%f,%f,%f}" % stroke_rgba[:3])
writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
if gc.get_alpha() != 1.0:
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % gc.get_alpha())
# line style
dash_offset, dash_list = gc.get_dashes()
ls = gc.get_linestyle(None)
if ls == "solid":
writeln(self.fh, r"\pgfsetdash{}{0pt}")
elif (ls == "dashed" or ls == "dashdot" or ls == "dotted"):
dash_str = r"\pgfsetdash{"
for dash in dash_list:
dash_str += r"{%fpt}" % dash
dash_str += r"}{%fpt}" % dash_offset
writeln(self.fh, dash_str)
def _print_pgf_path(self, path, transform):
f = 1. / self.dpi
# build path
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CLOSEPOLY:
writeln(self.fh, r"\pgfpathclose")
elif code == Path.LINETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CURVE3:
cx, cy, px, py = tuple(points)
coords = cx * f, cy * f, px * f, py * f
writeln(self.fh, r"\pgfpathquadraticcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
elif code == Path.CURVE4:
c1x, c1y, c2x, c2y, px, py = tuple(points)
coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
writeln(self.fh, r"\pgfpathcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
def _pgf_path_draw(self, stroke=True, fill=False):
actions = []
if stroke:
actions.append("stroke")
if fill:
actions.append("fill")
writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
def draw_image(self, gc, x, y, im):
# TODO: Almost no documentation for the behavior of this function.
# Something missing?
# save the images to png files
path = os.path.dirname(self.fh.name)
fname = os.path.splitext(os.path.basename(self.fh.name))[0]
fname_img = "%s-img%d.png" % (fname, self.image_counter)
self.image_counter += 1
im.flipud_out()
rows, cols, buf = im.as_rgba_str()
_png.write_png(buf, cols, rows, os.path.join(path, fname_img))
# reference the image in the pgf picture
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
h, w = im.get_size_out()
f = 1. / self.dpi # from display coords to inch
writeln(self.fh, r"\pgftext[at=\pgfqpoint{%fin}{%fin},left,bottom]{\pgfimage[interpolate=true,width=%fin,height=%fin]{%s}}" % (x * f, y * f, w * f, h * f, fname_img))
writeln(self.fh, r"\end{pgfscope}")
def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!"):
self.draw_text(gc, x, y, s, prop, angle, ismath)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
s = common_texification(s)
# apply font properties
prop_cmds = _font_properties_str(prop)
s = ur"{%s %s}" % (prop_cmds, s)
# draw text at given coordinates
x = x * 1. / self.dpi
y = y * 1. / self.dpi
writeln(self.fh, r"\begin{pgfscope}")
alpha = gc.get_alpha()
if alpha != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
stroke_rgb = tuple(gc.get_rgb())[:3]
if stroke_rgb != (0, 0, 0):
writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % stroke_rgb)
writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
writeln(self.fh, "\\pgftext[left,bottom,x=%fin,y=%fin,rotate=%f]{%s}\n" % (x, y, angle, s))
writeln(self.fh, r"\end{pgfscope}")
def get_text_width_height_descent(self, s, prop, ismath):
# check if the math is supposed to be displaystyled
s = common_texification(s)
# get text metrics in units of latex pt, convert to display units
w, h, d = self.latexManager.get_width_height_descent(s, prop)
# TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
# but having a little bit more space around the text looks better,
# plus the bounding box reported by LaTeX is VERY narrow
f = mpl_pt_to_in * self.dpi
return w * f, h * f, d * f
def flipy(self):
return False
def get_canvas_width_height(self):
return self.figure.get_figwidth(), self.figure.get_figheight()
def points_to_pixels(self, points):
return points * mpl_pt_to_in * self.dpi
def new_gc(self):
return GraphicsContextPgf()
class GraphicsContextPgf(GraphicsContextBase):
pass
########################################################################
def draw_if_interactive():
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (egg backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPgf(figure)
manager = FigureManagerPgf(canvas, num)
return manager
class TmpDirCleaner:
remaining_tmpdirs = set()
@staticmethod
def add(tmpdir):
TmpDirCleaner.remaining_tmpdirs.add(tmpdir)
@staticmethod
def cleanup_remaining_tmpdirs():
for tmpdir in TmpDirCleaner.remaining_tmpdirs:
try:
shutil.rmtree(tmpdir)
except:
sys.stderr.write("error deleting tmp directory %s\n" % tmpdir)
class FigureCanvasPgf(FigureCanvasBase):
filetypes = {"pgf": "LaTeX PGF picture",
"pdf": "LaTeX compiled PGF picture",
"png": "Portable Network Graphics", }
def __init__(self, *args):
FigureCanvasBase.__init__(self, *args)
def get_default_filetype(self):
return 'pdf'
def _print_pgf_to_fh(self, fh):
header_text = r"""%% Creator: Matplotlib, PGF backend
%%
%% To include the figure in your LaTeX document, write
%% \input{<filename>.pgf}
%%
%% Make sure the required packages are loaded in your preamble
%% \usepackage{pgf}
%%
%% Figures using additional raster images can only be included by \input if
%% they are in the same directory as the main LaTeX file. For loading figures
%% from other directories you can use the `import` package
%% \usepackage{import}
%% and then include the figures with
%% \import{<path to file>}{<filename>.pgf}
%%
"""
# append the preamble used by the backend as a comment for debugging
header_info_preamble = ["%% Matplotlib used the following preamble"]
for line in get_preamble().splitlines():
header_info_preamble.append("%% " + line)
for line in get_fontspec().splitlines():
header_info_preamble.append("%% " + line)
header_info_preamble.append("%%")
header_info_preamble = "\n".join(header_info_preamble)
# get figure size in inch
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
# create pgfpicture environment and write the pgf code
fh.write(header_text)
fh.write(header_info_preamble)
fh.write("\n")
writeln(fh, r"\begingroup")
writeln(fh, r"\makeatletter")
writeln(fh, r"\begin{pgfpicture}")
writeln(fh, r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}" % (w, h))
writeln(fh, r"\pgfusepath{use as bounding box}")
renderer = RendererPgf(self.figure, fh)
self.figure.draw(renderer)
# end the pgfpicture environment
writeln(fh, r"\end{pgfpicture}")
writeln(fh, r"\makeatother")
writeln(fh, r"\endgroup")
def print_pgf(self, fname_or_fh, *args, **kwargs):
"""
Output pgf commands for drawing the figure so it can be included and
rendered in latex documents.
"""
if kwargs.get("dryrun", False):
return
# figure out where the pgf is to be written to
if is_string_like(fname_or_fh):
with codecs.open(fname_or_fh, "w", encoding="utf-8") as fh:
self._print_pgf_to_fh(fh)
elif is_writable_file_like(fname_or_fh):
raise ValueError("saving pgf to a stream is not supported, " +
"consider using the pdf option of the pgf-backend")
else:
raise ValueError("filename must be a path")
def _print_pdf_to_fh(self, fh):
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
try:
# create temporary directory for compiling the figure
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pgf = os.path.join(tmpdir, "figure.pgf")
fname_tex = os.path.join(tmpdir, "figure.tex")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
# print figure to pgf and compile it with latex
self.print_pgf(fname_pgf)
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
latexcode = r"""
\documentclass[12pt]{minimal}
\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
%s
%s
\usepackage{pgf}
\begin{document}
\centering
\input{figure.pgf}
\end{document}""" % (w, h, latex_preamble, latex_fontspec)
with codecs.open(fname_tex, "w", "utf-8") as fh_tex:
fh_tex.write(latexcode)
texcommand = get_texcommand()
cmdargs = [texcommand, "-interaction=nonstopmode",
"-halt-on-error", "figure.tex"]
try:
check_output(cmdargs, stderr=subprocess.STDOUT, cwd=tmpdir)
except subprocess.CalledProcessError as e:
raise RuntimeError("%s was not able to process your file.\n\nFull log:\n%s" % (texcommand, e.output))
# copy file contents to target
with open(fname_pdf, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except Exception:
TmpDirCleaner.add(tmpdir)
def print_pdf(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a Pgf generated figure to PDF.
"""
# figure out where the pdf is to be written to
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_pdf_to_fh(fh)
elif is_writable_file_like(fname_or_fh):
self._print_pdf_to_fh(fname_or_fh)
else:
raise ValueError("filename must be a path or a file-like object")
def _print_png_to_fh(self, fh):
converter = make_pdf_to_png_converter()
try:
# create temporary directory for pdf creation and png conversion
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
fname_png = os.path.join(tmpdir, "figure.png")
# create pdf and try to convert it to png
self.print_pdf(fname_pdf)
converter(fname_pdf, fname_png, dpi=self.figure.dpi)
# copy file contents to target
with open(fname_png, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except Exception:
TmpDirCleaner.add(tmpdir)
def print_png(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a pgf figure to pdf and convert it to png.
"""
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_png_to_fh(fh)
elif is_writable_file_like(fname_or_fh):
self._print_png_to_fh(fname_or_fh)
else:
raise ValueError("filename must be a path or a file-like object")
def _render_texts_pgf(self, fh):
# TODO: currently unused code path
# alignment anchors
valign = {"top": "top", "bottom": "bottom", "baseline": "base", "center": ""}
halign = {"left": "left", "right": "right", "center": ""}
# alignment anchors for 90deg. rotated labels
rvalign = {"top": "left", "bottom": "right", "baseline": "right", "center": ""}
rhalign = {"left": "top", "right": "bottom", "center": ""}
# TODO: matplotlib does not hide unused tick labels yet, workaround
for tick in self.figure.findobj(mpl.axis.Tick):
tick.label1.set_visible(tick.label1On)
tick.label2.set_visible(tick.label2On)
# TODO: strange, first legend label is always "None", workaround
for legend in self.figure.findobj(mpl.legend.Legend):
labels = legend.findobj(mpl.text.Text)
labels[0].set_visible(False)
# TODO: strange, legend child labels are duplicated,
# find a list of unique text objects as workaround
texts = self.figure.findobj(match=Text, include_self=False)
texts = list(set(texts))
# draw text elements
for text in texts:
s = text.get_text()
if not s or not text.get_visible():
continue
s = common_texification(s)
fontsize = text.get_fontsize()
angle = text.get_rotation()
transform = text.get_transform()
x, y = transform.transform_point(text.get_position())
x = x * 1.0 / self.figure.dpi
y = y * 1.0 / self.figure.dpi
# TODO: positioning behavior unknown for rotated elements
# right now only the alignment for 90deg rotations is correct
if angle == 90.:
align = rvalign[text.get_va()] + "," + rhalign[text.get_ha()]
else:
align = valign[text.get_va()] + "," + halign[text.get_ha()]
s = ur"{\fontsize{%f}{%f}\selectfont %s}" % (fontsize, fontsize*1.2, s)
writeln(fh, ur"\pgftext[%s,x=%fin,y=%fin,rotate=%f]{%s}" % (align,x,y,angle,s))
def get_renderer(self):
return RendererPgf(self.figure, None)
class FigureManagerPgf(FigureManagerBase):
def __init__(self, *args):
FigureManagerBase.__init__(self, *args)
########################################################################
FigureManager = FigureManagerPgf
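# Cleanup at interpreter exit: shut down any remaining LatexManager helper
# instances and retry deletion of temporary directories that could not be
# removed earlier (collected by TmpDirCleaner).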
def _cleanup_all():
LatexManager._cleanup_remaining_instances()
TmpDirCleaner.cleanup_remaining_tmpdirs()
atexit.register(_cleanup_all)
import Tools.HTML
if __name__ == "__main__":
import sys,os
append_path = os.path.abspath(sys.argv[0])[:-20]
print("Append to PYTHONPATH: %s" % (append_path))
sys.path.append(append_path)
import re
import copy
from Tools.file2 import file2
from Tools import ChemicalInfo
from Geometry import Scan,IRC,Geom,ListGeoms
from ElectronicStructure import ElectronicStructure
from Containers import AtomicProps
from Interface.NBO import NBO
import logging
log = logging.getLogger(__name__)
# TODO: take advantage of BetterFile
class Gaussian(ElectronicStructure):
"""
Gaussian 09 parser
Analyzes a multiple-step calculation
"""
def __init__(self):
"""
Declares steps (type List)
"""
self.steps = []
def parse(self):
"""
Parses Gaussian log file, step by step
"""
try:
FI = file2(self.file)
log.debug('%s was opened for reading' %(self.file))
except Exception:
log.error('Cannot open %s for reading' %(self.file))
return
while True:
step = GauStep(FI)
step.parse()
step.postprocess()
if step.blank:
break
self.steps.append(step)
FI.close()
log.debug('%s parsed successfully' % (self.file))
return
def webdata(self):
"""
Returns 2 strings with HTML code
"""
we = self.settings.Engine3D()
b1,b2,bb1,bb2,i = '','','','',1
MaxGeoms, n_Freq = 0, 0
b1s = []
for step in self.steps:
MaxGeoms = max(MaxGeoms,len(step.geoms))
if step.vector:
n_Freq = i
self.settings.subcounter += 1
step.statfile = self.settings.real_path('.stat')
b1, b2 = step.webdata(StartApplet=False)
labeltext = '%s: %s' %(step.JobType,step.lot)
b1s.append([b1,labeltext.upper()])
bb2 += b2
i += 1
if b1s:
bb1 = we.JMolApplet(ExtraScript = b1s[n_Freq-1][0])
if MaxGeoms > 1:
bb1 += Tools.HTML.brn + we.html_geom_play_controls()
if n_Freq:
bb1 += Tools.HTML.brn + we.html_vibration_switch()
if len(b1s)>1:
bb1 += Tools.HTML.brn * 2
# add buttons for each step
for b1 in b1s:
bb1 += we.html_button(*b1)
log.debug('webdata generated successfully')
return bb1, bb2
def usage(self):
for step in self.steps:
step.usage()
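# Each GauStep below handles one job step of a Gaussian log file (one route
# section up to 'Normal termination'); Gaussian.parse above simply keeps
# creating GauStep objects until a blank step signals the end of the file.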
class GauStep(ElectronicStructure):
"""
Works with a single calculation step
"""
def __init__(self,FI=None):
super().__init__(FI)
self.rc = {
'/' : re.compile(r'(\S*\/\S+)'),
'iop' : re.compile(r'iop\((.*?)\)'),
'scrf-solv': re.compile(r'scrf.*solvent\s*=\s*(\w+)', re.IGNORECASE),
's2' : re.compile(r' S\*\*2 before annihilation\s+(\S+),.*?\s+(\S+)$'),
'nbo-bond' : re.compile(r'\) BD \(.*\s+(\S+)\s*-\s*\S+\s+(\S+)'),
'basis-fn' : re.compile(r'^ AtFile\(1\):\s+(.*?).gbs'),
'chk' : re.compile(r'^ %%chk\s*=\s*(\S+)'),
'charge-mult' : re.compile(r'^ Charge =\s+(\S+)\s+Multiplicity =\s+(\S+)'),
'scf done' : re.compile(r'^ SCF Done.*?=\s+(\S+)'),
'qcisd_t' : re.compile(r'^ QCISD\(T\)=\s*(\S+)'),
'scf_conv' : re.compile(r'^ E=\s*(\S+)'),
'scf_iter' : re.compile(r'^ Iteration\s+\S+\s+EE=\s*(\S+)'),
'ci_cc_conv' : re.compile(r'^ DE\(Corr\)=\s*\S+\s*E\(CORR\)=\s*(\S+)'),
'xyz' : re.compile(r'^\s+\S+\s+(\S+).*\s+(\S+)\s+(\S+)\s+(\S+)\s*$'),
'scan param' : re.compile(r'^ !\s+(\S+)\s+(\S+)\s+(\S+)\s+Scan\s+!$'),
'frozen' : re.compile(r'^ !\s+(\S+)\s+(\S+)\s+\S+\s+frozen.*!$', re.IGNORECASE),
'alnum' : re.compile(r'[a-zA-Z]'),
'ifreq' : re.compile(r'\d+\s+\d+\s+(\S+)\s+(\S+)\s+(\S+)'),
'excited state' : re.compile(r'^ Excited State\s+(.*?):.*?\s+(\S+)\s*nm f=\s*(\S+)'),
'scan' : re.compile(r'Scan\s+!$')
}
self.chash = {}
self.chash['NPA'] = {'Entry': 'XXX-XXX', 'Stop': 'XXX-XXX'}
self.chash['NPA_spin'] = {'Entry': 'XXX-XXX', 'Stop': 'XXX-XXX'}
self.chash['APT'] = {'Entry' : 'APT atomic charges:', 'Stop' : 'Sum of APT' }
self.chash['Mulliken'] = {'Entry' : 'Mulliken atomic charges:', 'Stop' : 'Sum of Mulliken' }
self.lot_nobasis = (
'cbs-qb3','cbs-4m','cbs-apno',
'g1', 'g2', 'g2mp2', 'g3', 'g3mp2', 'g3b3', 'g3mp2b3', 'g4', 'g4mp2', 'g3mp2b3',
'w1u', 'w1bd', 'w1ro',
'b1b95', 'b1lyp', 'b3lyp', 'b3p86', 'b3pw91', 'b95', 'b971', 'b972', 'b97d', 'b98', 'bhandh', 'bhandhlyp', 'bmk', 'brc', 'brx', 'cam-b3lyp', 'g96', 'hcth', 'hcth147', 'hcth407', 'hcth93', 'hfb', 'hfs', 'hse2pbe', 'hseh1pbe', 'hsehpbe', 'kcis', 'lc-wpbe', 'lyp', 'm06', 'm062x', 'm06hf', 'm06l', 'o3lyp', 'p86', 'pbe', 'pbe', 'pbe1pbe', 'pbeh', 'pbeh1pbe', 'pkzb', 'pkzb', 'pw91', 'pw91', 'tpss', 'tpssh', 'v5lyp', 'vp86', 'vsxc', 'vwn', 'vwn5', 'x3lyp', 'xa', 'xalpha', 'mpw', 'mpw1lyp', 'mpw1pbe', 'mpw1pw91', 'mpw3pbe', 'thcth', 'thcthhyb', 'wb97', 'wb97x', 'wb97xd', 'wpbeh',
'mp2', 'mp3', 'mp4', 'mp5', 'b2plyp', 'mpw2plyp',
'ccd','ccsd','ccsd(t)','cid','cisd','qcisd(t)','sac-ci',
'am1','pm3','pm6','cndo','dftba','dftb','zindo','indo',
'amber','dreiding','uff',
'rhf','uhf','hf','casscf','gvb',
)
self.def_basis = (
'3-21g', '6-21g', '4-31g', '6-31g', '6-311g',
'd95v', 'd95', 'shc',
'cep-4g', 'cep-31g', 'cep-121g',
'lanl2mb', 'lanl2dz', 'sdd', 'sddall',
'cc-pvdz', 'cc-pvtz', 'cc-pvqz', 'cc-pv5z', 'cc-pv6z',
'svp', 'sv', 'tzvp', 'tzv', 'qzvp',
'midix', 'epr-ii', 'epr-iii', 'ugbs', 'mtsmall',
'dgdzvp', 'dgdzvp2', 'dgtzvp', 'cbsb7',
'gen','chkbasis',
)
self.irc_direction, self.irc_both = 1, False
self.all_coords = {}
# ------- Helper functions --------
@staticmethod
def inroute(lst,s,add=False):
result = ''
for si in lst:
for sj in s.split():
if si.lower()==sj.lower() or ('u'+si.lower())==sj.lower() or ('r'+si.lower())==sj.lower():
if add:
result += ' '+si
else:
return si
return result
#
@staticmethod
def floatize(x):
if '****' in x:
return 10.
return float(x)
# //----- Helper functions --------
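# Hypothetical examples of the helpers above (the route strings are illustrative):
#   inroute(('opt', 'freq'), '# b3lyp/6-31g(d) opt freq')        -> 'opt'
#   inroute(('td', 'nmr'), '# b3lyp/6-31g(d) td nmr', add=True)  -> ' td nmr'
#   floatize('-0.00123') -> -0.00123 ; floatize('****') -> 10.0 (overflow sentinel)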
def parse(self):
"""
Actual parsing happens here
"""
t_ifreq_done = False
basis_FN = ''
rc = self.rc
s = 'BLANK' # It has to be initialized!
try:
while True:
next(self.FI)
s = self.FI.s.rstrip()
#
# Try to save some time by skipping parsing of large noninformative blocks of output
#
# Does not work for AM1 calcs
"""
# Skip parsing of SCF iterations
if s.find(' Cycle')==0:
while not s == '':
s = next(self.FI).rstrip()
"""
# Skip parsing of distance matrices
if s.find('Distance matrix (angstroms):')==20:
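# Gaussian prints the distance matrix as a lower triangle in blocks of at most
# five columns; the arithmetic below estimates how many lines that takes for
# the current number of atoms so the whole block can be skipped without parsing.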
n = len(self.all_coords[coord_type]['all'][-1])
#print('n=',n)
a1 = n % 5
an = n
num = int((an-a1)/5) + 1
n_lines_to_skip = num * (a1 + an) / 2
if a1==0:
num -= 1
n_lines_to_skip += num * (1+num) / 2
self.FI.skip_n(int(n_lines_to_skip))
s = self.FI.s.rstrip()
#
# ---------------------------------------- Read in cartesian coordinates ----------------------------------
#
# Have we found coords?
enter_coord = False
if ' orientation:' in s:
coord_type = s.split()[0]
enter_coord = True
if s.find(' Cartesian Coordinates (Ang):')==0:
coord_type = 'Cartesian Coordinates (Ang)'
enter_coord = True
# If yes, then read them
if enter_coord:
# Positioning
dashes1 = next(self.FI)
title1 = next(self.FI)
title2 = next(self.FI)
dashes2 = next(self.FI)
s = next(self.FI)
# Read in coordinates
geom = Geom()
atnames = []
while not '-------' in s:
xyz = s.strip().split()
try:
ati, x,y,z = xyz[1], xyz[-3],xyz[-2],xyz[-1]
except Exception:
log.warning('Error reading coordinates:\n%s' % (s))
break
atn = ChemicalInfo.at_name[int(ati)]
atnames.append(atn)
geom.coord.append('%s %s %s %s' % (atn,x,y,z))
s = next(self.FI)
# Add found coordinate to output
pc = AtomicProps(attr='atnames',data=atnames)
geom.addAtProp(pc,visible=False) # We hide it, because there is no use to show atomic names for each geometry using checkboxes
if not coord_type in self.all_coords:
self.all_coords[coord_type] = {'all':ListGeoms(),'special':ListGeoms()}
self.all_coords[coord_type]['all'].geoms.append(geom)
#
# ------------------------------------------- Route lines -------------------------------------------------
#
if s.find(' #')==0:
# Read all route lines
s2 = s
while not '-----' in s2:
self.route_lines += ' ' + s2[1:]
s2 = next(self.FI).rstrip()
self.route_lines = self.route_lines.lower()
self.iop = rc['iop'].findall(self.route_lines)
self.route_lines = re.sub(r'iop\(.*?\)','',self.route_lines) # Drop iop(...) clauses so their slashes do not confuse the method/basis split below
# Get Level of Theory
# Look for standard notation: Method/Basis
lot = rc['/'].search(self.route_lines)
# print self.route_lines
if lot:
self.lot, self.basis = lot.group(1).split('/')
if self.basis == 'gen' and basis_FN: # Read basis from external file
self.basis = basis_FN
else:
# Look for method and basis separately using predefined lists of standard methods and bases
lt = self.inroute(self.lot_nobasis,self.route_lines)
if lt:
self.lot = lt
bs = self.inroute(self.def_basis,self.route_lines)
if bs:
self.basis = bs
# Extract %HF in non-standard functionals
for iop in self.iop:
if '3/76' in iop:
encrypted_hf = iop.split('=')[1]
str_hf = encrypted_hf[-5:]
num_hf = float(str_hf[:3]+'.'+str_hf[3:])
self.lot_suffix += '(%.2f %%HF)' %(num_hf)
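# As decoded above, the last five digits of an IOp(3/76=...) value hold the HF
# exchange fraction scaled by 10^4; e.g. the (illustrative) route option
# 'iop(3/76=1000005600)' gives str_hf = '05600' and is reported as '(56.00 %HF)'.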
# Read solvent info
if 'scrf' in self.route_lines:
solvent = rc['scrf-solv'].search(self.route_lines)
if solvent:
self.solvent = solvent.group(1)
# Get job type from the route line
self.route_lines = re.sub(r'\(.*?\)','',self.route_lines) # Quick and dirty: get rid of parentheses to get a string with only top level commands
self.route_lines = re.sub(r'=\S*','',self.route_lines) # Quick and dirty: get rid of =... to get a string with only top level commands
jt = self.inroute(('opt','freq','irc'),self.route_lines) # Major job types
if jt:
self.JobType = jt
#print('self.route_lines: ',self.route_lines)
#print('jt',jt)
self.JobType += self.inroute(('td','nmr','stable'),self.route_lines,add=True) # Additional job types
# Recognize job type on the fly
if ' Berny optimization' in s and self.JobType=='sp':
self.JobType = 'opt'
if rc['scan'].search(s):
self.JobType = 'scan'
#
# ---------------------------------------- Read archive section -------------------------------------------
#
if 'l9999.exe' in s and 'Enter' in s:
while not '@' in self.l9999:
s2 = next(self.FI).strip()
if s2=='':
continue
self.l9999 += s2
#print self.l9999
la = self.l9999.replace('\n ','').split('\\')
if len(la)>5:
self.machine_name = la[2]
if la[5]:
self.basis = la[5]
#basis = la[5]
#if basis == 'gen':
#if basis_FN:
#self.basis = ' Basis(?): ' + basis_FN
#elif not self.basis:
#self.basis = ' Basis: n/a'
self.lot = la[4]
self.JobType9999 = la[3]
if self.JobType != self.JobType9999.lower():
self.JobType += "(%s)" % (self.JobType9999.lower())
#
# ---------------------------------------- Read simple values ---------------------------------------------
#
#Nproc
if s.find(' Will use up to') == 0:
self.n_cores = s.split()[4]
# time
if s.find(' Job cpu time:') == 0:
s_splitted = s.split()
try:
n_days = float(s_splitted[3])
n_hours = float(s_splitted[5])
n_mins = float(s_splitted[7])
n_sec = float(s_splitted[9])
self.time = n_days*24 + n_hours + n_mins/60 + n_sec/3600
except Exception:
self.time = '***'
# n_atoms
if s.find('NAtoms=') == 1:
s_splitted = s.split()
self.n_atoms = int(s_splitted[1])
# n_basis
if s.find('basis functions') == 7:
s_splitted = s.split()
self.n_primitives = int(s_splitted[3])
# Basis
if s.find('Standard basis:') == 1:
self.basis = s.strip().split(':')[1]
# n_electrons
if s.find('alpha electrons') == 7:
s_splitted = s.split()
n_alpha = s_splitted[0]
n_beta = s_splitted[3]
self.n_electrons = int(n_alpha) + int(n_beta)
# S^2
if s.find(' S**2 before annihilation')==0:
s_splitted = s.split()
before = s_splitted[3][:-1]
after = s_splitted[5]
self.s2 = before + '/' + after
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addProp('s2',self.s2)
# CBS-QB3
if ' CBS-QB3 Enthalpy' in s:
self.extra += s
# Solvent
if ' Solvent :' in s:
self.solvent = s.split()[2][:-1]
# Solvation model
if not self.solv_model and 'Model :' in s:
self.solv_model = s.strip().split()[2]
# Try to guess basis name from the file name
if not basis_FN:
bas_FN = rc['basis-fn'].match(s)
if bas_FN:
basis_FN = re.sub(r'.*/','',bas_FN.group(1))
# Read Checkpoint file name
if not self.chk:
chk = rc['chk'].match(s)
if chk:
self.chk = chk.group(1)
# Read Symmetry
if ' Full point group' in s:
self.sym = s.split()[3]
# Read charge_multmetry
if not self.charge:
charge_mult = rc['charge-mult'].match(s)
if charge_mult:
self.charge = charge_mult.group(1)
self.mult = charge_mult.group(2)
# Collect WF convergence
#scf_conv = rc['scf_conv'].match(s)
#if not scf_conv:
#scf_conv = rc['scf_iter'].match(s)
#if scf_conv:
#self.scf_conv.append(scf_conv.group(1))
# Read Converged HF/DFT Energy
scf_e = rc['scf done'].match(s)
if scf_e:
if s[14]=='U':
self.openShell = True
self.scf_e = float(scf_e.group(1))
self.scf_done = True
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addProp('e', self.scf_e) # TODO Read in something like self.best_e instead!
#CI/CC
if not self.ci_cc_done:
if ' CI/CC converged in' in s:
self.ci_cc_done = True
if ' Largest amplitude=' in s:
self.amplitude = s.split()[2].replace('D','E')
# CI/CC Convergence
ci_cc_conv = rc['ci_cc_conv'].match(s)
if ci_cc_conv:
x = float(ci_cc_conv.group(1))
self.ci_cc_conv.append(x)
"""
Do we really need to parse post-hf energies?
# Read post-HF energies
if ' EUMP2 = ' in s:
self.postHF_lot.append('MP2')
self.postHF_e.append(s.split()[-1])
# QCISD(T)
qcisd_t = rc['qcisd_t'].match(s)
if qcisd_t:
self.postHF_lot.append('QCISD(T)')
self.postHF_e.append(qcisd_t.group(1))
"""
"""
#XXX Probably, we don't need it at all as more reliable topology can be read from NBO output
# Read in internal coordinates topology
if '! Name Definition Value Derivative Info. !' in s:
dashes = next(self.FI)
s = next(self.FI).strip()
while not '----' in s:
self.topology.append(s.split()[2])
s = next(self.FI).strip()
"""
#
# ------------------------------------- NBO Topology -----------------------------------
#
if 'N A T U R A L B O N D O R B I T A L A N A L Y S I S' in s:
nbo_analysis = NBO()
nbo_analysis.FI = self.FI
nbo_analysis.parse()
nbo_analysis.postprocess()
self.topologies.append(nbo_analysis.topology) # Actually, we save a reference, so we can keep using nbo_top
for ct in self.all_coords.values():
if ct['all']:
last_g = ct['all'][-1]
last_g.nbo_analysis = nbo_analysis
last_g.addAtProp(nbo_analysis.charges)
if nbo_analysis.OpenShell:
last_g.addAtProp(nbo_analysis.spins)
#
# ------------------------------------- NMR chemical shifts -----------------------------------
#
if 'SCF GIAO Magnetic shielding tensor (ppm)' in s:
nmr = AtomicProps(attr='nmr')
s = next(self.FI)
while 'Isotropic' in s:
c = s.strip().split()[4]
nmr.data.append(float(c))
next(self.FI)
next(self.FI)
next(self.FI)
next(self.FI)
s = next(self.FI)
nmr_proton = AtomicProps(attr='nmr_proton')
nmr_proton.data = copy.deepcopy(nmr.data)
nmr_carbon = AtomicProps(attr='nmr_carbon')
nmr_carbon.data = copy.deepcopy(nmr.data)
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addAtProp(nmr)
ct['all'][-1].addAtProp(nmr_proton)
ct['all'][-1].addAtProp(nmr_carbon)
#
# ------------------------------------- Charges -------------------------------------
#
for ch in self.chash.keys():
if self.chash[ch]['Entry'] in s:
pc = AtomicProps(attr=ch)
next(self.FI)
s = next(self.FI)
while not self.chash[ch]['Stop'] in s:
c = s.strip().split()[2]
pc.data.append(float(c))
s = next(self.FI)
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addAtProp(pc)
#
# --------------------------------------------- Opt -------------------------------------------------------
#
if 'opt' in self.JobType:
if ' Item Value Threshold Converged?' in s:
self.opt_iter += 1
for conv in ('max_force','rms_force','max_displacement','rms_displacement'):
s = next(self.FI)
x, thr = self.floatize(s[27:35]), float(s[40:48])
conv_param = getattr(self,conv)
conv_param.append(x-thr)
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addProp(conv, x-thr)
if ' -- Stationary point found.' in s:
self.opt_ok = True
#
# --------------------------------------------- IRC -------------------------------------------------------
#
if 'irc' in self.JobType:
# IRC geometry was just collected?
if 'Magnitude of analytic gradient =' in s:
self.grad = float(s.split('=')[1])
if 'Rxn path following direction =' in s:
if 'Forward' in s:
self.irc_direction = 1
if 'Reverse' in s:
self.irc_direction = -1
"""
b_optd = ('Optimized point #' in s) and ('Found' in s)
b_deltax = ' Delta-x Convergence Met' in s
b_flag = 'Setting convergence flag and skipping corrector integration' in s
t_irc_point = b_optd or b_deltax or b_flag
"""
"""
G03:
Order of IRC-related parameters:
1. Geometry,
2. Energy calculated for that geometry
3. Optimization convergence test
G09:
For IRC, there is a geometry entry right before the 'NET REACTION COORDINATE' string,
and energy has not been attached to it yet, so we do it manually
"""
if 'NET REACTION COORDINATE UP TO THIS POINT =' in s:
x = float(s.split('=')[1])
for ct in self.all_coords.values():
if ct['all']:
girc = ct['all'][-1]
girc.addProp('x', x*self.irc_direction)
girc.addProp('e', self.scf_e)
if '/' in str(self.s2):
girc.addProp('s2', self.s2.split('/')[1].strip())
ct['special'].geoms.append(girc)
if 'Minimum found on this side of the potential' in s\
or 'Begining calculation of the REVERSE path' in s:
self.irc_direction *= -1
self.irc_both = True
#
# -------------------------------------------- Scan -------------------------------------------------------
#
if 'scan' in self.JobType:
"""
Order of scan-related parameters:
1. Geometry,
2. Energy calculated for that geometry
3. Optimization convergence test
If Stationary point has been found, we already have geometry with energy attached as prop, so we just pick it up
"""
# Memorize scan geometries
if ' -- Stationary point found.' in s:
for ct in self.all_coords.values():
if ct['all']:
ct['special'].geoms.append(ct['all'][-1])
# Record scanned parameters
for param in self.scan_param_description.values():
if ' ! ' in s and param in s:
x = float(s.split()[3])
for ct in self.all_coords.values():
if ct['special']:
ct['special'][-1].addProp(param,x)
# Keep extended information about scanned parameter
sc = rc['scan param'].match(s)
if sc:
param, param_full = sc.group(1), sc.group(2)
self.scan_param_description[param] = param_full
#
# ------------------------------------- Scan or Opt: Frozen parameters -------------------------------------
#
if 'scan' in self.JobType or 'opt' in self.JobType:
sc = rc['frozen'].match(s)
if sc:
self.frozen[sc.group(1)] = sc.group(2)
#
# ------------------------------------------ Freqs --------------------------------------------------------
#
if 'freq' in self.JobType or 'opt' in self.JobType:
# T
if ' Temperature ' in s:
x = float(s.split()[1])
self.freq_temp.append(x)
# ZPE, H, G
if ' Sum of electronic and zero-point Energies=' in s:
x = float(s.split()[-1])
self.freq_zpe.append(x)
next(self.FI)
# H
Htherm = next(self.FI)
x = float(Htherm.split('=')[1])
self.freq_ent.append(x)
# G
Gtherm = next(self.FI)
x = float(Gtherm.split('=')[1])
self.freq_G.append(x)
# Read in vibrational modes
if 'Frequencies' in s:
for fr in s.split(' '):
if '.' in fr:
self.freqs.append(float(fr))
# Read in imaginary frequencies
if (not t_ifreq_done) \
and (self.freqs) \
and (self.freqs[0]<0) \
and not rc['alnum'].search(s):
ifreq = rc['ifreq'].search(s)
if ifreq:
x, y, z = ifreq.groups()
self.vector.append('%s %s %s' % (x,y,z))
else:
t_ifreq_done = True
#
# --------------------------------------- TD --------------------------------------------------------------
#
if 'td' in self.JobType:
if 'Excitation energies and oscillator strengths' in s:
self.uv = {}
uv = rc['excited state'].match(s)
if uv:
self.n_states = uv.group(1)
#print self.n_states
l,f = float(uv.group(2)),float(uv.group(3))
self.uv[l] = f
#self.uv[uv.group(1)] = uv.group(2)
#
# --------------------------------------- Stable --------------------------------------------------------------
#
if 'stable' in self.JobType:
if s.find(' The wavefunction has an')==0 and 'instability' in s:
self.extra += s
#
# ======================================= End of Gau Step ==================================================
#
if 'Normal termination of Gaussian' in s:
self.OK = True
break
except StopIteration:
log.error('Unexpected end of Gaussian file')
# We get here either after 'Normal termination of Gaussian' or at an unexpected
# end of file; if no line was ever read, the step stays blank and tells the caller to stop.
self.blank = (s == 'BLANK')
return
def postprocess(self):
#
# ======================================= Postprocessing ======================================================
#
if self.lot_suffix:
self.lot += self.lot_suffix
"""
Choose coordinates to show in JMol
Standard:
'+' Compatible with vib. frequencies
'-' Tends to swap each several steps, not very good for viewing
'-' Not available when NoSym option provided
Input:
'-' Not compatible with vib. frequencies
'+' Gives smooth change of geometries
Cartesian Coordinates, Z-Matrix:
'+' In some cases, the only coordinates given in the output file
"""
if self.freqs and self.freqs[0]<0:
order = ('Standard','Input','Cartesian Coordinates (Ang)','Z-Matrix')
else:
order = ('Input','Cartesian Coordinates (Ang)','Z-Matrix','Standard')
for to in order:
if to in self.all_coords:
nst = len(self.all_coords[to]['all'].geoms)
if nst > self.n_steps:
self.n_steps = nst
# choose geometries to show
for tp in ('special','all'):
for to in order:
if to in self.all_coords and self.all_coords[to][tp]:
self.geoms = self.all_coords[to][tp]
break
if self.geoms:
log.debug('%s orientation used' % (to))
break
del self.all_coords
if 'irc' in self.JobType:
self.series = IRC(other=self.geoms)
self.series.direction = self.irc_direction
self.series.both = self.irc_both
del self.irc_direction
del self.irc_both
if 'scan' in self.JobType:
self.series = Scan(other=self.geoms)
if self.freqs and self.geoms:
if self.OK:
self.geoms.geoms = [self.geoms[-1],]
# Scale NMR
for geom in self.geoms: # Loop over geoms
#for ap_name in geom.atprops: # Loop over props
if 'nmr_proton' in geom.atprops:
ap = geom.nmr_proton
for ati in range(len(ap.data)):
#ap.data[ati] = 0.1 + 0.9255*(31.7478-ap.data[ati]) # scale is based on Ph-tBu and DBC # B1LYP-40/6-31G(d)+PCM(CH2Cl2)
#ap.data[ati] = 25.84 - 0.7973 * ap.data[ati] # scale is based on F2H2, aromatic + aliphatic protons # M06-2X/6-31G(d)+PCM(CH2Cl2)
ap.data[ati] = ap.data[ati]
if 'nmr_carbon' in geom.atprops:
ap = geom.nmr_carbon
for ati in range(len(ap.data)):
#ap.data[ati] = 0.0 + 1.0*(186.1918-ap.data[ati])
ap.data[ati] = -2.614 + 0.918*(186.1918-ap.data[ati]) # scale is based on Ph-tBu
# Transfer charge from hydrogens to heavy atoms
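# Illustration of the condensing done below (hypothetical Mulliken charges):
# for a C-H pair with q(C) = -0.25 and q(H) = +0.10, the derived 'Mulliken_H'
# property stores q(C) = -0.15 and q(H) = 0.0, i.e. each hydrogen charge is
# summed into the heavy atom it is bonded to according to the NBO topology.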
if self.topologies: # For now, we assume that the only possible topology is NBO
topology = self.topologies[0] # Assumption; might cause trouble
if not topology:
log.info('Cannot extract NBO topology')
else:
for geom in self.geoms: # Loop over geoms
for ap_name in geom.atprops: # Loop over props
if ap_name in self.chash: # Is this prop a charge?
# Retrieve original charges
ap = getattr(geom,ap_name)
# Create new set of charges, where charges on H will be included to heavy atoms
ap_H_name = ap_name + '_H'
ap_H = AtomicProps(attr=ap_H_name,data=[0.0]*len(ap.data))
# Loop over all atoms
for ati in range(len(ap.data)):
atname = geom.atnames.data[ati]
if atname != 'H':
ap_H.data[ati] += ap.data[ati]
continue
top_i = str(ati + 1)
if not top_i in topology.data:
log.info('Cannot extract NBO topology for atom '+top_i)
ap_H.data[ati] += ap.data[ati]
continue
H_connected = topology.data[top_i]
if len(H_connected) != 1:
log.info('Weird topology of molecule')
ap_H.data[ati] += ap.data[ati]
continue
top_heavy = int(list(H_connected.keys())[0])
heavy_at = top_heavy - 1
atname = geom.atnames.data[heavy_at]
# if atname == 'C': Can be used to condense H charges only on C atoms; however, sophisticated selection is needed to apply labels correctly; I will get back to it later
ap_H.data[ati] = 0.0
ap_H.data[heavy_at] += ap.data[ati]
geom.addAtProp(ap_H)
del self.chash
log.debug('Gaussian step (%s) parsed successfully' %(self.JobType))
return
def usage(self):
s = ''
s += 'Computation Node: %s\n' % (self.machine_name)
if hasattr(self,'n_cores'):
s+= '#Cores: %s\n' % (self.n_cores)
s += 'Level of Theory: %s\n' % (self.lot)
s += 'Job type: %s\n' % (self.JobType)
if self.solvent:
s += 'Solvent: %s\n' % (self.solvent)
s += 'Open Shell: %i\n' % (self.openShell)
s += '#Atoms: %i\n' % (self.n_atoms)
s += '#Electrons: %i\n' % (self.n_electrons)
s += '#Gaussian Primitives: %i\n' % (self.n_primitives)
if 'opt' in self.JobType:
s += '#Opt Steps: %s\n' % (self.n_steps)
if 'td' in self.JobType:
s += '#Excited States: %s\n' % (self.n_states)
s += '#SU: %.1f\n' % (self.time)
FS = open(self.statfile,'w')
FS.write(s)
FS.close()
#print s
#
#
#
#
#
if __name__ == "__main__":
DebugLevel = logging.DEBUG
logging.basicConfig(level=DebugLevel)
from Settings import Settings
from Top import Top
Top.settings = Settings(from_config_file= True)
from Tools.HTML import HTML
WebPage = HTML()
WebPage.makeHeader()
f = Gaussian()
f.file = sys.argv[1]
#import profile
#profile.run('f.parse()')
f.parse()
f.postprocess()
print(f.steps[0])
b1, b2 = f.webdata()
WebPage.makeLine(b1,b2)
WebPage.makeTail()
WebPage.write()
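# Typical invocation (the module file name is an assumption; adjust to the real path):
#   python Gaussian.py calc.log
# This parses calc.log, post-processes each step and writes the JMol-enabled HTML page.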