# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 roger
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dequis
# Copyright (c) 2015 Dario Giovannetti
# Copyright (c) 2015 Alexander Lozovskoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import TYPE_CHECKING
from libqtile import hook, utils
from libqtile.backend.base import FloatStates
from libqtile.command.base import CommandObject
from libqtile.log_utils import logger
if TYPE_CHECKING:
from libqtile.command.base import ItemT
class _Group(CommandObject):
"""A container for a bunch of windows
Analogous to workspaces in other window managers. Each client window
managed by the window manager belongs to exactly one group.
A group is identified by its name, but displayed in the GroupBox widget by its label.
"""
def __init__(self, name, layout=None, label=None):
self.name = name
self.label = name if label is None else label
self.custom_layout = layout # will be set on _configure
self.windows = []
self.tiled_windows = set()
self.qtile = None
self.layouts = []
self.floating_layout = None
# self.focus_history lists the group's windows in the order they
# received focus, from the oldest (first item) to the currently
# focused window (last item); NB the list does *not* contain any
# windows that never received focus; refer to self.windows for the
# complete set
self.focus_history = []
self.screen = None
self.current_layout = None
self.last_focused = None
def _configure(self, layouts, floating_layout, qtile):
self.screen = None
self.current_layout = 0
self.focus_history = []
self.windows = []
self.qtile = qtile
self.layouts = [i.clone(self) for i in layouts]
self.floating_layout = floating_layout
if self.custom_layout is not None:
self.layout = self.custom_layout
self.custom_layout = None
@property
def current_window(self):
try:
return self.focus_history[-1]
except IndexError:
# no window has focus
return None
@current_window.setter
def current_window(self, win):
try:
self.focus_history.remove(win)
except ValueError:
# win has never received focus before
pass
self.focus_history.append(win)
def _remove_from_focus_history(self, win):
try:
index = self.focus_history.index(win)
except ValueError:
# win has never received focus
return False
else:
del self.focus_history[index]
# return True if win was the last item (i.e. it was current_window)
return index == len(self.focus_history)
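# Illustrative sketch of the focus-history bookkeeping above (w1/w2 are
# hypothetical window objects; not runnable against a real backend):
#
#     group.current_window = w1              # focus_history == [w1]
#     group.current_window = w2              # focus_history == [w1, w2]
#     group._remove_from_focus_history(w1)   # False: w1 was not current_window
#     group._remove_from_focus_history(w2)   # True: w2 was current_window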
@property
def layout(self):
return self.layouts[self.current_layout]
@layout.setter
def layout(self, layout):
"""
Parameters
==========
layout :
a string matching the name of a Layout object.
"""
for index, obj in enumerate(self.layouts):
if obj.name == layout:
self.current_layout = index
hook.fire("layout_change", self.layouts[self.current_layout], self)
self.layout_all()
return
logger.error("No such layout: {}".format(layout))
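# Illustrative sketch of switching layouts on a configured group (the layout
# names and screen setup are assumptions, not taken from this module):
#
#     group.layout = "max"        # select by name; logs an error if no match
#     group.use_layout(1)         # select by index into self.layouts
#     group.use_next_layout()     # cycle forward, wrapping around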
def use_layout(self, index):
assert -len(self.layouts) <= index < len(self.layouts), "layout index out of bounds"
self.layout.hide()
self.current_layout = index % len(self.layouts)
hook.fire("layout_change", self.layouts[self.current_layout], self)
self.layout_all()
screen_rect = self.screen.get_rect()
self.layout.show(screen_rect)
def use_next_layout(self):
self.use_layout((self.current_layout + 1) % (len(self.layouts)))
def use_previous_layout(self):
self.use_layout((self.current_layout - 1) % (len(self.layouts)))
def layout_all(self, warp=False):
"""Layout the floating layer, then the current layout.
If we have a current_window, give it focus, optionally warping the
pointer to it.
"""
if self.screen and self.windows:
with self.qtile.core.masked():
normal = [x for x in self.windows if not x.floating]
floating = [x for x in self.windows if x.floating and not x.minimized]
screen_rect = self.screen.get_rect()
if normal:
try:
self.layout.layout(normal, screen_rect)
except Exception:
logger.exception("Exception in layout %s", self.layout.name)
if floating:
self.floating_layout.layout(floating, screen_rect)
if self.current_window and self.screen == self.qtile.current_screen:
self.current_window.focus(warp)
else:
# Screen has lost focus, so we reset the record of the focused window so
# that focus will warp when the screen is focused again
self.last_focused = None
def set_screen(self, screen, warp=True):
"""Set this group's screen to screen"""
if screen == self.screen:
return
self.screen = screen
if self.screen:
# move all floating windows to the new screen
self.floating_layout.to_screen(self, self.screen)
self.layout_all(warp=warp and self.qtile.config.cursor_warp)
screen_rect = self.screen.get_rect()
self.floating_layout.show(screen_rect)
self.layout.show(screen_rect)
else:
self.hide()
def hide(self):
self.screen = None
with self.qtile.core.masked():
for i in self.windows:
i.hide()
self.layout.hide()
def focus(self, win, warp=True, force=False):
"""Focus the given window
If win is in the group, blur any windows and call ``focus`` on the
layout (in case it wants to track anything), fire focus_change hook and
invoke layout_all.
Parameters
==========
win :
Window to focus
warp :
Warp pointer to win. This should basically always be True, unless
the focus event is coming from something like EnterNotify, where
the user is actively using the mouse, or on full screen layouts
where only one window is "maximized" at a time, and it doesn't make
sense for the mouse to automatically move.
"""
if self.qtile._drag and not force:
# don't change focus while dragging windows (unless forced)
return
if win:
if win not in self.windows:
return
# ignore focus events if window is the current window
if win is self.last_focused:
warp = False
self.current_window = win
self.last_focused = self.current_window
if win.floating:
for layout in self.layouts:
layout.blur()
self.floating_layout.focus(win)
else:
self.floating_layout.blur()
for layout in self.layouts:
layout.focus(win)
hook.fire("focus_change")
self.layout_all(warp)
def info(self):
return dict(
name=self.name,
label=self.label,
focus=self.current_window.name if self.current_window else None,
tiled_windows={i.name for i in self.tiled_windows},
windows=[i.name for i in self.windows],
focus_history=[i.name for i in self.focus_history],
layout=self.layout.name,
layouts=[i.name for i in self.layouts],
floating_info=self.floating_layout.info(),
screen=self.screen.index if self.screen else None,
)
def add(self, win, focus=True, force=False):
hook.fire("group_window_add", self, win)
if win not in self.windows:
self.windows.append(win)
win.group = self
if self.qtile.config.auto_fullscreen and win.wants_to_fullscreen:
win._float_state = FloatStates.FULLSCREEN
elif self.floating_layout.match(win) and not win.fullscreen:
win._float_state = FloatStates.FLOATING
if win.floating and not win.fullscreen:
self.floating_layout.add(win)
if not win.floating or win.fullscreen:
self.tiled_windows.add(win)
for i in self.layouts:
i.add(win)
if focus:
self.focus(win, warp=True, force=force)
def remove(self, win, force=False):
self.windows.remove(win)
hadfocus = self._remove_from_focus_history(win)
win.group = None
if win.floating:
nextfocus = self.floating_layout.remove(win)
nextfocus = (
nextfocus
or self.current_window
or self.layout.focus_first()
or self.floating_layout.focus_first(group=self)
)
# Remove from the tiled layouts if it was not floating or fullscreen
if not win.floating or win.fullscreen:
for i in self.layouts:
if i is self.layout:
nextfocus = i.remove(win)
else:
i.remove(win)
nextfocus = (
nextfocus
or self.floating_layout.focus_first(group=self)
or self.current_window
or self.layout.focus_first()
)
if win in self.tiled_windows:
self.tiled_windows.remove(win)
# a notification may not have focus
if hadfocus:
self.focus(nextfocus, warp=True, force=force)
# no next focus window means focus changed to nothing
if not nextfocus:
hook.fire("focus_change")
elif self.screen:
self.layout_all()
def mark_floating(self, win, floating):
if floating:
if win in self.floating_layout.find_clients(self):
# already floating
pass
else:
# Remove from the tiled windows list if the window is not fullscreen
if not win.fullscreen:
self.tiled_windows.remove(win)
# Remove the window from the layout if it is not fullscreen
for i in self.layouts:
i.remove(win)
if win is self.current_window:
i.blur()
self.floating_layout.add(win)
if win is self.current_window:
self.floating_layout.focus(win)
else:
self.floating_layout.remove(win)
self.floating_layout.blur()
# A window that was fullscreen should only be added if it was not a tiled window
if win not in self.tiled_windows:
for i in self.layouts:
i.add(win)
self.tiled_windows.add(win)
if win is self.current_window:
for i in self.layouts:
i.focus(win)
self.layout_all()
def _items(self, name) -> ItemT:
if name == "layout":
return True, list(range(len(self.layouts)))
if name == "screen" and self.screen is not None:
return True, []
if name == "window":
return self.current_window is not None, [i.wid for i in self.windows]
return None
def _select(self, name, sel):
if name == "layout":
if sel is None:
return self.layout
return utils.lget(self.layouts, sel)
if name == "screen":
return self.screen
if name == "window":
if sel is None:
return self.current_window
for i in self.windows:
if i.wid == sel:
return i
raise RuntimeError("Invalid selection: {}".format(name))
def cmd_setlayout(self, layout):
self.layout = layout
def cmd_info(self):
"""Returns a dictionary of info for this group"""
return self.info()
def cmd_toscreen(self, screen=None, toggle=False):
"""Pull a group to a specified screen.
Parameters
==========
screen :
Screen offset. If not specified, we assume the current screen.
toggle :
If this group is already on the screen, the group is toggled with the
last used group
Examples
========
Pull group to the current screen::
toscreen()
Pull group to screen 0::
toscreen(0)
"""
if screen is None:
screen = self.qtile.current_screen
else:
screen = self.qtile.screens[screen]
if screen.group == self:
if toggle:
screen.toggle_group(self)
else:
screen.set_group(self)
def _get_group(self, direction, skip_empty=False, skip_managed=False):
"""Find a group walking the groups list in the specified direction
Parameters
==========
skip_empty :
skips the empty groups
skip_managed :
skips the groups that have a screen
"""
def match(group):
from libqtile import scratchpad
if group is self:
return True
if skip_empty and not group.windows:
return False
if skip_managed and group.screen:
return False
if isinstance(group, scratchpad.ScratchPad):
return False
return True
groups = [group for group in self.qtile.groups if match(group)]
index = (groups.index(self) + direction) % len(groups)
return groups[index]
def get_previous_group(self, skip_empty=False, skip_managed=False):
return self._get_group(-1, skip_empty, skip_managed)
def get_next_group(self, skip_empty=False, skip_managed=False):
return self._get_group(1, skip_empty, skip_managed)
def cmd_unminimize_all(self):
"""Unminimise all windows in this group"""
for win in self.windows:
win.minimized = False
self.layout_all()
def cmd_next_window(self):
"""
Focus the next window in group.
This method cycles through _all_ windows in the group, regardless of
whether they are tiled in the current layout or floating. Cycling of
tiled and floating windows is not mixed.
The cycling order depends on the current Layout.
"""
if not self.windows:
return
if self.current_window.floating:
nxt = (
self.floating_layout.focus_next(self.current_window)
or self.layout.focus_first()
or self.floating_layout.focus_first(group=self)
)
else:
nxt = (
self.layout.focus_next(self.current_window)
or self.floating_layout.focus_first(group=self)
or self.layout.focus_first()
)
self.focus(nxt, True)
def cmd_prev_window(self):
"""
Focus the previous window in group.
This method cycles through _all_ windows in the group, regardless of
whether they are tiled in the current layout or floating. Cycling of
tiled and floating windows is not mixed.
The cycling order depends on the current Layout.
"""
if not self.windows:
return
if self.current_window.floating:
nxt = (
self.floating_layout.focus_previous(self.current_window)
or self.layout.focus_last()
or self.floating_layout.focus_last(group=self)
)
else:
nxt = (
self.layout.focus_previous(self.current_window)
or self.floating_layout.focus_last(group=self)
or self.layout.focus_last()
)
self.focus(nxt, True)
def cmd_focus_back(self):
"""
Focus the window that had focus before the current one got it.
Repeated calls to this function switch back and forth between the last
two focused windows. Do nothing if fewer than two windows have ever
received focus.
"""
try:
win = self.focus_history[-2]
except IndexError:
pass
else:
self.focus(win)
def cmd_focus_by_name(self, name):
"""
Focus the first window with the given name. Do nothing if the name is
not found.
"""
for win in self.windows:
if win.name == name:
self.focus(win)
break
def cmd_info_by_name(self, name):
"""
Get the info for the first window with the given name without giving it
focus. Do nothing if the name is not found.
"""
for win in self.windows:
if win.name == name:
return win.info()
def cmd_switch_groups(self, name):
"""Switch position of current group with name"""
self.qtile.cmd_switch_groups(self.name, name)
def cmd_set_label(self, label):
"""
Set the display name of current group to be used in GroupBox widget.
If label is None, the name of the group is used as display name.
If label is the empty string, the group is invisible in GroupBox.
"""
self.label = label if label is not None else self.name
hook.fire("changegroup")
def __repr__(self):
return "<group.Group (%r)>" % self.name
"""MSAView - Uniprot support.
Copyright (c) 2011 Joel Hedlund.
Contact: Joel Hedlund <[email protected]>
MSAView is a modular, configurable and extensible package for analysing and
visualising multiple sequence alignments and sequence features.
This package provides support for downloading/saving uniprot information, and
for displaying uniprot sequence features.
If you have problems with this package, please contact the author.
Copyright
=========
The MIT License
Copyright (c) 2011 Joel Hedlund.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__version__ = "0.9.0"
import re
from xml.etree import ElementTree as etree
import gio
from msaview import presets
from msaview.action import (Action,
register_action)
from msaview.computation import (DownloadTask,
SimpleETreeDownloader)
from msaview.features import SequenceFeature
from msaview.options import Option
from msaview.selection import Region
from msaview.sequence_information import SequenceInformation
presets.add_to_preset_path(__file__)
class UniprotID(SequenceInformation):
category = 'uniprot-id'
@classmethod
def extract_id(cls, id):
uniprot_ac = re.compile(r'\b(?P<id>[A-NR-Z][0-9][A-Z][A-Z0-9][A-Z0-9][0-9]|[OPQ][0-9][A-Z0-9][A-Z0-9][A-Z0-9][0-9])(\b|_)')
sprot_id = re.compile(r'\b(?P<id>[A-Z0-9]{1,5}_[A-Z0-9]{3,5})\b')
for r in (uniprot_ac, sprot_id):
m = r.search(id)
if m:
return m.group('id')
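# Illustrative sketch of what the two patterns above accept (the identifiers
# are made-up examples):
#
#     UniprotID.extract_id('sp|P12345|FAKE_HUMAN')   # -> 'P12345' (accession)
#     UniprotID.extract_id('FAKE_HUMAN')             # -> 'FAKE_HUMAN' (entry name)
#     UniprotID.extract_id('no uniprot id here')     # -> None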
@classmethod
def from_msa_sequence(cls, msa, sequence_index):
id = cls.extract_id(msa.ids[sequence_index])
return cls(sequence_index, id or None)
class UniprotETree(SequenceInformation):
category = 'uniprot-etree'
def __init__(self, sequence_index, sequence_id=None, root=None, offset=None):
self.sequence_index = sequence_index
self.sequence_id = sequence_id
self.root = root
self.offset = offset
class UniprotXMLDownloadTask(DownloadTask):
url = "http://www.ebi.ac.uk/cgi-bin/dbfetch?db=uniprotkb&format=uniprotxml&style=raw&id=%s"
downloader_class = SimpleETreeDownloader
def get_urls(self):
ids = ','.join(id for i, id in self.id_enumeration[self.progress:self.progress+self.batch_size])
return [self.url % ids]
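# Illustrative sketch of the batched dbfetch URL built above (example ids;
# the actual batch depends on self.progress and self.batch_size):
#
#     ids = 'P12345,FAKE_HUMAN'
#     UniprotXMLDownloadTask.url % ids
#     # -> 'http://www.ebi.ac.uk/cgi-bin/dbfetch?db=uniprotkb'
#     #    '&format=uniprotxml&style=raw&id=P12345,FAKE_HUMAN'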
def parse_downloads(self, data):
entries = []
for sequence_index, sequence_id in self.id_enumeration[self.progress:self.progress+self.batch_size]:
for entry_element in data[0].getroot().getchildren():
element_ids = [e.text for e in entry_element.findall('accession')]
element_ids.append(entry_element.find('name').text)
if sequence_id not in element_ids:
continue
offset = entry_element.find('sequence').text.find(self.msa.unaligned[sequence_index].upper())
if offset < 0:
continue
entries.append(UniprotETree(sequence_index, sequence_id, entry_element, offset))
break
else:
entries.append(UniprotETree(sequence_index, sequence_id, None, None))
return entries
def update_progress(self, data):
self.progress = min(self.total, self.progress + self.batch_size)
def handle_id_category_changed(self, sequence_information, change):
if change.has_changed('uniprot-id'):
self.abort()
def get_id_entry_for_sequence(msa, sequence_index):
entry = msa.sequence_information.setdefault('uniprot-id')[sequence_index]
if entry is None:
entry = UniprotID.from_msa_sequence(msa, sequence_index)
msa.sequence_information.add_entries(entry)
return entry
def get_populated_uniprot_id_category(msa):
new_entries = []
for sequence_index, entry in enumerate(msa.sequence_information.setdefault('uniprot-id')):
if entry is not None:
continue
id = UniprotID.from_msa_sequence(msa, sequence_index)
if id:
new_entries.append(id)
msa.sequence_information.add_entries(new_entries)
return msa.sequence_information.categories['uniprot-id']
def dbfetch_uniprot_xml_for_sequence(msa, sequence_index):
id_entry = msa.sequence_information.setdefault('uniprot-id')[sequence_index]
if not id_entry:
id_entry = UniprotID.from_msa_sequence(msa, sequence_index)
msa.sequence_information.add_entries(id_entry)
if not id_entry.sequence_id:
return
task = UniprotXMLDownloadTask(msa, [(sequence_index, id_entry.sequence_id)])
task.run()
return task.results[0]
class UniprotSequenceFeature(SequenceFeature):
def __init__(self, sequence_index=None, sequence_id=None, source=None, name=None, region=None, mapping=None, description=None, original_description=None, status=None, id=None, evidence=None, ref=None, original=None, variation=None):
self.sequence_index = sequence_index
self.sequence_id = sequence_id
self.source = source
self.name = name
self.region = region
self.mapping = mapping
self.description = description
self.status = status
self.id = id
self.evidence = evidence
self.ref = ref
self.original = original
self.variation = variation
def is_similarly_annotated(self, other):
if (other.source.lower() != self.source.lower() or
other.name.lower() != self.name.lower()):
return False
try:
if other.description is self.description is None:
return True
return other.description.lower() == self.description.lower()
except AttributeError:
return False
@classmethod
def from_element(cls, element):
source = 'UniProtKB'
name = element.attrib['type']
description = None
position = element.find('location/position')
if position is None:
start = int(element.find('location/begin').attrib['position']) - 1
length = int(element.find('location/end').attrib['position']) - start
else:
start = int(position.attrib['position']) - 1
length = 1
region = Region(start, length)
original_description = element.attrib.get('description', None)
attrib_order = ['status', 'evidence', 'id', 'ref']
d = dict((a, element.attrib.get(a, None)) for a in attrib_order)
d['original'] = element.find('original')
if d['original'] is not None:
d['original'] = d['original'].text
d['variation'] = element.find('variation')
if d['variation'] is not None:
d['variation'] = d['variation'].text
desc = []
if original_description and not original_description.startswith('('):
desc.append(original_description)
if d['original']:
variation = "%s->%s" % (d['original'], d['variation'])
desc.append(variation)
attrs = ["%s=%s" % (a, d[a]) for a in attrib_order[:-2] if d[a]]
if attrs:
desc.append(', '.join(attrs))
if desc:
description = '; '.join(desc)
return cls(source=source, name=name, region=region, description=description, **d)
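# Illustrative sketch of UniprotSequenceFeature.from_element (namespace-free
# XML for brevity; the attribute values are made-up examples):
#
#     <feature type="mutagenesis site" description="Loss of activity" evidence="3">
#       <original>K</original>
#       <variation>A</variation>
#       <location><position position="42"/></location>
#     </feature>
#
# would yield name='mutagenesis site', region=Region(41, 1) (0-based start),
# and a description of the form 'Loss of activity; K->A; evidence=3'.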
def get_uniprot_features(uniprot_etree_entry, msa):
offset = uniprot_etree_entry.offset or 0
if uniprot_etree_entry.root is None:
return []
features = []
for element in uniprot_etree_entry.root.findall('feature'):
feature = UniprotSequenceFeature.from_element(element)
feature.sequence_index = uniprot_etree_entry.sequence_index
feature.sequence_id = uniprot_etree_entry.sequence_id
feature.map_to_msa(msa, offset)
if not feature.mapping:
continue
features.append(feature)
return features
class ImportUniprotFeatures(Action):
action_name = 'import-uniprot-features'
path = ['Import', 'Sequence features', 'UniProtKB annotations']
tooltip = 'Import feature annotations for all UniProtKB sequences.'
batch_size = 10
@classmethod
def applicable(cls, target, coord=None):
if target.msaview_classname != 'data.msa':
return
if not target:
return
if ('uniprot-etree' in target.sequence_information.categories or
'uniprot-ids' in target.sequence_information.categories):
return cls(target, coord)
for id in target.ids:
if UniprotID.extract_id(id):
return cls(target, coord)
def run(self):
id_entries = get_populated_uniprot_id_category(self.target)
etree_entries = self.target.sequence_information.setdefault('uniprot-etree')
features = []
id_enumeration = []
for sequence_index, etree_entry in enumerate(etree_entries):
if etree_entry is not None:
features.extend(get_uniprot_features(etree_entry, self.target))
continue
id_entry = id_entries[sequence_index]
sequence_id = (id_entry and id_entry.sequence_id)
if sequence_id:
id_enumeration.append((sequence_index, sequence_id))
self.target.features.add_features(features)
task = UniprotXMLDownloadTask(self.target, id_enumeration, self.batch_size)
def add_new_features(t, progress, finished, entries):
for entry in entries or []:
new_features = get_uniprot_features(entry, self.target)
self.target.features.add_features(new_features)
task.connect('progress', add_new_features)
task.connect('progress', lambda t, progress, finished, entries: self.target.sequence_information.add_entries(entries or []))
self.target.sequence_information.connect('changed', task.handle_id_category_changed)
self.target.get_compute_manager().timeout_add(100, task)
return task
register_action(ImportUniprotFeatures)
class ImportUniprotFeaturesForSequence(Action):
action_name = 'import-uniprot-features-for-sequence'
path = ['Import', 'Sequence features', 'UniProtKB annotations (single sequence)']
tooltip = 'Import feature annotations for the sequence from UniProtKB.'
@classmethod
def applicable(cls, target, coord=None):
if not coord or coord.sequence is None:
return
if target.msaview_classname != 'data.msa':
return
if (target.sequence_information.get_entry('uniprot-etree', coord.sequence) or
target.sequence_information.get_entry('uniprot-id', coord.sequence)):
return cls(target, coord)
if UniprotID.extract_id(target.ids[coord.sequence]):
return cls(target, coord)
def run(self):
sequence_index = self.coord.sequence
etree_category = self.target.sequence_information.setdefault('uniprot-etree')
entry = etree_category[sequence_index]
if entry is None:
entry = dbfetch_uniprot_xml_for_sequence(self.target, sequence_index)
if not entry:
return
self.target.sequence_information.add_entries(entry)
if entry.root is None:
return
features = get_uniprot_features(entry, self.target)
self.target.features.add_features(features)
register_action(ImportUniprotFeaturesForSequence)
class ShowSequenceInUniprotWebInterface(Action):
action_name = 'show-sequence-in-uniprot-web-interface'
path = ['Web interface', 'UniProtKB', 'Show protein view for %r']
tooltip = 'Show sequence in the UniProtKB web interface.'
url = 'http://www.uniprot.org/uniprot/'
@classmethod
def applicable(cls, target, coord=None):
if not coord or coord.sequence is None:
return
if target.msaview_classname != 'data.msa':
return
entry = target.sequence_information.get_entry('uniprot-id', coord.sequence)
if not entry:
entry = UniprotID.from_msa_sequence(target, coord.sequence)
if not entry.sequence_id:
return
a = cls(target, coord)
a.path = list(cls.path)
a.path[-1] %= entry.sequence_id
return a
def run(self):
sequence_index = self.coord.sequence
entry = get_id_entry_for_sequence(self.target, sequence_index)
gio.app_info_get_default_for_uri_scheme('http').launch_uris([self.url + entry.sequence_id])
register_action(ShowSequenceInUniprotWebInterface)
class ImportUniprotXML(Action):
action_name = 'import-uniprot-xml'
path = ['Import', 'Sequence information', 'Download UniProtKB XML']
tooltip = 'Download UniProtKB XML data for all sequences.'
batch_size = 50
@classmethod
def applicable(cls, target, coord=None):
if target.msaview_classname != 'data.msa':
return
if not target:
return
if 'uniprot-ids' in target.sequence_information.categories:
return cls(target, coord)
for id in target.ids:
if UniprotID.extract_id(id):
return cls(target, coord)
def run(self):
id_entries = get_populated_uniprot_id_category(self.target)
etree_entries = self.target.sequence_information.setdefault('uniprot-etree')
id_enumeration = []
for sequence_index, etree_entry in enumerate(etree_entries):
if etree_entry is not None:
continue
id_entry = id_entries[sequence_index]
sequence_id = (id_entry and id_entry.sequence_id)
if sequence_id:
id_enumeration.append((sequence_index, sequence_id))
task = UniprotXMLDownloadTask(self.target, id_enumeration, self.batch_size)
task.connect('progress', lambda t, progress, finished, entries: self.target.sequence_information.add_entries(entries or []))
self.target.sequence_information.connect('changed', task.handle_id_category_changed)
self.target.get_compute_manager().timeout_add(100, task)
return task
register_action(ImportUniprotXML)
class ImportUniprotXMLForSequence(Action):
action_name = 'import-uniprot-xml-for-sequence'
path = ['Import', 'Sequence information', 'Download UniProtKB XML (single sequence)']
tooltip = 'Download UniProtKB XML data for the sequence.'
url = 'http://www.uniprot.org/uniprot/'
@classmethod
def applicable(cls, target, coord=None):
if not coord or coord.sequence is None:
return
if target.msaview_classname != 'data.msa':
return
entry = target.sequence_information.get_entry('uniprot-id', coord.sequence)
if not entry:
entry = UniprotID.from_msa_sequence(target, coord.sequence)
if not entry.sequence_id:
return
return cls(target, coord)
def run(self):
sequence_index = self.coord.sequence
self.target.sequence_information.setdefault('uniprot-etree')
etree_entry = dbfetch_uniprot_xml_for_sequence(self.target, sequence_index)
self.target.sequence_information.add_entries(etree_entry)
register_action(ImportUniprotXMLForSequence)
class SaveUniprotXML(Action):
action_name = 'save-uniprot-xml'
path = ['Export', 'Sequence information', 'UniProtKB XML']
tooltip = 'Save UniProtKB XML data.'
uniprot_xml_declaration = """<?xml version="1.0" encoding="UTF-8"?>"""
uniprot_xml_root_element = """<uniprot xmlns="http://uniprot.org/uniprot" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://uniprot.org/uniprot http://www.uniprot.org/support/docs/uniprot.xsd"/>"""
@classmethod
def applicable(cls, target, coord=None):
if target.msaview_classname != 'data.msa':
return
try:
next(True for e in target.sequence_information.categories.get('uniprot-etree', []) if (e and e.root))
except StopIteration:
return
return cls(target, coord)
def get_options(self):
path = 'uniprot.xml'
if self.target and self.target.path:
path = self.target.path + '.uniprot.xml'
return [Option(None, 'path', path, path, 'Path', 'Where to save the UniProtKB XML data.')]
def run(self):
root = etree.XML(self.uniprot_xml_root_element)
tree = etree.ElementTree(root)
for entry in self.target.sequence_information.categories['uniprot-etree']:
if not (entry and entry.root):
continue
root.append(entry.root)
f = open(self.params['path'], 'w')
tree.write(f)
f.close()
register_action(SaveUniprotXML)
'''
MODIFIED. Inspiration taken from the ref link below.
ref: https://raw.githubusercontent.com/kuza55/keras-extras/master/utils/multi_gpu.py
The original carried the following license:
Apache License
Version 2.0, January 2004
For further info refer to: https://github.com/kuza55/keras-extras
Also used https://github.com/fchollet/keras/issues/2436 which was just
posted as code snippets in a forum.
DEPRECATED Features:
NCCL - This module was an attempt at using Tensorflow nccl module:
from tensorflow.contrib import nccl
The nccl contrib package is broken:
https://github.com/tensorflow/tensorflow/issues/17908
StagingArea - It is not straightforward to use StagingArea for prefetching
to device (data_flow_ops.StagingArea):
from tensorflow.python.ops import data_flow_ops
Instead one can use the Tensorflow Dataset API with prefetch_to_device:
tf.contrib.data.prefetch_to_device
The StagingArea is deprecated in favor of prefetch_to_device. But
prefetching to device with the multigpu implementation in this module,
whereby a batch slicing/split layer is inserted after the input layer to
pipeline to each device, is not straightforward. The prefetch_to_device
works well with Horovod multigpu distribution where each process is
cleanly mapped to a GPU and it is straightforward to split the data
pipeline.
''' # noqa
from __future__ import print_function
import sys
# import time
from itertools import chain
import warnings
from keras import backend as KB
from keras.layers.core import Lambda
from keras.models import Model
from keras.layers.merge import Concatenate # , Average)
# import keras.layers as KL
import keras.optimizers as KO
from keras.utils import multi_gpu_model
from keras_exp._utils import Capturing
if KB.backend() == 'tensorflow':
# Monkey patch Keras back-end to use Function with enqueue.
# import keras_exp._patch_tf_backend as tfbpatch
# tfbpatch.patch()
# from keras_exp._patch_tf_backend import patch as tfbpatch
# tfbpatch()
import tensorflow as tf
from tensorflow.python.client import device_lib
try:
from tensorflow.contrib import nccl
have_nccl = True
print('NCCL support available', file=sys.stderr)
except ImportError:
have_nccl = False
print('WARNING: NCCL support not available', file=sys.stderr)
from tensorflow.python.ops import data_flow_ops
_DEBUG = False
__all__ = ('get_available_gpus', 'make_parallel', 'print_mgpu_modelsummary',
'ModelKerasMGPU', 'ModelMGPU')
def get_available_gpus(ngpus=-1):
'''
:param int ngpus: GPUs max to use. Default -1 means all gpus.
:returns: List of gpu devices. Ex.: ['/gpu:0', '/gpu:1', ...]
'''
local_device_protos = device_lib.list_local_devices()
gpus_list = [x.name for x in local_device_protos if x.device_type == 'GPU']
return gpus_list[:ngpus] if ngpus > -1 else gpus_list
def print_mgpu_modelsummary(model):
'''Prints the summary for a multi-GPU keras model.
:param model: Keras model.
:type model: Model
'''
# print json.dumps(model.get_config(), indent=2) # DEBUG
print('\nMULTI-GPU MODEL: {}'.format(model.name))
print(model.summary())
for layer in model.layers:
# print 'layer:', layer, '\ttype:', type(layer)
if isinstance(layer, Model):
submodel = layer
print('\n\tSUBMODEL SUMMARY: {}'.format(layer.name))
with Capturing() as msum:
minfo = submodel.summary()
print('\t{}\n\t{}\n'.format('\n\t'.join(msum), minfo))
class ModelKerasMGPU(Model):
'''
Wrapper class around "keras.utils.multi_gpu_model". This class enables
transparent loading and saving.
'''
def __init__(self, ser_model, gpus): # @IgnorePep8 pylint: disable=super-init-not-called
pmodel = multi_gpu_model(ser_model, gpus)
# mimic copy constructor via __dict__ update, hence no super-init
self.__dict__.update(pmodel.__dict__)
self._smodel = ser_model
def __getattribute__(self, attrname):
'''Override load and save methods to be used from the serial-model. The
serial-model holds references to the weights in the multi-gpu model.
'''
# return Model.__getattribute__(self, attrname)
if 'load' in attrname or 'save' in attrname:
return getattr(self._smodel, attrname)
return super(ModelKerasMGPU, self).__getattribute__(attrname)
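# Illustrative usage sketch of ModelKerasMGPU (build_model is a hypothetical
# serial-model builder; requires two or more visible GPUs to be useful):
#
#     serial_model = build_model()
#     ngpus = len(get_available_gpus())
#     model = ModelKerasMGPU(serial_model, ngpus) if ngpus > 1 else serial_model
#     model.compile(optimizer='sgd', loss='mse')
#     # save/load calls are delegated to serial_model, so checkpoints written
#     # here remain loadable by the single-GPU model.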
def all_sync_params(tower_params, devices, usenccl=True):
"""Assigns the params from the first tower to all others"""
if len(devices) == 1:
return tf.no_op()
sync_ops = []
if have_nccl and usenccl:
for param_on_devices in zip(*tower_params):
# print('PARAM_ON_DEVICES: {}'.format(param_on_devices)) # DEBUG
# Note: param_on_devices is [paramX_gpu0, paramX_gpu1, ...]
param0 = param_on_devices[0]
send_op, received_tensors = nccl.broadcast(param0, devices[1:])
sync_ops.append(send_op)
for device, param, received in zip(devices[1:],
param_on_devices[1:],
received_tensors):
with tf.device(device):
sync_op = param.assign(received)
sync_ops.append(sync_op)
else:
params0 = tower_params[0]
for device, params in zip(devices, tower_params):
with tf.device(device):
for param, param0 in zip(params, params0):
sync_op = param.assign(param0.read_value())
sync_ops.append(sync_op)
return tf.group(*sync_ops)
# def stage(tensors):
# """Stages the given tensors in a StagingArea for asynchronous put/get.
# """
# stage_area = data_flow_ops.StagingArea(
# dtypes=[tensor.dtype for tensor in tensors],
# shapes=[tensor.get_shape() for tensor in tensors])
# put_op = stage_area.put(tensors)
# get_tensors = stage_area.get()
# if not isinstance(get_tensors, list):
# get_tensors = [get_tensors]
# # print('GET_TENSORS: {}'.format(get_tensors)) # DEBUG
#
# get_tensors = [tf.reshape(gt, t.get_shape())
# for (gt, t) in zip(get_tensors, tensors)]
# return put_op, get_tensors
class ModelMGPU(Model):
'''Override load and save methods of the multi-gpu model. The load and
save should correspond to the serial model's load and save.
If there are other idiosyncrasies to handle for the multi-gpu model case then
these can be handled in this subclass. A serial model should always be
instantiated prior to wrapping it or converting it to a multi-GPU model.
This multi-gpu implementation uses data-parallelism.
A copy-constructor is not implemented so optionally pass any additional
parameters besides inputs/outputs as args/kwargs to initialize the
multi-gpu model the same way as the serial model. Typically not needed.
Currently, it seems that using NCCL and synchronizing/averaging gradients
slows multi-gpu processing down.
.. seealso::
Refer to :func:`make_parallel` docstring for scenarios when
out-of-memory errors might occur and workaround.
Kwargs:
:param Model serial_model: Serial i.e. non-multi GPU Keras model. REQUIRED.
:param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...]
Use function get_available_gpus to get the list of available gpus.
This can be a list of strings or list of instances of tf.DeviceSpec.
REQUIRED.
:param str ps_device: Parameter server device to use.
:param bool usenccl: Use the contrib.nccl Tensorflow library for initial
parameter synchronization and gradients averaging. Note: the model's
usenccl option overrides the optimizer's usenccl option.
Default: False
Raises RuntimeError if specified True and a non-multi-gpu optimizer is
passed during compile stage.
:param bool initsync: Synchronize initial Variables i.e. weights,
biases, etc. Default: True
:param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer.
Default: False
:param bool enqueue: Use StagingArea in the multi-GPU model. Could
potentially speed up Host-to-Device transfers.
Produces a warning that kwargs are ignored for Tensorflow. The
_patch_tf_backend module monkey patches the Function in
tensorflow_backend to use the enqueue_ops option.
Default: False
'''
def __init__(self, *args, **kwargs):
# :param model_creator: Callable that returns a serial i.e. non-multi
# GPU Keras model i.e. a keras.models.Model model. REQUIRED.
# Suggestion, use partial from functools to setup model_creator.
# try:
# model_creator = kwargs.pop('model_creator')
# except KeyError:
# raise RuntimeError('Keyword argument "model_creator" required '
# 'for ModelMGPU.')
super(ModelMGPU, self).__init__()
try:
smodel = kwargs.pop('serial_model')
except KeyError:
raise RuntimeError('Keyword argument "serial_model" required '
'for ModelMGPU.')
# SET STATE: Instance of serial model for checkpointing
self._smodel = smodel # model_creator()
try:
gdev_list = kwargs.pop('gdev_list')
except KeyError:
raise RuntimeError('Keyword argument "gdev_list" required '
'for ModelMGPU.')
self._gdev_list = gdev_list
mname = kwargs.pop('name', self._smodel.name)
kwargs['name'] = mname
self._ps_device = kwargs.pop('ps_device', '/cpu:0')
self._initsync = kwargs.pop('initsync', True)
self._usenccl = kwargs.pop('usenccl', False)
self._syncopt = kwargs.pop('syncopt', False)
self._enqueue = kwargs.pop('enqueue', False)
if self._enqueue:
warnings.warn('Enqueue option to use StagingArea currently does '
'not work.', UserWarning)
# NOTE: To use staging have to patch keras tensorflow_backend.Function.
# Function implementation in keras_exp.multigpu._patch_tf_backend
self._enqueue_ops = []
self._tower_params = [] # For init/sync'ing of parameters.
kwargs_ = self._init_make_dataparallel(gdev_list, **kwargs)
super(ModelMGPU, self).__init__(*args, **kwargs_)
def __getattribute__(self, attrname):
'''Override load and save methods to be used from the serial-model. The
serial-model holds references to the weights in the multi-gpu model.
'''
# return Model.__getattribute__(self, attrname)
if 'load' in attrname or 'save' in attrname:
return getattr(self._smodel, attrname)
return super(ModelMGPU, self).__getattribute__(attrname)
# ref: https://github.com/fchollet/keras/issues/2436
def _init_make_dataparallel(self, gdev_list, **kwargs):
'''Uses data-parallelism to convert a serial model to multi-gpu. Refer
to make_parallel doc.
'''
gpucopy_ops = []
def slice_batch(x, ngpus, part, dev):
'''Divide the input batch into [ngpus] slices, and obtain slice
no. [part], e.g. if len(x) == 10, then slice_batch(x, ngpus=2, part=1, dev=dev)
returns x[5:].
'''
sh = KB.shape(x)
L = sh[0] // ngpus
if part == ngpus - 1:
xslice = x[part * L:]
else:
xslice = x[part * L:(part + 1) * L]
# tf.split fails if batch size is not divisible by ngpus. Error:
# InvalidArgumentError (see above for traceback): Number of
# ways to split should evenly divide the split dimension
# xslice = tf.split(x, ngpus)[part]
if not self._enqueue:
return xslice
# Did not see any benefit.
with tf.device(dev):
# if self._stager is None:
stager = data_flow_ops.StagingArea(
dtypes=[xslice.dtype], shapes=[xslice.shape])
stage = stager.put([xslice])
gpucopy_ops.append(stage)
# xslice_stage = stager.get()
return stager.get()
ngpus = len(gdev_list)
if ngpus < 2:
raise RuntimeError('Number of gpus < 2. Require two or more GPUs '
'for multi-gpu model parallelization.')
model = self._smodel
noutputs = len(self._smodel.outputs)
global_scope = tf.get_variable_scope()
towers = [[] for _ in range(noutputs)]
for idev, dev in enumerate(gdev_list):
# TODO: The last slice could cause a gradient calculation outlier
# when averaging gradients. Maybe insure ahead of time that the
# batch_size is evenly divisible by number of GPUs, or maybe don't
# use the last slice.
with tf.device(self._ps_device):
slices = [] # multi-input case
for ix, x in enumerate(model.inputs):
slice_g = Lambda(
slice_batch, # lambda shape: shape,
# lambda shape: x.shape.as_list(),
name='stage_cpuSliceIn{}_Dev{}'.format(ix, idev),
arguments={'ngpus': ngpus, 'part': idev,
'dev': dev})(x)
slices.append(slice_g)
# print('SLICE_G: {}'.format(slice_g)) # DEBUG
# print('SLICES: {}'.format(slices)) # DEBUG
# with tf.variable_scope('GPU_%i' % idev), \
# tf.variable_scope(global_scope, reuse=idev > 0), \
# tf.variable_scope('GPU_{}'.format(idev),
# reuse=idev > 0) as var_scope, \
with tf.device(dev), \
tf.variable_scope(global_scope, reuse=idev > 0), \
tf.name_scope('tower_%i' % idev):
# NOTE: Currently not using model_creator. Did not observe
# any benefit in such an implementation.
# Instantiate model under device context. More complicated.
# Need to use optimizer synchronization in this scenario.
# model_ = model_creator()
# If using NCCL without re-instantiating the model then must
# set the colocate_gradients_with_ops to False in optimizer.
# if idev == 0:
# # SET STATE: Instance of serial model for checkpointing
# self._smodel = model_ # for ability to checkpoint
# Handle multi-output case
modeltower = model(slices)
if not isinstance(modeltower, list):
modeltower = [modeltower]
for imt, mt in enumerate(modeltower):
towers[imt].append(mt)
params = mt.graph._collections['trainable_variables']
# params = model_.trainable_weights
# params = tf.get_collection(
# tf.GraphKeys.TRAINABLE_VARIABLES, scope=var_scope.name)
# params = modeltower.graph._collections['trainable_variables']
# print('PARAMS: {}'.format(params)) # DEBUG
self._tower_params.append(params)
with tf.device(self._ps_device):
# merged = Concatenate(axis=0)(towers)
merged = [Concatenate(axis=0)(tw) for tw in towers]
# self._enqueue_ops.append(tf.group(*gpucopy_ops))
self._enqueue_ops += gpucopy_ops
kwargs['inputs'] = model.inputs
kwargs['outputs'] = merged
return kwargs
def compile(self, *args, **kwargs):
'''Refer to Model.compile docstring for parameters. Override
functionality is documented below.
:override compile: Override Model.compile method to check for options
that the optimizer is multi-gpu enabled, and synchronize initial
variables.
'''
initsync = self._initsync
usenccl = self._usenccl
opt = kwargs['optimizer']
# if isinstance(opt, str):
if not isinstance(opt, KO.Optimizer):
opt = KO.get(opt)
kwargs['optimizer'] = opt
if self._syncopt and not getattr(opt, 'ismgpu', False):
raise RuntimeError(
'Multi-GPU synchronization model requires a multi-GPU '
'optimizer. Instead got: {}'.format(opt))
opt.usenccl = usenccl
if self._enqueue_ops:
# Produces a warning that kwargs are ignored for Tensorflow. Patch
# Function in tensorflow_backend to use the enqueue_ops option.
kwargs['fetches'] = self._enqueue_ops
super(ModelMGPU, self).compile(*args, **kwargs)
if initsync:
self._run_initsync()
def _run_initsync(self):
# tparams = [list(chain(*tp)) for tp in self._tower_params]
tparams = self._tower_params
# Check to prevent from unnecessarily re-initializing and
# synchronizing, i.e. when the model loads the weights.
for v in chain.from_iterable(tparams):
if getattr(v, '_keras_initialized', False):
return
KB.manual_variable_initialization(True)
sess = KB.get_session()
KB.manual_variable_initialization(False)
# glob_variables = tf.global_variables()
# sess.run(tf.variables_initializer(glob_variables))
# Initialize on GPU0 and sync to other GPUs
init_op = tf.variables_initializer(tparams[0])
# init_op = tf.variables_initializer(self._tower_params[0])
# init_op = tf.variables_initializer(self.trainable_weights)
sess.run(init_op)
# Important if using model_creator. Not necessary if the model instance is
# reused in which case the model layers are shared between slices
# and are automatically sync'd.
sync_op = all_sync_params(tparams, self._gdev_list,
usenccl=self._usenccl)
sess.run(sync_op)
for v in chain.from_iterable(tparams):
v._keras_initialized = True
# Data-parallel ref: https://github.com/fchollet/keras/issues/2436
# Tower-parallel:
# ref: https://medium.com/autonomous-agents/multi-gpu-training-of-large-sparse-matrix-on-wide-neuralnetwork-cac7afc52ffe @IgnorePep8
# ref: https://gist.github.com/vvpreetham/1379cc4e208ea33ce3e615067e92fc5e
def make_parallel(serial_model, gdev_list, ps_device='/cpu:0', usenccl=False,
initsync=True, syncopt=False, enqueue=False,
model_class=ModelMGPU):
'''Given a keras model, return an equivalent model which parallelizes
the computation over multiple GPUs listed in the gdev_list.
Data-Parallel:
Each GPU gets a slice of the input batch, applies the model on that slice
and later the outputs of the models are concatenated to a single tensor,
hence the user sees a model that behaves the same as the original.
If getting an out-of-memory (OOM) error when scaling the batch size by the
number of GPUs, there might be input layer(s) in the serial model that run
additional special operations (such as a transformation of some sort) on the
1st GPU as enumerated by Tensorflow. This was an observed behavior for
Embedding layers. The workaround is to pin such layers to the CPU, or
simply pin the instantiation of the serial model to the CPU. The parallelization
will move the operations to GPU.
:Example:
if mgpu_flag:
with tf.device('/cpu:0'):
# define the serial model.
model_serial = get_model_serial()
gdev_list = get_available_gpus()
model = make_parallel(model_serial, gdev_list)
else:
model = def_model_serial()
:param Model serial_model: Serial i.e. non-multi GPU Keras model.
:param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...]
Use function get_available_gpus to get the list of available gpus.
This can be a list of strings or list of instances of tf.DeviceSpec.
:param str ps_device: Parameter server device to use.
:param bool usenccl: Use the contrib.nccl Tensorflow library for initial
parameter synchronization and gradients averaging. Note: the model's
usenccl option overrides the optimizer's usenccl option.
Default: False
:param bool initsync: Synchronize initial Variables i.e. weights,
biases, etc. Default: True
:param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer.
Default: False
:param bool enqueue: Use StagingArea in the multi-GPU model. Could
potentially speed up Host-to-Device transfers.
Produces a warning that kwargs are ignored for Tensorflow. The
_patch_tf_backend module monkey patches the Function in
tensorflow_backend to use the enqueue_ops option.
Default: False
:param model_class: Class object to instantiate for multi-gpu models. This
is needed when the ModelMGPU is mixed-in with other classes.
Default: ModelMGPU
:returns: Multi-GPU parallelized model. If ngpus < 2 then do nothing and
return the provided serial_model.
:rtype: ModelMGPU
'''
ngpus = len(gdev_list)
if ngpus < 2:
return serial_model # model_creator()
return model_class(
serial_model=serial_model, gdev_list=gdev_list,
ps_device=ps_device,
enqueue=enqueue, usenccl=usenccl,
initsync=initsync, syncopt=syncopt)
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake VMware VI API implementation.
"""
import collections
import sys
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from nova import exception
from nova.virt.vmwareapi import constants
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
'files', 'ClusterComputeResource', 'HostStorageSystem',
'Folder']
_FAKE_FILE_SIZE = 1024
_FAKE_VCENTER_UUID = '497c514c-ef5e-4e7f-8d93-ec921993b93a'
_db_content = {}
_array_types = {}
_vim_map = {}
LOG = logging.getLogger(__name__)
def reset():
"""Resets the db contents."""
cleanup()
create_network()
create_folder()
create_host_network_system()
create_host_storage_system()
ds_ref1 = create_datastore('ds1', 1024, 500)
create_host(ds_ref=ds_ref1)
ds_ref2 = create_datastore('ds2', 1024, 500)
create_host(ds_ref=ds_ref2)
create_datacenter('dc1', ds_ref1)
create_datacenter('dc2', ds_ref2)
create_res_pool()
create_cluster('test_cluster', ds_ref1)
create_cluster('test_cluster2', ds_ref2)
def cleanup():
"""Clear the db contents."""
for c in _CLASSES:
# We fake the datastore by keeping the file references as a list of
# names in the db
if c == 'files':
_db_content[c] = []
else:
_db_content[c] = {}
def _create_object(table, table_obj):
"""Create an object in the db."""
_db_content.setdefault(table, {})
_db_content[table][table_obj.obj] = table_obj
def _get_object(obj_ref):
"""Get object for the give reference."""
return _db_content[obj_ref.type][obj_ref]
def _get_objects(obj_type):
"""Get objects of the type."""
lst_objs = FakeRetrieveResult()
for key in _db_content[obj_type]:
lst_objs.add_object(_db_content[obj_type][key])
return lst_objs
def _convert_to_array_of_mor(mors):
"""Wraps the given array into a DataObject."""
array_of_mors = DataObject()
array_of_mors.ManagedObjectReference = mors
return array_of_mors
def _convert_to_array_of_opt_val(optvals):
"""Wraps the given array into a DataObject."""
array_of_optv = DataObject()
array_of_optv.OptionValue = optvals
return array_of_optv
def _create_array_of_type(t):
"""Returns an array to contain objects of type t."""
if t in _array_types:
return _array_types[t]()
array_type_name = 'ArrayOf%s' % t
array_type = type(array_type_name, (DataObject,), {})
def __init__(self):
super(array_type, self).__init__(array_type_name)
setattr(self, t, [])
setattr(array_type, '__init__', __init__)
_array_types[t] = array_type
return array_type()
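# Illustrative sketch of the dynamically created array types (mirrors the
# 'ArrayOf<type>' naming used by the real API; 'VirtualDevice' is just the
# example used elsewhere in this module):
#
#     devices = _create_array_of_type('VirtualDevice')
#     devices.VirtualDevice        # -> [] (list attribute named after the type)
#     type(devices).__name__       # -> 'ArrayOfVirtualDevice'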
class FakeRetrieveResult(object):
"""Object to retrieve a ObjectContent list."""
def __init__(self, token=None):
self.objects = []
if token is not None:
self.token = token
def add_object(self, object):
self.objects.append(object)
def _get_object_refs(obj_type):
"""Get object References of the type."""
lst_objs = []
for key in _db_content[obj_type]:
lst_objs.append(key)
return lst_objs
def _update_object(table, table_obj):
"""Update objects of the type."""
_db_content[table][table_obj.obj] = table_obj
class Prop(object):
"""Property Object base class."""
def __init__(self, name=None, val=None):
self.name = name
self.val = val
class ManagedObjectReference(object):
"""A managed object reference is a remote identifier."""
def __init__(self, name="ManagedObject", value=None):
super(ManagedObjectReference, self).__init__()
# Managed Object Reference value attributes
# typically have values like vm-123 or
# host-232 and not UUID.
self.value = value
# Managed Object Reference type
# attributes hold the name of the type
# of the vCenter object the value
# attribute is the identifier for
self.type = name
self._type = name
class ObjectContent(object):
"""ObjectContent array holds dynamic properties."""
# This class is a *fake* of a class sent back to us by
# SOAP. It has its own names. These names are decided
# for us by the API we are *faking* here.
def __init__(self, obj_ref, prop_list=None, missing_list=None):
self.obj = obj_ref
if not isinstance(prop_list, collections.abc.Iterable):
prop_list = []
if not isinstance(missing_list, collections.abc.Iterable):
missing_list = []
# propSet is the name your Python code will need to
# use since this is the name that the API will use
if prop_list:
self.propSet = prop_list
# missingSet is the name your python code will
# need to use since this is the name that the
# API we are talking to will use.
if missing_list:
self.missingSet = missing_list
class ManagedObject(object):
"""Managed Object base class."""
_counter = 0
def __init__(self, mo_id_prefix="obj"):
"""Sets the obj property which acts as a reference to the object."""
object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
object.__setattr__(self, 'propSet', [])
object.__setattr__(self, 'obj',
ManagedObjectReference(self.__class__.__name__,
self.mo_id))
def set(self, attr, val):
"""Sets an attribute value. Not using the __setattr__ directly for we
want to set attributes of the type 'a.b.c' and using this function
class we set the same.
"""
self.__setattr__(attr, val)
def get(self, attr):
"""Gets an attribute. Used as an intermediary to get nested
property like 'a.b.c' value.
"""
return self.__getattr__(attr)
def delete(self, attr):
"""Deletes an attribute."""
self.propSet = [elem for elem in self.propSet if elem.name != attr]
def __setattr__(self, attr, val):
# TODO(hartsocks): this adds unnecessary complexity to the class
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
elem = Prop()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
# TODO(hartsocks): remove this
# in a real ManagedObject you have to iterate the propSet
# in a real ManagedObject, the propSet is a *set* not a list
for elem in self.propSet:
if elem.name == attr:
return elem.val
msg = "Property %(attr)s not set for the managed object %(name)s"
raise exception.NovaException(msg % {'attr': attr,
'name': self.__class__.__name__})
def _generate_moid(self, prefix):
"""Generates a new Managed Object ID."""
self.__class__._counter += 1
return prefix + "-" + str(self.__class__._counter)
def __repr__(self):
return jsonutils.dumps({elem.name: elem.val
for elem in self.propSet})
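# Illustrative sketch of the propSet-backed attribute access implemented above
# (the 'vm' prefix and property name are examples):
#
#     vm = ManagedObject('vm')                  # vm.obj.value like 'vm-1'
#     vm.set('runtime.powerState', 'poweredOn')
#     vm.get('runtime.powerState')              # -> 'poweredOn'
#     vm.delete('runtime.powerState')           # a later get() raises NovaException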
class DataObject(object):
"""Data object base class."""
def __init__(self, obj_name=None):
if obj_name is None:
obj_name = 'ns0:' + self.__class__.__name__
self.obj_name = obj_name
def __repr__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class HostInternetScsiHba(DataObject):
"""iSCSI Host Bus Adapter."""
def __init__(self, iscsi_name=None):
super(HostInternetScsiHba, self).__init__()
self.device = 'vmhba33'
self.key = 'key-vmhba33'
self.iScsiName = iscsi_name
class FileAlreadyExists(DataObject):
"""File already exists class."""
def __init__(self):
super(FileAlreadyExists, self).__init__()
self.__name__ = vexc.FILE_ALREADY_EXISTS
class FileNotFound(DataObject):
"""File not found class."""
def __init__(self):
super(FileNotFound, self).__init__()
self.__name__ = vexc.FILE_NOT_FOUND
class FileFault(DataObject):
"""File fault."""
def __init__(self):
super(FileFault, self).__init__()
self.__name__ = vexc.FILE_FAULT
class CannotDeleteFile(DataObject):
"""Cannot delete file."""
def __init__(self):
super(CannotDeleteFile, self).__init__()
self.__name__ = vexc.CANNOT_DELETE_FILE
class FileLocked(DataObject):
"""File locked."""
def __init__(self):
super(FileLocked, self).__init__()
self.__name__ = vexc.FILE_LOCKED
class VirtualDisk(DataObject):
"""Virtual Disk class."""
def __init__(self, controllerKey=0, unitNumber=0):
super(VirtualDisk, self).__init__()
self.key = 0
self.controllerKey = controllerKey
self.unitNumber = unitNumber
class VirtualDiskFlatVer2BackingInfo(DataObject):
"""VirtualDiskFlatVer2BackingInfo class."""
def __init__(self):
super(VirtualDiskFlatVer2BackingInfo, self).__init__()
self.thinProvisioned = False
self.eagerlyScrub = False
class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
"""VirtualDiskRawDiskMappingVer1BackingInfo class."""
def __init__(self):
super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
self.lunUuid = ""
class VirtualIDEController(DataObject):
def __init__(self, key=0):
self.key = key
class VirtualLsiLogicController(DataObject):
"""VirtualLsiLogicController class."""
def __init__(self, key=0, scsiCtlrUnitNumber=0, busNumber=0):
self.key = key
self.busNumber = busNumber
self.scsiCtlrUnitNumber = scsiCtlrUnitNumber
self.device = []
class VirtualLsiLogicSASController(DataObject):
"""VirtualLsiLogicSASController class."""
pass
class VirtualPCNet32(DataObject):
"""VirtualPCNet32 class."""
def __init__(self):
super(VirtualPCNet32, self).__init__()
self.key = 4000
class OptionValue(DataObject):
"""OptionValue class."""
def __init__(self, key=None, value=None):
super(OptionValue, self).__init__()
self.key = key
self.value = value
class VirtualMachine(ManagedObject):
"""Virtual Machine class."""
def __init__(self, **kwargs):
super(VirtualMachine, self).__init__("vm")
self.set("name", kwargs.get("name", 'test-vm'))
self.set("runtime.connectionState",
kwargs.get("conn_state", "connected"))
self.set("summary.config.guestId",
kwargs.get("guest", constants.DEFAULT_OS_TYPE))
ds_do = kwargs.get("ds", None)
self.set("datastore", _convert_to_array_of_mor(ds_do))
self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
"toolsOk"))
self.set("summary.guest.toolsRunningStatus", kwargs.get(
"toolsrunningstate", "guestToolsRunning"))
self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
self.set("config.files.vmPathName", kwargs.get("vmPathName"))
self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("summary.config.instanceUuid", kwargs.get("instanceUuid"))
self.set("version", kwargs.get("version"))
devices = _create_array_of_type('VirtualDevice')
devices.VirtualDevice = kwargs.get("virtual_device", [])
self.set("config.hardware.device", devices)
exconfig_do = kwargs.get("extra_config", None)
self.set("config.extraConfig",
_convert_to_array_of_opt_val(exconfig_do))
if exconfig_do:
for optval in exconfig_do:
self.set('config.extraConfig["%s"]' % optval.key, optval)
self.set('runtime.host', kwargs.get("runtime_host", None))
self.device = kwargs.get("virtual_device", [])
# Sample of diagnostics data is below.
config = [
('template', False),
('vmPathName', 'fake_path'),
('memorySizeMB', 512),
('cpuReservation', 0),
('memoryReservation', 0),
('numCpu', 1),
('numEthernetCards', 1),
('numVirtualDisks', 1)]
self.set("summary.config", config)
quickStats = [
('overallCpuUsage', 0),
('overallCpuDemand', 0),
('guestMemoryUsage', 0),
('hostMemoryUsage', 141),
('balloonedMemory', 0),
('consumedOverheadMemory', 20)]
self.set("summary.quickStats", quickStats)
key1 = {'key': 'cpuid.AES'}
key2 = {'key': 'cpuid.AVX'}
runtime = [
('connectionState', 'connected'),
('powerState', 'poweredOn'),
('toolsInstallerMounted', False),
('suspendInterval', 0),
('memoryOverhead', 21417984),
('maxCpuUsage', 2000),
('featureRequirement', [key1, key2])]
self.set("summary.runtime", runtime)
def _update_extra_config(self, extra):
extra_config = self.get("config.extraConfig")
values = extra_config.OptionValue
for value in values:
if value.key == extra.key:
value.value = extra.value
return
kv = DataObject()
kv.key = extra.key
kv.value = extra.value
extra_config.OptionValue.append(kv)
self.set("config.extraConfig", extra_config)
extra_config = self.get("config.extraConfig")
def reconfig(self, factory, val):
"""Called to reconfigure the VM. Actually customizes the property
setting of the Virtual Machine object.
"""
if hasattr(val, 'name') and val.name:
self.set("name", val.name)
if hasattr(val, 'extraConfig'):
extraConfigs = _merge_extraconfig(
self.get("config.extraConfig").OptionValue,
val.extraConfig)
self.get("config.extraConfig").OptionValue = extraConfigs
if hasattr(val, 'instanceUuid') and val.instanceUuid is not None:
if val.instanceUuid == "":
val.instanceUuid = uuidutils.generate_uuid()
self.set("summary.config.instanceUuid", val.instanceUuid)
try:
if not hasattr(val, 'deviceChange'):
return
if hasattr(val, 'extraConfig'):
# there are 2 cases - new entry or update an existing one
for extra in val.extraConfig:
self._update_extra_config(extra)
if len(val.deviceChange) < 2:
return
# Case of Reconfig of VM to attach disk
controller_key = val.deviceChange[0].device.controllerKey
filename = val.deviceChange[0].device.backing.fileName
disk = VirtualDisk()
disk.controllerKey = controller_key
disk_backing = VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk_backing.key = -101
disk.backing = disk_backing
disk.capacityInBytes = 1024
disk.capacityInKB = 1
controller = VirtualLsiLogicController()
controller.key = controller_key
devices = _create_array_of_type('VirtualDevice')
devices.VirtualDevice = [disk, controller, self.device[0]]
self.set("config.hardware.device", devices)
except AttributeError:
pass
class Folder(ManagedObject):
"""Folder class."""
def __init__(self):
super(Folder, self).__init__("Folder")
self.set("childEntity", [])
class Network(ManagedObject):
"""Network class."""
def __init__(self):
super(Network, self).__init__("network")
self.set("summary.name", "vmnet0")
class ResourcePool(ManagedObject):
"""Resource Pool class."""
def __init__(self, name="test_ResPool", value="resgroup-test"):
super(ResourcePool, self).__init__("rp")
self.set("name", name)
summary = DataObject()
runtime = DataObject()
config = DataObject()
memory = DataObject()
cpu = DataObject()
memoryAllocation = DataObject()
cpuAllocation = DataObject()
vm_list = DataObject()
memory.maxUsage = 1000 * units.Mi
memory.overallUsage = 500 * units.Mi
cpu.maxUsage = 10000
cpu.overallUsage = 1000
runtime.cpu = cpu
runtime.memory = memory
summary.runtime = runtime
cpuAllocation.limit = 10000
memoryAllocation.limit = 1024
memoryAllocation.reservation = 1024
config.memoryAllocation = memoryAllocation
config.cpuAllocation = cpuAllocation
vm_list.ManagedObjectReference = []
self.set("summary", summary)
self.set("summary.runtime.memory", memory)
self.set("config", config)
self.set("vm", vm_list)
parent = ManagedObjectReference(value=value,
name=name)
owner = ManagedObjectReference(value=value,
name=name)
self.set("parent", parent)
self.set("owner", owner)
class DatastoreHostMount(DataObject):
def __init__(self, value='host-100'):
super(DatastoreHostMount, self).__init__()
host_ref = (_db_content["HostSystem"]
[list(_db_content["HostSystem"].keys())[0]].obj)
host_system = DataObject()
host_system.ManagedObjectReference = [host_ref]
host_system.value = value
self.key = host_system
class ClusterComputeResource(ManagedObject):
"""Cluster class."""
def __init__(self, name="test_cluster"):
super(ClusterComputeResource, self).__init__("domain")
self.set("name", name)
self.set("host", None)
self.set("datastore", None)
self.set("resourcePool", None)
summary = DataObject()
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
summary.effectiveCpu = 10000
self.set("summary", summary)
def _add_root_resource_pool(self, r_pool):
if r_pool:
self.set("resourcePool", r_pool)
def _add_host(self, host_sys):
if host_sys:
hosts = self.get("host")
if hosts is None:
hosts = DataObject()
hosts.ManagedObjectReference = []
self.set("host", hosts)
hosts.ManagedObjectReference.append(host_sys)
# Update summary every time a new host is added
self._update_summary()
def _add_datastore(self, datastore):
if datastore:
datastores = self.get("datastore")
if datastores is None:
datastores = DataObject()
datastores.ManagedObjectReference = []
self.set("datastore", datastores)
datastores.ManagedObjectReference.append(datastore)
# Method to update summary of a cluster upon host addition
def _update_summary(self):
summary = self.get("summary")
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
hosts = self.get("host")
# Compute the aggregate stats
summary.numHosts = len(hosts.ManagedObjectReference)
for host_ref in hosts.ManagedObjectReference:
host_sys = _get_object(host_ref)
connected = host_sys.get("connected")
host_summary = host_sys.get("summary")
summary.numCpuCores += host_summary.hardware.numCpuCores
summary.numCpuThreads += host_summary.hardware.numCpuThreads
summary.totalMemory += host_summary.hardware.memorySize
free_memory = (host_summary.hardware.memorySize / units.Mi -
host_summary.quickStats.overallMemoryUsage)
summary.effectiveMemory += free_memory if connected else 0
summary.numEffectiveHosts += 1 if connected else 0
self.set("summary", summary)
class Datastore(ManagedObject):
"""Datastore class."""
def __init__(self, name="fake-ds", capacity=1024, free=500,
accessible=True, maintenance_mode="normal"):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", capacity * units.Gi)
self.set("summary.freeSpace", free * units.Gi)
self.set("summary.accessible", accessible)
self.set("summary.maintenanceMode", maintenance_mode)
self.set("browser", "")
class HostNetworkSystem(ManagedObject):
"""HostNetworkSystem class."""
def __init__(self, name="networkSystem"):
super(HostNetworkSystem, self).__init__("ns")
self.set("name", name)
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("networkInfo.pnic", net_info_pnic)
class HostStorageSystem(ManagedObject):
"""HostStorageSystem class."""
def __init__(self):
super(HostStorageSystem, self).__init__("storageSystem")
class HostSystem(ManagedObject):
"""Host System class."""
def __init__(self, name="ha-host", connected=True, ds_ref=None,
maintenance_mode=False):
super(HostSystem, self).__init__("host")
self.set("name", name)
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
if not _get_object_refs('HostStorageSystem'):
create_host_storage_system()
host_net_key = list(_db_content["HostNetworkSystem"].keys())[0]
host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
self.set("configManager.networkSystem", host_net_sys)
host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
self.set("configManager.storageSystem", host_storage_sys_key)
if not ds_ref:
ds_ref = create_datastore('local-host-%s' % name, 500, 500)
datastores = DataObject()
datastores.ManagedObjectReference = [ds_ref]
self.set("datastore", datastores)
summary = DataObject()
hardware = DataObject()
hardware.numCpuCores = 8
hardware.numCpuPkgs = 2
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
hardware.uuid = "host-uuid"
hardware.memorySize = units.Gi
summary.hardware = hardware
runtime = DataObject()
if connected:
runtime.connectionState = "connected"
else:
runtime.connectionState = "disconnected"
runtime.inMaintenanceMode = maintenance_mode
summary.runtime = runtime
quickstats = DataObject()
quickstats.overallMemoryUsage = 500
summary.quickStats = quickstats
product = DataObject()
product.name = "VMware ESXi"
product.version = constants.MIN_VC_VERSION
config = DataObject()
config.product = product
summary.config = config
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("summary", summary)
self.set("capability.maxHostSupportedVcpus", 600)
self.set("summary.hardware", hardware)
self.set("summary.runtime", runtime)
self.set("summary.quickStats", quickstats)
self.set("config.network.pnic", net_info_pnic)
self.set("connected", connected)
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][
list(_db_content["Network"].keys())[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = "vSwitch0"
vswitch_do.portgroup = ["PortGroup-vmnet0"]
net_switch = DataObject()
net_switch.HostVirtualSwitch = [vswitch_do]
self.set("config.network.vswitch", net_switch)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-vmnet0"
pg_spec = DataObject()
pg_spec.vlanId = 0
pg_spec.name = "vmnet0"
host_pg_do.spec = pg_spec
host_pg = DataObject()
host_pg.HostPortGroup = [host_pg_do]
self.set("config.network.portgroup", host_pg)
config = DataObject()
storageDevice = DataObject()
iscsi_hba = HostInternetScsiHba()
iscsi_hba.iScsiName = "iscsi-name"
host_bus_adapter_array = DataObject()
host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba]
storageDevice.hostBusAdapter = host_bus_adapter_array
config.storageDevice = storageDevice
self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)
# Set the same on the storage system managed object
host_storage_sys = _get_object(host_storage_sys_key)
host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
host_bus_adapter_array)
def _add_iscsi_target(self, data):
default_lun = DataObject()
default_lun.scsiLun = 'key-vim.host.ScsiDisk-010'
default_lun.key = 'key-vim.host.ScsiDisk-010'
default_lun.deviceName = 'fake-device'
default_lun.uuid = 'fake-uuid'
scsi_lun_array = DataObject()
scsi_lun_array.ScsiLun = [default_lun]
self.set("config.storageDevice.scsiLun", scsi_lun_array)
transport = DataObject()
transport.address = [data['target_portal']]
transport.iScsiName = data['target_iqn']
default_target = DataObject()
default_target.lun = [default_lun]
default_target.transport = transport
iscsi_adapter = DataObject()
iscsi_adapter.adapter = 'key-vmhba33'
iscsi_adapter.transport = transport
iscsi_adapter.target = [default_target]
iscsi_topology = DataObject()
iscsi_topology.adapter = [iscsi_adapter]
self.set("config.storageDevice.scsiTopology", iscsi_topology)
def _add_port_group(self, spec):
"""Adds a port group to the host system object in the db."""
pg_name = spec.name
vswitch_name = spec.vswitchName
vlanid = spec.vlanId
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = vswitch_name
vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
vswitches = self.get("config.network.vswitch").HostVirtualSwitch
vswitches.append(vswitch_do)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-%s" % pg_name
pg_spec = DataObject()
pg_spec.vlanId = vlanid
pg_spec.name = pg_name
host_pg_do.spec = pg_spec
host_pgrps = self.get("config.network.portgroup").HostPortGroup
host_pgrps.append(host_pg_do)
class Datacenter(ManagedObject):
"""Datacenter class."""
def __init__(self, name="ha-datacenter", ds_ref=None):
super(Datacenter, self).__init__("dc")
self.set("name", name)
if _db_content.get("Folder", None) is None:
create_folder()
folder_ref = _db_content["Folder"][
list(_db_content["Folder"].keys())[0]].obj
folder_do = DataObject()
folder_do.ManagedObjectReference = [folder_ref]
self.set("vmFolder", folder_ref)
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][
list(_db_content["Network"].keys())[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
if ds_ref:
datastore = DataObject()
datastore.ManagedObjectReference = [ds_ref]
else:
datastore = None
self.set("datastore", datastore)
class Task(ManagedObject):
"""Task class."""
def __init__(self, task_name, state="running", result=None,
error_fault=None):
super(Task, self).__init__("Task")
info = DataObject()
info.name = task_name
info.state = state
if state == 'error':
error = DataObject()
error.localizedMessage = "Error message"
if not error_fault:
error.fault = DataObject()
else:
error.fault = error_fault
info.error = error
info.result = result
self.set("info", info)
def create_host_network_system():
host_net_system = HostNetworkSystem()
_create_object("HostNetworkSystem", host_net_system)
def create_host_storage_system():
host_storage_system = HostStorageSystem()
_create_object("HostStorageSystem", host_storage_system)
def create_host(ds_ref=None):
host_system = HostSystem(ds_ref=ds_ref)
_create_object('HostSystem', host_system)
def create_datacenter(name, ds_ref=None):
data_center = Datacenter(name, ds_ref)
_create_object('Datacenter', data_center)
def create_datastore(name, capacity, free):
data_store = Datastore(name, capacity, free)
_create_object('Datastore', data_store)
return data_store.obj
def create_res_pool():
res_pool = ResourcePool()
_create_object('ResourcePool', res_pool)
return res_pool.obj
def create_folder():
folder = Folder()
_create_object('Folder', folder)
return folder.obj
def create_network():
network = Network()
_create_object('Network', network)
def create_cluster(name, ds_ref):
cluster = ClusterComputeResource(name=name)
cluster._add_host(_get_object_refs("HostSystem")[0])
cluster._add_host(_get_object_refs("HostSystem")[1])
cluster._add_datastore(ds_ref)
cluster._add_root_resource_pool(create_res_pool())
_create_object('ClusterComputeResource', cluster)
return cluster
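# Note: create_cluster assumes at least two HostSystem objects have already been
# registered (via create_host), because it attaches both
# _get_object_refs("HostSystem")[0] and [1] to the new cluster.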
def create_vm(uuid=None, name=None,
cpus=1, memory=128, devices=None,
vmPathName=None, extraConfig=None,
res_pool_ref=None, host_ref=None,
version=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
if name is None:
name = uuid
if devices is None:
devices = []
if vmPathName is None:
vm_path = ds_obj.DatastorePath(
list(_db_content['Datastore'].values())[0])
else:
vm_path = ds_obj.DatastorePath.parse(vmPathName)
if res_pool_ref is None:
res_pool_ref = list(_db_content['ResourcePool'].keys())[0]
if host_ref is None:
host_ref = list(_db_content["HostSystem"].keys())[0]
# Fill in the default path to the vmx file if we were only given a
# datastore. Note that if you create a VM with vmPathName '[foo]', when you
# retrieve vmPathName it will be '[foo] uuid/uuid.vmx'. Hence we use
# vm_path below for the stored value of vmPathName.
if vm_path.rel_path == '':
vm_path = vm_path.join(name, name + '.vmx')
for key, value in _db_content["Datastore"].items():
if value.get('summary.name') == vm_path.datastore:
ds = key
break
else:
ds = create_datastore(vm_path.datastore, 1024, 500)
vm_dict = {"name": name,
"ds": [ds],
"runtime_host": host_ref,
"powerstate": "poweredOff",
"vmPathName": str(vm_path),
"numCpu": cpus,
"mem": memory,
"extra_config": extraConfig,
"virtual_device": devices,
"instanceUuid": uuid,
"version": version}
vm = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", vm)
res_pool = _get_object(res_pool_ref)
res_pool.vm.ManagedObjectReference.append(vm.obj)
return vm.obj
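# A minimal usage sketch of the path expansion described above, assuming the
# fake db already holds a host, resource pool and datastore; the names below
# are hypothetical:
#
#     vm_ref = create_vm(name='demo', vmPathName='[fake-ds]')
#     _get_vm_mdo(vm_ref).get('config.files.vmPathName')
#     # -> '[fake-ds] demo/demo.vmx'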
def create_task(task_name, state="running", result=None, error_fault=None):
task = Task(task_name, state, result, error_fault)
_create_object("Task", task)
return task
def _add_file(file_path):
"""Adds a file reference to the db."""
_db_content["files"].append(file_path)
def _remove_file(file_path):
"""Removes a file reference from the db."""
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
raise vexc.FileNotFoundException(file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
to_delete = set()
for file in _db_content.get("files"):
if file.find(file_path) != -1:
to_delete.add(file)
for file in to_delete:
_db_content.get("files").remove(file)
def fake_plug_vifs(*args, **kwargs):
"""Fakes plugging vifs."""
pass
def fake_get_network(*args, **kwargs):
"""Fake get network."""
return {'type': 'fake'}
def assertPathExists(test, path):
test.assertIn(path, _db_content.get('files'))
def assertPathNotExists(test, path):
test.assertNotIn(path, _db_content.get('files'))
def get_file(file_path):
"""Check if file exists in the db."""
return file_path in _db_content.get("files")
def fake_upload_image(context, image, instance, **kwargs):
"""Fakes the upload of an image."""
pass
def fake_fetch_image(context, instance, host, port, dc_name, ds_name,
file_path, cookies=None):
"""Fakes the fetch of an image."""
ds_file_path = "[" + ds_name + "] " + file_path
_add_file(ds_file_path)
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound("There is no VM registered")
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound("Virtual Machine with ref %s is not "
"there" % vm_ref)
return _db_content.get("VirtualMachine")[vm_ref]
def _merge_extraconfig(existing, changes):
"""Imposes the changes in extraConfig over the existing extraConfig."""
existing = existing or []
if changes:
for c in changes:
if len([x for x in existing if x.key == c.key]) > 0:
extraConf = [x for x in existing if x.key == c.key][0]
extraConf.value = c.value
else:
existing.append(c)
return existing
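# A minimal sketch of the merge behaviour above, using the module's OptionValue
# data object; the keys and values are hypothetical:
#
#     existing = [OptionValue(key='a', value='1')]
#     merged = _merge_extraconfig(existing,
#                                 [OptionValue(key='a', value='2'),
#                                  OptionValue(key='b', value='3')])
#     # merged is the same list object, now holding a=2 and b=3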
class FakeFactory(object):
"""Fake factory class for the suds client."""
def create(self, obj_name):
"""Creates a namespace object."""
klass = obj_name[4:] # skip 'ns0:'
module = sys.modules[__name__]
fake_klass = getattr(module, klass, None)
if fake_klass is None:
return DataObject(obj_name)
else:
return fake_klass()
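# A minimal usage sketch of the factory above; the unknown type name is
# hypothetical:
#
#     factory = FakeFactory()
#     factory.create('ns0:VirtualDisk')     # -> VirtualDisk instance
#     factory.create('ns0:NoSuchFakeType')  # -> DataObject('ns0:NoSuchFakeType')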
class SharesInfo(DataObject):
def __init__(self):
super(SharesInfo, self).__init__()
self.level = None
self.shares = None
class VirtualEthernetCardResourceAllocation(DataObject):
def __init__(self):
super(VirtualEthernetCardResourceAllocation, self).__init__()
self.share = SharesInfo()
class VirtualE1000(DataObject):
def __init__(self):
super(VirtualE1000, self).__init__()
self.resourceAllocation = VirtualEthernetCardResourceAllocation()
class FakeService(DataObject):
"""Fake service class."""
def Logout(self, session_manager):
pass
def FindExtension(self, extension_manager, key):
return []
class FakeClient(DataObject):
"""Fake client class."""
def __init__(self):
"""Creates a namespace object."""
self.service = FakeService()
class FakeSession(object):
"""Fake Session Class."""
def __init__(self):
self.vim = FakeVim()
def _call_method(self, module, method, *args, **kwargs):
raise NotImplementedError()
def _wait_for_task(self, task_ref):
raise NotImplementedError()
class FakeObjectRetrievalSession(FakeSession):
"""A session for faking object retrieval tasks.
_call_method() returns a given set of objects
sequentially, regardless of the method called.
"""
def __init__(self, *ret):
super(FakeObjectRetrievalSession, self).__init__()
self.ret = ret
self.ind = 0
def _call_method(self, module, method, *args, **kwargs):
if (method == 'continue_retrieval' or
method == 'cancel_retrieval'):
return
# return fake objects in a circular manner
self.ind = (self.ind + 1) % len(self.ret)
return self.ret[self.ind - 1]
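# A minimal usage sketch of the circular retrieval above; the object names are
# hypothetical ('continue_retrieval' and 'cancel_retrieval' simply return None):
#
#     session = FakeObjectRetrievalSession('obj-a', 'obj-b')
#     session._call_method(None, 'get_object_property')  # -> 'obj-a'
#     session._call_method(None, 'get_object_property')  # -> 'obj-b'
#     session._call_method(None, 'get_object_property')  # -> 'obj-a' again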
def get_fake_vim_object(vmware_api_session):
key = vmware_api_session.__repr__()
if key not in _vim_map:
_vim_map[key] = FakeVim()
return _vim_map[key]
class FakeVim(object):
"""Fake VIM Class."""
def __init__(self, protocol="https", host="localhost", trace=None):
"""Initializes the suds client object, sets the service content
contents and the cookies for the session.
"""
self._session = None
self.client = FakeClient()
self.client.factory = FakeFactory()
transport = DataObject()
transport.cookiejar = "Fake-CookieJar"
options = DataObject()
options.transport = transport
self.client.options = options
service_content = self.client.factory.create('ns0:ServiceContent')
service_content.propertyCollector = "PropCollector"
service_content.virtualDiskManager = "VirtualDiskManager"
service_content.fileManager = "FileManager"
service_content.rootFolder = "RootFolder"
service_content.sessionManager = "SessionManager"
service_content.extensionManager = "ExtensionManager"
service_content.searchIndex = "SearchIndex"
about_info = DataObject()
about_info.name = "VMware vCenter Server"
about_info.version = constants.MIN_VC_VERSION
about_info.instanceUuid = _FAKE_VCENTER_UUID
service_content.about = about_info
self._service_content = service_content
@property
def service_content(self):
return self._service_content
def __repr__(self):
return "Fake VIM Object"
def __str__(self):
return "Fake VIM Object"
def _login(self):
"""Logs in and sets the session object in the db."""
self._session = uuidutils.generate_uuid()
session = DataObject()
session.key = self._session
session.userName = 'sessionUserName'
_db_content['session'][self._session] = session
return session
def _terminate_session(self, *args, **kwargs):
"""Terminates a session."""
s = kwargs.get("sessionId")[0]
if s not in _db_content['session']:
return
del _db_content['session'][s]
def _check_session(self):
"""Checks if the session is active."""
if (self._session is None or self._session not in
_db_content['session']):
LOG.debug("Session is faulty")
raise vexc.VimFaultException([vexc.NOT_AUTHENTICATED],
"Session Invalid")
def _session_is_active(self, *args, **kwargs):
try:
self._check_session()
return True
except Exception:
return False
def _create_vm(self, method, *args, **kwargs):
"""Creates and registers a VM object with the Host System."""
config_spec = kwargs.get("config")
if config_spec.guestId not in constants.VALID_OS_TYPES:
ex = vexc.VMwareDriverException('A specified parameter was '
'not correct.')
return create_task(method, "error", error_fault=ex).obj
pool = kwargs.get('pool')
version = getattr(config_spec, 'version', None)
devices = []
for device_change in config_spec.deviceChange:
if device_change.operation == 'add':
devices.append(device_change.device)
vm_ref = create_vm(config_spec.instanceUuid, config_spec.name,
config_spec.numCPUs, config_spec.memoryMB,
devices, config_spec.files.vmPathName,
config_spec.extraConfig, pool,
version=version)
task_mdo = create_task(method, "success", result=vm_ref)
return task_mdo.obj
def _create_folder(self, method, *args, **kwargs):
return create_folder()
def _reconfig_vm(self, method, *args, **kwargs):
"""Reconfigures a VM and sets the properties supplied."""
vm_ref = args[0]
vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _rename(self, method, *args, **kwargs):
vm_ref = args[0]
vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.set('name', kwargs['newName'])
task_mdo = create_task(method, "success")
return task_mdo.obj
def _create_copy_disk(self, method, vmdk_file_path):
"""Creates/copies a vmdk file object in the datastore."""
# We need to add/create both .vmdk and .-flat.vmdk files
flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_add_file(vmdk_file_path)
_add_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _extend_disk(self, method, size):
"""Extend disk size when create an instance."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _snapshot_vm(self, method):
"""Snapshots a VM. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _find_all_by_uuid(self, *args, **kwargs):
uuid = kwargs.get('uuid')
vm_refs = []
for vm_ref in _db_content.get("VirtualMachine"):
vm = _get_object(vm_ref)
vm_uuid = vm.get("summary.config.instanceUuid")
if vm_uuid == uuid:
vm_refs.append(vm_ref)
return vm_refs
def _delete_snapshot(self, method, *args, **kwargs):
"""Deletes a VM snapshot. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _delete_file(self, method, *args, **kwargs):
"""Deletes a file from the datastore."""
_remove_file(kwargs.get("name"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _just_return(self):
"""Fakes a return."""
return
def _just_return_task(self, method):
"""Fakes a task return."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _clone_vm(self, method, *args, **kwargs):
"""Fakes a VM clone."""
"""Creates and registers a VM object with the Host System."""
source_vmref = args[0]
source_vm_mdo = _get_vm_mdo(source_vmref)
clone_spec = kwargs.get("spec")
vm_dict = {
"name": kwargs.get("name"),
"ds": source_vm_mdo.get("datastore"),
"runtime_host": source_vm_mdo.get("runtime.host"),
"powerstate": source_vm_mdo.get("runtime.powerState"),
"vmPathName": source_vm_mdo.get("config.files.vmPathName"),
"numCpu": source_vm_mdo.get("summary.config.numCpu"),
"mem": source_vm_mdo.get("summary.config.memorySizeMB"),
"extra_config": source_vm_mdo.get("config.extraConfig").OptionValue,
"virtual_device":
source_vm_mdo.get("config.hardware.device").VirtualDevice,
"instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")}
if hasattr(clone_spec, 'config'):
# Impose the config changes specified in the config property
if (hasattr(clone_spec.config, 'instanceUuid') and
clone_spec.config.instanceUuid is not None):
vm_dict["instanceUuid"] = clone_spec.config.instanceUuid
if hasattr(clone_spec.config, 'extraConfig'):
extraConfigs = _merge_extraconfig(vm_dict["extra_config"],
clone_spec.config.extraConfig)
vm_dict["extra_config"] = extraConfigs
virtual_machine = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", virtual_machine)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _unregister_vm(self, method, *args, **kwargs):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
_get_vm_mdo(vm_ref)
del _db_content["VirtualMachine"][vm_ref]
task_mdo = create_task(method, "success")
return task_mdo.obj
def _search_ds(self, method, *args, **kwargs):
"""Searches the datastore for a file."""
# TODO(garyk): add support for spec parameter
ds_path = kwargs.get("datastorePath")
matched_files = set()
# Check if we are searching for a file or a directory
directory = False
dname = '%s/' % ds_path
for file in _db_content.get("files"):
if file == dname:
directory = True
break
# A directory search implies that we must return all
# subdirectories
if directory:
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
if not file.endswith(ds_path):
path = file.replace(dname, '', 1).split('/')
if path:
matched_files.add(path[0])
if not matched_files:
matched_files.add('/')
else:
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
matched_files.add(ds_path)
if matched_files:
result = DataObject()
result.path = ds_path
result.file = []
for file in matched_files:
matched = DataObject()
matched.path = file
matched.fileSize = 1024
result.file.append(matched)
task_mdo = create_task(method, "success", result=result)
else:
task_mdo = create_task(method, "error", error_fault=FileNotFound())
return task_mdo.obj
def _move_file(self, method, *args, **kwargs):
source = kwargs.get('sourceName')
destination = kwargs.get('destinationName')
new_files = []
if source != destination:
for file in _db_content.get("files"):
if source in file:
new_file = file.replace(source, destination)
new_files.append(new_file)
# if source is not a file then the children will also
# be deleted
_remove_file(source)
for file in new_files:
_add_file(file)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _make_dir(self, method, *args, **kwargs):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if get_file(ds_path):
raise vexc.FileAlreadyExistsException()
_db_content["files"].append('%s/' % ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound("No Virtual Machine has been "
"registered yet")
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound("Virtual Machine with ref %s is not "
"there" % vm_ref)
vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
vm_mdo.set("runtime.powerState", pwr_state)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _retrieve_properties_continue(self, method, *args, **kwargs):
"""Continues the retrieve."""
return FakeRetrieveResult()
def _retrieve_properties_cancel(self, method, *args, **kwargs):
"""Cancels the retrieve."""
return None
def _retrieve_properties(self, method, *args, **kwargs):
"""Retrieves properties based on the type."""
spec_set = kwargs.get("specSet")[0]
spec_type = spec_set.propSet[0].type
properties = spec_set.propSet[0].pathSet
if not isinstance(properties, list):
properties = properties.split()
objs = spec_set.objectSet
lst_ret_objs = FakeRetrieveResult()
for obj in objs:
try:
obj_ref = obj.obj
if obj_ref == "RootFolder":
# This means that we are retrieving props for all managed
# data objects of the specified 'type' in the entire
# inventory. This gets invoked by vim_util.get_objects.
mdo_refs = _db_content[spec_type]
elif obj_ref.type != spec_type:
# This means that we are retrieving props for the managed
# data objects in the parent object's 'path' property.
# This gets invoked by vim_util.get_inner_objects
# eg. obj_ref = <ManagedObjectReference of a cluster>
# type = 'DataStore'
# path = 'datastore'
# the above will retrieve all datastores in the given
# cluster.
parent_mdo = _db_content[obj_ref.type][obj_ref]
path = obj.selectSet[0].path
mdo_refs = parent_mdo.get(path).ManagedObjectReference
else:
# This means that we are retrieving props of the given
# managed data object. This gets invoked by
# vim_util.get_properties_for_a_collection_of_objects.
mdo_refs = [obj_ref]
for mdo_ref in mdo_refs:
mdo = _db_content[spec_type][mdo_ref]
prop_list = []
for prop_name in properties:
prop = Prop(prop_name, mdo.get(prop_name))
prop_list.append(prop)
obj_content = ObjectContent(mdo.obj, prop_list)
lst_ret_objs.add_object(obj_content)
except Exception:
LOG.exception("_retrieve_properties error")
continue
return lst_ret_objs
def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
_host_sk = list(_db_content["HostSystem"].keys())[0]
host_mdo = _db_content["HostSystem"][_host_sk]
host_mdo._add_port_group(kwargs.get("portgrp"))
def _add_iscsi_send_tgt(self, method, *args, **kwargs):
"""Adds a iscsi send target to the hba."""
send_targets = kwargs.get('targets')
host_storage_sys = _get_objects('HostStorageSystem').objects[0]
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
if hasattr(iscsi_hba, 'configuredSendTarget'):
iscsi_hba.configuredSendTarget.extend(send_targets)
else:
iscsi_hba.configuredSendTarget = send_targets
def __getattr__(self, attr_name):
if attr_name != "Login":
self._check_session()
if attr_name == "Login":
return lambda *args, **kwargs: self._login()
elif attr_name == "SessionIsActive":
return lambda *args, **kwargs: self._session_is_active(
*args, **kwargs)
elif attr_name == "TerminateSession":
return lambda *args, **kwargs: self._terminate_session(
*args, **kwargs)
elif attr_name == "CreateVM_Task":
return lambda *args, **kwargs: self._create_vm(attr_name,
*args, **kwargs)
elif attr_name == "CreateFolder":
return lambda *args, **kwargs: self._create_folder(attr_name,
*args, **kwargs)
elif attr_name == "ReconfigVM_Task":
return lambda *args, **kwargs: self._reconfig_vm(attr_name,
*args, **kwargs)
elif attr_name == "Rename_Task":
return lambda *args, **kwargs: self._rename(attr_name,
*args, **kwargs)
elif attr_name == "CreateVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("name"))
elif attr_name == "DeleteDatastoreFile_Task":
return lambda *args, **kwargs: self._delete_file(attr_name,
*args, **kwargs)
elif attr_name == "PowerOnVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "PowerOffVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOff")
elif attr_name == "RebootGuest":
return lambda *args, **kwargs: self._just_return()
elif attr_name == "ResetVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "SuspendVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "suspended")
elif attr_name == "CreateSnapshot_Task":
return lambda *args, **kwargs: self._snapshot_vm(attr_name)
elif attr_name == "RemoveSnapshot_Task":
return lambda *args, **kwargs: self._delete_snapshot(attr_name,
*args, **kwargs)
elif attr_name == "CopyVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("destName"))
elif attr_name == "ExtendVirtualDisk_Task":
return lambda *args, **kwargs: self._extend_disk(attr_name,
kwargs.get("size"))
elif attr_name == "Destroy_Task":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "UnregisterVM":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "CloneVM_Task":
return lambda *args, **kwargs: self._clone_vm(attr_name,
*args, **kwargs)
elif attr_name == "FindAllByUuid":
return lambda *args, **kwargs: self._find_all_by_uuid(attr_name,
*args, **kwargs)
elif attr_name == "SearchDatastore_Task":
return lambda *args, **kwargs: self._search_ds(attr_name,
*args, **kwargs)
elif attr_name == "MoveDatastoreFile_Task":
return lambda *args, **kwargs: self._move_file(attr_name,
*args, **kwargs)
elif attr_name == "MakeDirectory":
return lambda *args, **kwargs: self._make_dir(attr_name,
*args, **kwargs)
elif attr_name == "RetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties(
attr_name, *args, **kwargs)
elif attr_name == "ContinueRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_continue(
attr_name, *args, **kwargs)
elif attr_name == "CancelRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_cancel(
attr_name, *args, **kwargs)
elif attr_name == "AddPortGroup":
return lambda *args, **kwargs: self._add_port_group(attr_name,
*args, **kwargs)
elif attr_name in ("RebootHost_Task",
"ShutdownHost_Task",
"PowerUpHostFromStandBy_Task",
"EnterMaintenanceMode_Task",
"ExitMaintenanceMode_Task",
"RescanHba"):
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "AddInternetScsiSendTargets":
return lambda *args, **kwargs: self._add_iscsi_send_tgt(attr_name,
*args, **kwargs)
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.api import logger
from robot.utils import (is_dict_like, is_string, is_truthy, plural_or_not,
seq2str, seq2str2, type_name, unic, Matcher)
from robot.utils.asserts import assert_equals
from robot.version import get_version
class NotSet(object):
def __repr__(self):
return ""
NOT_SET = NotSet()
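# NOT_SET is a sentinel default: `Pop From Dictionary` below compares against it
# with `is`, so callers can still pass None as an explicit default value.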
class _List(object):
def convert_to_list(self, item):
"""Converts the given ``item`` to a Python ``list`` type.
Mainly useful for converting tuples and other iterables to lists.
Use `Create List` from the BuiltIn library for constructing new lists.
"""
return list(item)
def append_to_list(self, list_, *values):
"""Adds ``values`` to the end of ``list``.
Example:
| Append To List | ${L1} | xxx | | |
| Append To List | ${L2} | x | y | z |
=>
| ${L1} = ['a', 'xxx']
| ${L2} = ['a', 'b', 'x', 'y', 'z']
"""
for value in values:
list_.append(value)
def insert_into_list(self, list_, index, value):
"""Inserts ``value`` into ``list`` to the position specified with ``index``.
Index ``0`` adds the value into the first position, ``1`` to the second,
and so on. Inserting from right works with negative indices so that
``-1`` is the second last position, ``-2`` third last, and so on. Use
`Append To List` to add items to the end of the list.
If the absolute value of the index is greater than
the length of the list, the value is added at the end
(positive index) or the beginning (negative index). An index
can be given either as an integer or a string that can be
converted to an integer.
Example:
| Insert Into List | ${L1} | 0 | xxx |
| Insert Into List | ${L2} | ${-1} | xxx |
=>
| ${L1} = ['xxx', 'a']
| ${L2} = ['a', 'xxx', 'b']
"""
list_.insert(self._index_to_int(index), value)
def combine_lists(self, *lists):
"""Combines the given ``lists`` together and returns the result.
The given lists are not altered by this keyword.
Example:
| ${x} = | Combine List | ${L1} | ${L2} | |
| ${y} = | Combine List | ${L1} | ${L2} | ${L1} |
=>
| ${x} = ['a', 'a', 'b']
| ${y} = ['a', 'a', 'b', 'a']
| ${L1} and ${L2} are not changed.
"""
ret = []
for item in lists:
ret.extend(item)
return ret
def set_list_value(self, list_, index, value):
"""Sets the value of ``list`` specified by ``index`` to the given ``value``.
Index ``0`` means the first position, ``1`` the second and so on.
Similarly, ``-1`` is the last position, ``-2`` second last, and so on.
Using an index that does not exist on the list causes an error.
The index can be either an integer or a string that can be converted to
an integer.
Example:
| Set List Value | ${L3} | 1 | xxx |
| Set List Value | ${L3} | -1 | yyy |
=>
| ${L3} = ['a', 'xxx', 'yyy']
"""
try:
list_[self._index_to_int(index)] = value
except IndexError:
self._index_error(list_, index)
def remove_values_from_list(self, list_, *values):
"""Removes all occurrences of given ``values`` from ``list``.
It is not an error if a value does not exist in the list at all.
Example:
| Remove Values From List | ${L4} | a | c | e | f |
=>
| ${L4} = ['b', 'd']
"""
for value in values:
while value in list_:
list_.remove(value)
def remove_from_list(self, list_, index):
"""Removes and returns the value specified with an ``index`` from ``list``.
Index ``0`` means the first position, ``1`` the second and so on.
Similarly, ``-1`` is the last position, ``-2`` the second last, and so on.
Using an index that does not exist on the list causes an error.
The index can be either an integer or a string that can be converted
to an integer.
Example:
| ${x} = | Remove From List | ${L2} | 0 |
=>
| ${x} = 'a'
| ${L2} = ['b']
"""
try:
return list_.pop(self._index_to_int(index))
except IndexError:
self._index_error(list_, index)
def remove_duplicates(self, list_):
"""Returns a list without duplicates based on the given ``list``.
Creates and returns a new list that contains all items in the given
list so that one item can appear only once. Order of the items in
the new list is the same as in the original except for missing
duplicates. Number of the removed duplicates is logged.
New in Robot Framework 2.7.5.
"""
ret = []
for item in list_:
if item not in ret:
ret.append(item)
removed = len(list_) - len(ret)
logger.info('%d duplicate%s removed.' % (removed, plural_or_not(removed)))
return ret
def get_from_list(self, list_, index):
"""Returns the value specified with an ``index`` from ``list``.
The given list is never altered by this keyword.
Index ``0`` means the first position, ``1`` the second, and so on.
Similarly, ``-1`` is the last position, ``-2`` the second last, and so on.
Using an index that does not exist on the list causes an error.
The index can be either an integer or a string that can be converted
to an integer.
Examples (including Python equivalents in comments):
| ${x} = | Get From List | ${L5} | 0 | # L5[0] |
| ${y} = | Get From List | ${L5} | -2 | # L5[-2] |
=>
| ${x} = 'a'
| ${y} = 'd'
| ${L5} is not changed
"""
try:
return list_[self._index_to_int(index)]
except IndexError:
self._index_error(list_, index)
def get_slice_from_list(self, list_, start=0, end=None):
"""Returns a slice of the given list between ``start`` and ``end`` indexes.
The given list is never altered by this keyword.
If both ``start`` and ``end`` are given, a sublist containing values
from ``start`` to ``end`` is returned. This is the same as
``list[start:end]`` in Python. To get all items from the beginning,
use 0 as the start value, and to get all items until and including
the end, use ``None`` (default) as the end value.
Using ``start`` or ``end`` not found on the list is the same as using
the largest (or smallest) available index.
Examples (incl. Python equivalents in comments):
| ${x} = | Get Slice From List | ${L5} | 2 | 4 | # L5[2:4] |
| ${y} = | Get Slice From List | ${L5} | 1 | | # L5[1:None] |
| ${z} = | Get Slice From List | ${L5} | | -2 | # L5[0:-2] |
=>
| ${x} = ['c', 'd']
| ${y} = ['b', 'c', 'd', 'e']
| ${z} = ['a', 'b', 'c']
| ${L5} is not changed
"""
start = self._index_to_int(start, True)
if end is not None:
end = self._index_to_int(end)
return list_[start:end]
def count_values_in_list(self, list_, value, start=0, end=None):
"""Returns the number of occurrences of the given ``value`` in ``list``.
The search can be narrowed to the selected sublist by the ``start`` and
``end`` indexes having the same semantics as with `Get Slice From List`
keyword. The given list is never altered by this keyword.
Example:
| ${x} = | Count Values In List | ${L3} | b |
=>
| ${x} = 1
| ${L3} is not changed
"""
return self.get_slice_from_list(list_, start, end).count(value)
def get_index_from_list(self, list_, value, start=0, end=None):
"""Returns the index of the first occurrence of the ``value`` on the list.
The search can be narrowed to the selected sublist by the ``start`` and
``end`` indexes having the same semantics as with `Get Slice From List`
keyword. In case the value is not found, -1 is returned. The given list
is never altered by this keyword.
Example:
| ${x} = | Get Index From List | ${L5} | d |
=>
| ${x} = 3
| ${L5} is not changed
"""
if start == '':
start = 0
list_ = self.get_slice_from_list(list_, start, end)
try:
return int(start) + list_.index(value)
except ValueError:
return -1
def copy_list(self, list_):
"""Returns a copy of the given list.
The given list is never altered by this keyword.
"""
return list_[:]
def reverse_list(self, list_):
"""Reverses the given list in place.
Note that the given list is changed and nothing is returned. Use
`Copy List` first, if you need to keep also the original order.
| Reverse List | ${L3} |
=>
| ${L3} = ['c', 'b', 'a']
"""
list_.reverse()
def sort_list(self, list_):
"""Sorts the given list in place.
The strings are sorted alphabetically and the numbers numerically.
Note that the given list is changed and nothing is returned. Use
`Copy List` first, if you need to keep also the original order.
${L} = [2,1,'a','c','b']
| Sort List | ${L} |
=>
| ${L} = [1, 2, 'a', 'b', 'c']
"""
list_.sort()
def list_should_contain_value(self, list_, value, msg=None):
"""Fails if the ``value`` is not found from ``list``.
If the keyword fails, the default error message is ``<list> does
not contain value '<value>'``. A custom message can be given using
the ``msg`` argument.
"""
default = "%s does not contain value '%s'." % (seq2str2(list_), value)
_verify_condition(value in list_, default, msg)
def list_should_not_contain_value(self, list_, value, msg=None):
"""Fails if the ``value`` is not found from ``list``.
See `List Should Contain Value` for an explanation of ``msg``.
"""
default = "%s contains value '%s'." % (seq2str2(list_), value)
_verify_condition(value not in list_, default, msg)
def list_should_not_contain_duplicates(self, list_, msg=None):
"""Fails if any element in the ``list`` is found from it more than once.
The default error message lists all the elements that were found
from the ``list`` multiple times, but it can be overridden by giving
a custom ``msg``. All items that occur multiple times and their counts are
also logged.
This keyword works with all iterables that can be converted to a list.
The original iterable is never altered.
"""
if not isinstance(list_, list):
list_ = list(list_)
dupes = []
for item in list_:
if item not in dupes:
count = list_.count(item)
if count > 1:
logger.info("'%s' found %d times." % (item, count))
dupes.append(item)
if dupes:
raise AssertionError(msg or
'%s found multiple times.' % seq2str(dupes))
def lists_should_be_equal(self, list1, list2, msg=None, values=True,
names=None):
"""Fails if given lists are unequal.
The keyword first verifies that the lists have equal lengths, and then
it checks whether all their values are equal. Possible differences between the
values are listed in the default error message like ``Index 4: ABC !=
Abc``. The types of the lists do not need to be the same. For example,
Python tuple and list with same content are considered equal.
The error message can be configured using ``msg`` and ``values``
arguments:
- If ``msg`` is not given, the default error message is used.
- If ``msg`` is given and ``values`` gets a value considered true
(see `Boolean arguments`), the error message starts with the given
``msg`` followed by a newline and the default message.
- If ``msg`` is given and ``values`` is not given a true value,
the error message is just the given ``msg``.
Optional ``names`` argument can be used for naming the indices shown in
the default error message. It can either be a list of names matching
the indices in the lists or a dictionary where keys are indices that
need to be named. It is not necessary to name all of the indices. When
using a dictionary, keys can be either integers or strings that can be
converted to integers.
Examples:
| ${names} = | Create List | First Name | Family Name | Email |
| Lists Should Be Equal | ${people1} | ${people2} | names=${names} |
| ${names} = | Create Dictionary | 0=First Name | 2=Email |
| Lists Should Be Equal | ${people1} | ${people2} | names=${names} |
If the items at index 2 differed in the above examples, the error
message would contain a row like ``Index 2 (email): [email protected] !=
[email protected]``.
"""
len1 = len(list1)
len2 = len(list2)
default = 'Lengths are different: %d != %d' % (len1, len2)
_verify_condition(len1 == len2, default, msg, values)
names = self._get_list_index_name_mapping(names, len1)
diffs = list(self._yield_list_diffs(list1, list2, names))
default = 'Lists are different:\n' + '\n'.join(diffs)
_verify_condition(diffs == [], default, msg, values)
def _get_list_index_name_mapping(self, names, list_length):
if not names:
return {}
if is_dict_like(names):
return dict((int(index), names[index]) for index in names)
return dict(zip(range(list_length), names))
def _yield_list_diffs(self, list1, list2, names):
for index, (item1, item2) in enumerate(zip(list1, list2)):
name = ' (%s)' % names[index] if index in names else ''
try:
assert_equals(item1, item2, msg='Index %d%s' % (index, name))
except AssertionError as err:
yield unic(err)
def list_should_contain_sub_list(self, list1, list2, msg=None, values=True):
"""Fails if not all of the elements in ``list2`` are found in ``list1``.
The order of values and the number of values are not taken into
account.
See `Lists Should Be Equal` for more information about configuring
the error message with ``msg`` and ``values`` arguments.
"""
diffs = ', '.join(unic(item) for item in list2 if item not in list1)
default = 'Following values were not found from first list: ' + diffs
_verify_condition(not diffs, default, msg, values)
def log_list(self, list_, level='INFO'):
"""Logs the length and contents of the ``list`` using given ``level``.
Valid levels are TRACE, DEBUG, INFO (default), and WARN.
If you only want to log the length, use keyword `Get Length` from
the BuiltIn library.
"""
logger.write('\n'.join(self._log_list(list_)), level)
def _log_list(self, list_):
if not list_:
yield 'List is empty.'
elif len(list_) == 1:
yield 'List has one item:\n%s' % list_[0]
else:
yield 'List length is %d and it contains following items:' % len(list_)
for index, item in enumerate(list_):
yield '%s: %s' % (index, item)
def _index_to_int(self, index, empty_to_zero=False):
if empty_to_zero and not index:
return 0
try:
return int(index)
except ValueError:
raise ValueError("Cannot convert index '%s' to an integer." % index)
def _index_error(self, list_, index):
raise IndexError('Given index %s is out of the range 0-%d.'
% (index, len(list_)-1))
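# A minimal sketch of the index handling above; the list is hypothetical:
#
#     _List()._index_to_int('2')                              # -> 2
#     _List()._index_to_int('', empty_to_zero=True)           # -> 0
#     _List().get_slice_from_list(['a', 'b', 'c'], '', '-1')  # -> ['a', 'b']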
class _Dictionary(object):
def convert_to_dictionary(self, item):
"""Converts the given ``item`` to a Python ``dict`` type.
Mainly useful for converting other mappings to dictionaries. Use
`Create Dictionary` from the BuiltIn library for constructing new
dictionaries.
New in Robot Framework 2.9.
"""
return dict(item)
def set_to_dictionary(self, dictionary, *key_value_pairs, **items):
"""Adds the given ``key_value_pairs`` and ``items`` to the ``dictionary``.
Giving items as ``key_value_pairs`` means giving keys and values
as separate arguments:
| Set To Dictionary | ${D1} | key | value | second | ${2} |
=>
| ${D1} = {'a': 1, 'key': 'value', 'second': 2}
Starting from Robot Framework 2.8.1, items can also be given as kwargs
using ``key=value`` syntax:
| Set To Dictionary | ${D1} | key=value | second=${2} |
The latter syntax is typically more convenient to use, but it has
a limitation that keys must be strings.
If given keys already exist in the dictionary, their values are updated.
"""
if len(key_value_pairs) % 2 != 0:
raise ValueError("Adding data to a dictionary failed. There "
"should be even number of key-value-pairs.")
for i in range(0, len(key_value_pairs), 2):
dictionary[key_value_pairs[i]] = key_value_pairs[i+1]
dictionary.update(items)
return dictionary
def remove_from_dictionary(self, dictionary, *keys):
"""Removes the given ``keys`` from the ``dictionary``.
If the given ``key`` cannot be found from the ``dictionary``, it
is ignored.
Example:
| Remove From Dictionary | ${D3} | b | x | y |
=>
| ${D3} = {'a': 1, 'c': 3}
"""
for key in keys:
if key in dictionary:
value = dictionary.pop(key)
logger.info("Removed item with key '%s' and value '%s'." % (key, value))
else:
logger.info("Key '%s' not found." % key)
def pop_from_dictionary(self, dictionary, key, default=NOT_SET):
"""Pops the given ``key`` from the ``dictionary`` and returns its value.
By default the keyword fails if the given ``key`` cannot be found from
the ``dictionary``. If optional ``default`` value is given, it will be
returned instead of failing.
Example:
| ${val}= | Pop From Dictionary | ${D3} | b |
=>
| ${val} = 2
| ${D3} = {'a': 1, 'c': 3}
New in Robot Framework 2.9.2.
"""
if default is NOT_SET:
self.dictionary_should_contain_key(dictionary, key)
return dictionary.pop(key)
return dictionary.pop(key, default)
def keep_in_dictionary(self, dictionary, *keys):
"""Keeps the given ``keys`` in the ``dictionary`` and removes all other.
If the given ``key`` cannot be found from the ``dictionary``, it
is ignored.
Example:
| Keep In Dictionary | ${D5} | b | x | d |
=>
| ${D5} = {'b': 2, 'd': 4}
"""
remove_keys = [k for k in dictionary if k not in keys]
self.remove_from_dictionary(dictionary, *remove_keys)
def copy_dictionary(self, dictionary):
"""Returns a copy of the given dictionary.
The given dictionary is never altered by this keyword.
"""
return dictionary.copy()
def get_dictionary_keys(self, dictionary):
"""Returns keys of the given ``dictionary``.
If keys are sortable, they are returned in sorted order. The given
``dictionary`` is never altered by this keyword.
Example:
| ${keys} = | Get Dictionary Keys | ${D3} |
=>
| ${keys} = ['a', 'b', 'c']
"""
# TODO: Possibility to disable sorting. Can be handy with OrderedDicts.
keys = dictionary.keys()
try:
return sorted(keys)
except TypeError:
return list(keys)
def get_dictionary_values(self, dictionary):
"""Returns values of the given dictionary.
Values are returned sorted according to keys. The given dictionary is
never altered by this keyword.
Example:
| ${values} = | Get Dictionary Values | ${D3} |
=>
| ${values} = [1, 2, 3]
"""
return [dictionary[k] for k in self.get_dictionary_keys(dictionary)]
def get_dictionary_items(self, dictionary):
"""Returns items of the given ``dictionary``.
Items are returned sorted by keys. The given ``dictionary`` is not
altered by this keyword.
Example:
| ${items} = | Get Dictionary Items | ${D3} |
=>
| ${items} = ['a', 1, 'b', 2, 'c', 3]
"""
ret = []
for key in self.get_dictionary_keys(dictionary):
ret.extend((key, dictionary[key]))
return ret
def get_from_dictionary(self, dictionary, key):
"""Returns a value from the given ``dictionary`` based on the given ``key``.
If the given ``key`` cannot be found from the ``dictionary``, this
keyword fails.
The given dictionary is never altered by this keyword.
Example:
| ${value} = | Get From Dictionary | ${D3} | b |
=>
| ${value} = 2
"""
try:
return dictionary[key]
except KeyError:
raise RuntimeError("Dictionary does not contain key '%s'." % key)
def dictionary_should_contain_key(self, dictionary, key, msg=None):
"""Fails if ``key`` is not found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary does not contain key '%s'." % key
_verify_condition(key in dictionary, default, msg)
def dictionary_should_not_contain_key(self, dictionary, key, msg=None):
"""Fails if ``key`` is found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary contains key '%s'." % key
_verify_condition(key not in dictionary, default, msg)
def dictionary_should_contain_item(self, dictionary, key, value, msg=None):
"""An item of ``key``/``value`` must be found in a `dictionary`.
Value is converted to unicode for comparison.
See `Lists Should Be Equal` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
self.dictionary_should_contain_key(dictionary, key, msg)
actual, expected = unic(dictionary[key]), unic(value)
default = "Value of dictionary key '%s' does not match: %s != %s" % (key, actual, expected)
_verify_condition(actual == expected, default, msg)
def dictionary_should_contain_value(self, dictionary, value, msg=None):
"""Fails if ``value`` is not found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary does not contain value '%s'." % value
_verify_condition(value in dictionary.values(), default, msg)
def dictionary_should_not_contain_value(self, dictionary, value, msg=None):
"""Fails if ``value`` is found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary contains value '%s'." % value
        _verify_condition(value not in dictionary.values(), default, msg)
def dictionaries_should_be_equal(self, dict1, dict2, msg=None, values=True):
"""Fails if the given dictionaries are not equal.
First the equality of dictionaries' keys is checked and after that all
the key value pairs. If there are differences between the values, those
are listed in the error message. The types of the dictionaries do not
    need to be the same.
See `Lists Should Be Equal` for more information about configuring
the error message with ``msg`` and ``values`` arguments.
The given dictionaries are never altered by this keyword.
"""
keys = self._keys_should_be_equal(dict1, dict2, msg, values)
self._key_values_should_be_equal(keys, dict1, dict2, msg, values)
def dictionary_should_contain_sub_dictionary(self, dict1, dict2, msg=None,
values=True):
"""Fails unless all items in ``dict2`` are found from ``dict1``.
See `Lists Should Be Equal` for more information about configuring
the error message with ``msg`` and ``values`` arguments.
The given dictionaries are never altered by this keyword.
"""
keys = self.get_dictionary_keys(dict2)
diffs = [unic(k) for k in keys if k not in dict1]
default = "Following keys missing from first dictionary: %s" \
% ', '.join(diffs)
_verify_condition(not diffs, default, msg, values)
self._key_values_should_be_equal(keys, dict1, dict2, msg, values)
def log_dictionary(self, dictionary, level='INFO'):
"""Logs the size and contents of the ``dictionary`` using given ``level``.
Valid levels are TRACE, DEBUG, INFO (default), and WARN.
If you only want to log the size, use keyword `Get Length` from
the BuiltIn library.
"""
logger.write('\n'.join(self._log_dictionary(dictionary)), level)
def _log_dictionary(self, dictionary):
if not dictionary:
yield 'Dictionary is empty.'
elif len(dictionary) == 1:
yield 'Dictionary has one item:'
else:
yield 'Dictionary size is %d and it contains following items:' % len(dictionary)
for key in self.get_dictionary_keys(dictionary):
yield '%s: %s' % (key, dictionary[key])
def _keys_should_be_equal(self, dict1, dict2, msg, values):
keys1 = self.get_dictionary_keys(dict1)
keys2 = self.get_dictionary_keys(dict2)
miss1 = [unic(k) for k in keys2 if k not in dict1]
miss2 = [unic(k) for k in keys1 if k not in dict2]
error = []
if miss1:
error += ['Following keys missing from first dictionary: %s'
% ', '.join(miss1)]
if miss2:
error += ['Following keys missing from second dictionary: %s'
% ', '.join(miss2)]
_verify_condition(not error, '\n'.join(error), msg, values)
return keys1
def _key_values_should_be_equal(self, keys, dict1, dict2, msg, values):
diffs = list(self._yield_dict_diffs(keys, dict1, dict2))
default = 'Following keys have different values:\n' + '\n'.join(diffs)
_verify_condition(not diffs, default, msg, values)
def _yield_dict_diffs(self, keys, dict1, dict2):
for key in keys:
try:
assert_equals(dict1[key], dict2[key], msg='Key %s' % (key,))
except AssertionError as err:
yield unic(err)
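    # Illustrative failure output from `Dictionaries Should Be Equal` above (a
    # sketch, not part of the library): comparing {'a': 1, 'b': 2} against
    # {'a': 1, 'b': 3} passes the key check but fails the value check with a
    # message along the lines of:
    #     Following keys have different values:
    #     Key b: 2 != 3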
class Collections(_List, _Dictionary):
"""A test library providing keywords for handling lists and dictionaries.
``Collections`` is Robot Framework's standard library that provides a
set of keywords for handling Python lists and dictionaries. This
library has keywords, for example, for modifying and getting
values from lists and dictionaries (e.g. `Append To List`, `Get
From Dictionary`) and for verifying their contents (e.g. `Lists
Should Be Equal`, `Dictionary Should Contain Value`).
= Related keywords in BuiltIn =
Following keywords in the BuiltIn library can also be used with
lists and dictionaries:
| = Keyword Name = | = Applicable With = | = Comment = |
| `Create List` | lists |
| `Create Dictionary` | dicts | Was in Collections until RF 2.9. |
| `Get Length` | both |
| `Length Should Be` | both |
| `Should Be Empty` | both |
| `Should Not Be Empty` | both |
| `Should Contain` | both |
| `Should Not Contain` | both |
| `Should Contain X Times` | lists |
| `Should Not Contain X Times` | lists |
| `Get Count` | lists |
= Using with list-like and dictionary-like objects =
List keywords that do not alter the given list can also be used
    with tuples, and to some extent also with other iterables.
`Convert To List` can be used to convert tuples and other iterables
to Python ``list`` objects.
Similarly dictionary keywords can, for most parts, be used with other
mappings. `Convert To Dictionary` can be used if real Python ``dict``
objects are needed.
= Boolean arguments =
Some keywords accept arguments that are handled as Boolean values true or
false. If such an argument is given as a string, it is considered false if
it is either empty or case-insensitively equal to ``false`` or ``no``.
Keywords verifying something that allow dropping actual and expected values
from the possible error message also consider string ``no values`` as false.
    Other strings are considered true regardless of their value, and other
    argument types are tested using the same
[http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
as in Python].
True examples:
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=True | # Strings are generally true. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=yes | # Same as the above. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${TRUE} | # Python ``True`` is true. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${42} | # Numbers other than 0 are true. |
False examples:
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=False | # String ``false`` is false. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=no | # Also string ``no`` is false. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${EMPTY} | # Empty string is false. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${FALSE} | # Python ``False`` is false. |
| `Lists Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument |
Note that prior to Robot Framework 2.9 some keywords considered all
non-empty strings, including ``False``, to be true.
= Data in examples =
List related keywords use variables in format ``${Lx}`` in their examples.
They mean lists with as many alphabetic characters as specified by ``x``.
For example, ``${L1}`` means ``['a']`` and ``${L3}`` means
``['a', 'b', 'c']``.
Dictionary keywords use similar ``${Dx}`` variables. For example, ``${D1}``
means ``{'a': 1}`` and ``${D3}`` means ``{'a': 1, 'b': 2, 'c': 3}``.
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
def should_contain_match(self, list, pattern, msg=None,
case_insensitive=False,
whitespace_insensitive=False):
"""Fails if ``pattern`` is not found in ``list``.
See `List Should Contain Value` for an explanation of ``msg``.
By default, pattern matching is similar to matching files in a shell
and is case-sensitive and whitespace-sensitive. In the pattern syntax,
    ``*`` matches anything and ``?`` matches any single character. You
can also prepend ``glob=`` to your pattern to explicitly use this pattern
matching behavior.
If you prepend ``regexp=`` to your pattern, your pattern will be used
according to the Python
[http://docs.python.org/2/library/re.html|re module] regular expression
syntax. Important note: Backslashes are an escape character, and must
be escaped with another backslash (e.g. ``regexp=\\\\d{6}`` to search for
``\\d{6}``). See `BuiltIn.Should Match Regexp` for more details.
If ``case_insensitive`` is given a true value (see `Boolean arguments`),
the pattern matching will ignore case.
If ``whitespace_insensitive`` is given a true value (see `Boolean
arguments`), the pattern matching will ignore whitespace.
Non-string values in lists are ignored when matching patterns.
The given list is never altered by this keyword.
See also ``Should Not Contain Match``.
Examples:
| Should Contain Match | ${list} | a* | | | # Match strings beginning with 'a'. |
| Should Contain Match | ${list} | regexp=a.* | | | # Same as the above but with regexp. |
| Should Contain Match | ${list} | regexp=\\\\d{6} | | | # Match strings containing six digits. |
| Should Contain Match | ${list} | a* | case_insensitive=True | | # Match strings beginning with 'a' or 'A'. |
| Should Contain Match | ${list} | ab* | whitespace_insensitive=yes | | # Match strings beginning with 'ab' with possible whitespace ignored. |
| Should Contain Match | ${list} | ab* | whitespace_insensitive=true | case_insensitive=true | # Same as the above but also ignore case. |
New in Robot Framework 2.8.6.
"""
matches = _get_matches_in_iterable(list, pattern, case_insensitive,
whitespace_insensitive)
default = "%s does not contain match for pattern '%s'." \
% (seq2str2(list), pattern)
_verify_condition(matches, default, msg)
def should_not_contain_match(self, list, pattern, msg=None,
case_insensitive=False,
whitespace_insensitive=False):
"""Fails if ``pattern`` is found in ``list``.
Exact opposite of `Should Contain Match` keyword. See that keyword
for information about arguments and usage in general.
New in Robot Framework 2.8.6.
"""
matches = _get_matches_in_iterable(list, pattern, case_insensitive,
whitespace_insensitive)
default = "%s contains match for pattern '%s'." \
% (seq2str2(list), pattern)
_verify_condition(not matches, default, msg)
def get_matches(self, list, pattern, case_insensitive=False,
whitespace_insensitive=False):
"""Returns a list of matches to ``pattern`` in ``list``.
For more information on ``pattern``, ``case_insensitive``, and
``whitespace_insensitive``, see `Should Contain Match`.
Examples:
| ${matches}= | Get Matches | ${list} | a* | # ${matches} will contain any string beginning with 'a' |
| ${matches}= | Get Matches | ${list} | regexp=a.* | # ${matches} will contain any string beginning with 'a' (regexp version) |
| ${matches}= | Get Matches | ${list} | a* | case_insensitive=${True} | # ${matches} will contain any string beginning with 'a' or 'A' |
New in Robot Framework 2.8.6.
"""
return _get_matches_in_iterable(list, pattern, case_insensitive,
whitespace_insensitive)
def get_match_count(self, list, pattern, case_insensitive=False,
whitespace_insensitive=False):
"""Returns the count of matches to ``pattern`` in ``list``.
For more information on ``pattern``, ``case_insensitive``, and
``whitespace_insensitive``, see `Should Contain Match`.
Examples:
| ${count}= | Get Match Count | ${list} | a* | # ${count} will be the count of strings beginning with 'a' |
        | ${count}= | Get Match Count | ${list} | regexp=a.* | # ${count} will be the count of strings beginning with 'a' (regexp version) |
        | ${count}= | Get Match Count | ${list} | a* | case_insensitive=${True} | # ${count} will be the count of strings beginning with 'a' or 'A' |
New in Robot Framework 2.8.6.
"""
return len(self.get_matches(list, pattern, case_insensitive,
whitespace_insensitive))
def _verify_condition(condition, default_msg, msg, values=False):
if condition:
return
if not msg:
msg = default_msg
elif is_truthy(values) and str(values).upper() != 'NO VALUES':
msg += '\n' + default_msg
raise AssertionError(msg)
def _get_matches_in_iterable(iterable, pattern, case_insensitive=False,
whitespace_insensitive=False):
if not is_string(pattern):
raise TypeError("Pattern must be string, got '%s'." % type_name(pattern))
regexp = False
if pattern.startswith('regexp='):
pattern = pattern[7:]
regexp = True
elif pattern.startswith('glob='):
pattern = pattern[5:]
matcher = Matcher(pattern,
caseless=is_truthy(case_insensitive),
spaceless=is_truthy(whitespace_insensitive),
regexp=regexp)
return [string for string in iterable
if is_string(string) and matcher.match(string)]
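# A minimal usage sketch for the helper above (the outputs shown as comments
# are what the glob and ``regexp=`` prefix handling is expected to produce;
# the sample list is illustrative):
#
#     names = ['apple', 'Avocado', 'banana']
#     _get_matches_in_iterable(names, 'a*')                         # ['apple']
#     _get_matches_in_iterable(names, 'a*', case_insensitive=True)  # ['apple', 'Avocado']
#     _get_matches_in_iterable(names, 'regexp=.*na')                # ['banana']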
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9334")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9334")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
import pprint
import argparse
import sys
import os
import re
import seldon_utils
import zk_utils
import attr_schema_utils
gdata = {
'all_clients_node_path': "/all_clients",
}
def getOpts(args):
parser = argparse.ArgumentParser(prog='seldon-cli attr', description='Seldon Cli')
parser.add_argument('--action', help="the action to use", required=False, choices=['edit','show','apply'])
parser.add_argument('--client-name', help="the name of the client", required=False)
parser.add_argument('--json', help="the file containing attr json", required=False)
parser.add_argument('args', nargs=argparse.REMAINDER) # catch rest (non-options) as args
opts = parser.parse_args(args)
return opts
def pp(o):
p = pprint.PrettyPrinter(indent=4)
p.pprint(o)
def is_existing_client(zkroot, client_name):
client_names = os.listdir(zkroot + gdata["all_clients_node_path"])
if client_name in client_names:
return True
else:
return False
def ensure_client_has_attr(zkroot, zk_client, client_name):
data_fpath = "{zkroot}{all_clients_node_path}/{client_name}/attr/_data_".format(
zkroot=zkroot,all_clients_node_path=gdata["all_clients_node_path"],client_name=client_name)
if not os.path.isfile(data_fpath):
node_path = gdata["all_clients_node_path"]+"/"+client_name+"/attr"
if zk_client.exists(node_path):
write_node_value_to_file(zk_client, zkroot, node_path)
else:
default_attr_json = '{"types":[{"type_attrs":[{"name":"title","value_type":"string"}],"type_id":1,"type_name":"defaulttype"}]}'
data = seldon_utils.json_to_dict(default_attr_json)
write_data_to_file(data_fpath, data)
zk_utils.node_set(zk_client, node_path, seldon_utils.dict_to_json(data))
def write_data_to_file(data_fpath, data):
json = seldon_utils.dict_to_json(data, True) if isinstance(data,dict) else str(data)
seldon_utils.mkdir_p(os.path.dirname(data_fpath))
f = open(data_fpath,'w')
f.write(json)
f.write('\n')
f.close()
print "Writing data to file[{data_fpath}]".format(**locals())
def show_attr(data):
attr_types = data["types"]
print "types:"
for attr_type in attr_types:
attr_type_name = attr_type["type_name"]
attr_type_id = attr_type["type_id"]
attr_type_attrs = attr_type["type_attrs"]
print " [{attr_type_name}]".format(**locals())
print " type_id: {attr_type_id}".format(**locals())
print " type_attrs:"
for attr_type_attr in attr_type_attrs:
attrib_name = attr_type_attr["name"]
attrib_value = attr_type_attr["value_type"]
attrib_value_str = "enum["+",".join(attrib_value)+"]" if isinstance(attrib_value,list) else attrib_value
print " {attrib_name}: {attrib_value_str}".format(**locals())
def write_node_value_to_file(zk_client, zkroot, node_path):
node_value = zk_utils.node_get(zk_client, node_path)
node_value = node_value.strip()
if zk_utils.is_json_data(node_value):
data = seldon_utils.json_to_dict(node_value) if node_value != None and len(node_value)>0 else ""
else:
data = str(node_value)
data_fpath = zkroot + node_path + "/_data_"
write_data_to_file(data_fpath, data)
def action_show(command_data, opts):
client_name = opts.client_name
if client_name == None:
print "Need client name to show the attr for"
sys.exit(1)
zkroot = command_data["zkdetails"]["zkroot"]
if not is_existing_client(zkroot, client_name):
print "Invalid client[{client_name}]".format(**locals())
sys.exit(1)
zk_client = command_data["zkdetails"]["zk_client"]
ensure_client_has_attr(zkroot, zk_client, client_name)
data_fpath = zkroot + gdata["all_clients_node_path"] + "/" + client_name + "/attr/_data_"
f = open(data_fpath)
json = f.read()
f.close()
data = seldon_utils.json_to_dict(json)
show_attr(data)
def store_json(command_data,opts):
f = open(opts.json)
json = f.read()
f.close()
data = seldon_utils.json_to_dict(json)
zk_client = command_data["zkdetails"]["zk_client"]
zkroot = command_data["zkdetails"]["zkroot"]
data_fpath = zkroot + gdata["all_clients_node_path"] + "/" + opts.client_name + "/attr/_data_"
if data is None:
print "Invalid attr json!"
sys.exit(1)
else:
write_data_to_file(data_fpath, data)
node_path = gdata["all_clients_node_path"]+"/"+opts.client_name+"/attr"
zk_utils.node_set(zk_client, node_path, seldon_utils.dict_to_json(data))
def action_edit(command_data, opts):
client_name = opts.client_name
if client_name == None:
print "Need client name to show the attr for"
sys.exit(1)
zkroot = command_data["zkdetails"]["zkroot"]
if not is_existing_client(zkroot, client_name):
print "Invalid client[{client_name}]".format(**locals())
sys.exit(1)
zk_client = command_data["zkdetails"]["zk_client"]
ensure_client_has_attr(zkroot, zk_client, client_name)
data_fpath = zkroot + gdata["all_clients_node_path"] + "/" + client_name + "/attr/_data_"
#do the edit
from subprocess import call
editor=seldon_utils.get_editor()
call([editor, data_fpath])
f = open(data_fpath)
json = f.read()
f.close()
data = seldon_utils.json_to_dict(json)
if data is None:
print "Invalid attr json!"
else:
write_data_to_file(data_fpath, data)
node_path = gdata["all_clients_node_path"]+"/"+client_name+"/attr"
zk_utils.node_set(zk_client, node_path, seldon_utils.dict_to_json(data))
show_attr(data)
def action_apply(command_data, opts):
    client_name = opts.client_name
    if client_name == None:
        print "Need client name to apply the attr for"
        sys.exit(1)
    zkroot = command_data["zkdetails"]["zkroot"]
    if not is_existing_client(zkroot, client_name):
        print "Invalid client[{client_name}]".format(**locals())
        sys.exit(1)
    zk_client = command_data["zkdetails"]["zk_client"]
    ensure_client_has_attr(zkroot, zk_client, client_name)
    if opts.json is not None:
        store_json(command_data, opts)
    # The helpers below are nested inside action_apply so they close over
    # client_name and zkroot.
    def get_db_jndi_name():
        data_fpath = zkroot + gdata["all_clients_node_path"] + "/" + client_name + "/_data_"
        f = open(data_fpath)
        json = f.read()
        data = seldon_utils.json_to_dict(json)
        f.close()
        DB_JNDI_NAME = data["DB_JNDI_NAME"] if isinstance(data, dict) and data.has_key("DB_JNDI_NAME") else ""
        return DB_JNDI_NAME
    def get_db_info(db_name):
        data_fpath = zkroot + "/config/dbcp/_data_"
        f = open(data_fpath)
        json = f.read()
        data = seldon_utils.json_to_dict(json)
        f.close()
        db_info = None
        for db_info_entry in data['dbs']:
            if db_info_entry['name'] == db_name:
                db_info = db_info_entry
                break
        return db_info
    def get_db_settings():
        dbSettings = {}
        dbSettings["host"] = re.search('://(.*?):(.*?),', db_info["jdbc"]).groups()[0]
        dbSettings["user"] = db_info["user"]
        dbSettings["password"] = db_info["password"]
        return dbSettings
    db_name = get_db_jndi_name()
    db_info = get_db_info(db_name)
    if db_info == None:
        print "Invalid db name[{db_name}]".format(**locals())
        return
    dbSettings = get_db_settings()
    scheme_file_path = zkroot + gdata["all_clients_node_path"] + "/" + client_name + "/attr/_data_"
    clean = True
    attr_schema_utils.create_schema(client_name, dbSettings, scheme_file_path, clean)
    clean = False
    attr_schema_utils.create_schema(client_name, dbSettings, scheme_file_path, clean)
def cmd_attr(gopts,command_data, command_args):
actions = {
"default" : action_show,
"show" : action_show,
"edit" : action_edit,
"apply" : action_apply,
}
opts = getOpts(command_args)
action = opts.action
if action == None:
actions["default"](command_data, opts)
else:
if actions.has_key(action):
actions[action](command_data, opts)
else:
print "Invalid action[{}]".format(action)
|
|
import logging
import traceback
import numpy as np
from eemeter.structures import EnergyTrace
logger = logging.getLogger(__name__)
class SplitModeledEnergyTrace(object):
    ''' Light wrapper around the models applicable to a single trace; it
    fits and predicts multiple models for different segments.
Parameters
----------
trace : eemeter.structures.EnergyTrace
Trace to be modeled.
formatter : eemeter.modeling.formatter.Formatter
Formatter to prep trace data for modeling.
model_mapping : dict
        Items of this dictionary map ``modeling_period_label``s to models.
modeling_period_set : eemeter.structures.ModelingPeriodSet
The set of modeling periods over which models should be applicable.
'''
def __init__(self, trace, formatter, model_mapping, modeling_period_set):
self.trace = trace
self.formatter = formatter
self.model_mapping = model_mapping
self.modeling_period_set = modeling_period_set
self.fit_outputs = {}
def __repr__(self):
return (
"SplitModeledEnergyTrace(trace={}, formatter={},"
" model_mapping={}, modeling_period_set={})"
.format(self.trace, self.formatter, self.model_mapping,
self.modeling_period_set)
)
def fit(self, weather_source):
''' Fit all models associated with this trace.
Parameters
----------
weather_source : eemeter.weather.ISDWeatherSource
Weather source to use in creating covariate data.
'''
for modeling_period_label, modeling_period in \
self.modeling_period_set.iter_modeling_periods():
filtered_data = self._filter_by_modeling_period(
self.trace, modeling_period)
filtered_trace = EnergyTrace(
self.trace.interpretation, data=filtered_data,
unit=self.trace.unit)
model = self.model_mapping[modeling_period_label]
try:
input_data = self.formatter.create_input(
filtered_trace, weather_source)
except:
logger.warn(
'For trace "{}" and modeling_period "{}", was not'
' able to format input data for {}.'
.format(self.trace.interpretation, modeling_period_label,
model)
)
self.fit_outputs[modeling_period_label] = {
"status": "FAILURE",
"traceback": traceback.format_exc(),
"start_date": None,
"end_date": None,
"rows": None,
}
continue
else:
input_description = self.formatter.describe_input(input_data)
outputs = {
"start_date": input_description.get('start_date'),
"end_date": input_description.get('end_date'),
"n_rows": input_description.get('n_rows'),
}
try:
outputs.update(model.fit(input_data))
except:
logger.warn(
'For trace "{}" and modeling_period "{}", {} was not'
' able to fit using input data: {}'
.format(self.trace.interpretation, modeling_period_label,
model, input_data)
)
outputs.update({
"status": "FAILURE",
"traceback": traceback.format_exc(),
})
else:
logger.info(
'Successfully fitted {} to formatted input data for'
' trace "{}" and modeling_period "{}".'
.format(model, self.trace.interpretation,
modeling_period_label)
)
outputs.update({"status": "SUCCESS"})
self.fit_outputs[modeling_period_label] = outputs
return self.fit_outputs
def predict(self, modeling_period_label, demand_fixture_data,
params=None):
''' Predict for any one of the modeling_periods associated with this
        trace. Light wrapper around the :code:`model.predict()` method.
Parameters
----------
modeling_period_label : str
Modeling period indicating which model to use in making the
prediction.
demand_fixture_data : object
Data (formatted by :code:`self.formatter`) over which prediction
should be made.
params : object, default None
Fitted parameters for the model. If :code:`None`, use parameters
            found when the :code:`.fit()` method was called.
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
logger.warn(
'Skipping prediction for modeling_period "{}" because'
' model fit failed.'.format(modeling_period_label)
)
return None
if params is None:
params = outputs["model_params"]
return self.model_mapping[modeling_period_label].predict(
demand_fixture_data, params)
def compute_derivative(self, modeling_period_label, derivative_callable,
**kwargs):
''' Compute a modeling derivative for this modeling period.
Parameters
----------
modeling_period_label : str
Label for modeling period for which derivative should be computed.
derivative_callable : callable
Callable which can be used as follows:
            .. code-block:: python
>>> derivative_callable(formatter, model, **kwargs)
**kwargs
            Arbitrary keyword arguments to be passed to the derivative callable
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
return None
model = self.model_mapping[modeling_period_label]
try:
derivative = derivative_callable(self.formatter, model, **kwargs)
except Exception:
logger.exception("Derivative computation failed.")
return None
return derivative
@staticmethod
def _filter_by_modeling_period(trace, modeling_period):
start = modeling_period.start_date
end = modeling_period.end_date
if start is None:
if end is None:
filtered_df = trace.data.copy()
else:
filtered_df = trace.data[:end].copy()
else:
if end is None:
filtered_df = trace.data[start:].copy()
else:
filtered_df = trace.data[start:end].copy()
# require NaN last data point as cap
if filtered_df.shape[0] > 0:
filtered_df.value.iloc[-1] = np.nan
filtered_df.estimated.iloc[-1] = False
return filtered_df
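# A minimal sketch of how this wrapper is meant to be driven (all names below
# are assumed, already-constructed eemeter objects; the labels must match both
# the keys of model_mapping and the labels in modeling_period_set):
#
#     smet = SplitModeledEnergyTrace(
#         trace, formatter,
#         {"baseline": baseline_model, "reporting": reporting_model},
#         modeling_period_set)
#     smet.fit(weather_source)
#     prediction = smet.predict("reporting", demand_fixture_data)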
|
|
"""Support for HDMI CEC devices as media players."""
import logging
from pycec.commands import CecCommand, KeyPressCommand, KeyReleaseCommand
from pycec.const import (
KEY_BACKWARD,
KEY_FORWARD,
KEY_MUTE_TOGGLE,
KEY_PAUSE,
KEY_PLAY,
KEY_STOP,
KEY_VOLUME_DOWN,
KEY_VOLUME_UP,
POWER_OFF,
POWER_ON,
STATUS_PLAY,
STATUS_STILL,
STATUS_STOP,
TYPE_AUDIO,
TYPE_PLAYBACK,
TYPE_RECORDER,
TYPE_TUNER,
)
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
DOMAIN,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from . import ATTR_NEW, CecDevice
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return HDMI devices as +switches."""
if ATTR_NEW in discovery_info:
_LOGGER.debug("Setting up HDMI devices %s", discovery_info[ATTR_NEW])
entities = []
for device in discovery_info[ATTR_NEW]:
hdmi_device = hass.data.get(device)
entities.append(CecPlayerDevice(hdmi_device, hdmi_device.logical_address))
add_entities(entities, True)
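# Note: this platform is discovery-driven. The hdmi_cec component stores the
# pycec device objects in hass.data and hands their keys over via
# discovery_info[ATTR_NEW], so setup_platform() only wraps devices the parent
# component has already detected rather than scanning the bus itself.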
class CecPlayerDevice(CecDevice, MediaPlayerEntity):
"""Representation of a HDMI device as a Media player."""
def __init__(self, device, logical) -> None:
"""Initialize the HDMI device."""
CecDevice.__init__(self, device, logical)
self.entity_id = f"{DOMAIN}.hdmi_{hex(self._logical_address)[2:]}"
def send_keypress(self, key):
"""Send keypress to CEC adapter."""
_LOGGER.debug(
"Sending keypress %s to device %s", hex(key), hex(self._logical_address)
)
self._device.send_command(KeyPressCommand(key, dst=self._logical_address))
self._device.send_command(KeyReleaseCommand(dst=self._logical_address))
def send_playback(self, key):
"""Send playback status to CEC adapter."""
self._device.async_send_command(CecCommand(key, dst=self._logical_address))
def mute_volume(self, mute):
"""Mute volume."""
self.send_keypress(KEY_MUTE_TOGGLE)
def media_previous_track(self):
"""Go to previous track."""
self.send_keypress(KEY_BACKWARD)
def turn_on(self):
"""Turn device on."""
self._device.turn_on()
self._state = STATE_ON
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
def turn_off(self):
"""Turn device off."""
self._device.turn_off()
self._state = STATE_OFF
def media_stop(self):
"""Stop playback."""
self.send_keypress(KEY_STOP)
self._state = STATE_IDLE
def play_media(self, media_type, media_id, **kwargs):
"""Not supported."""
raise NotImplementedError()
def media_next_track(self):
"""Skip to next track."""
self.send_keypress(KEY_FORWARD)
def media_seek(self, position):
"""Not supported."""
raise NotImplementedError()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
def media_pause(self):
"""Pause playback."""
self.send_keypress(KEY_PAUSE)
self._state = STATE_PAUSED
def select_source(self, source):
"""Not supported."""
raise NotImplementedError()
def media_play(self):
"""Start playback."""
self.send_keypress(KEY_PLAY)
self._state = STATE_PLAYING
def volume_up(self):
"""Increase volume."""
_LOGGER.debug("%s: volume up", self._logical_address)
self.send_keypress(KEY_VOLUME_UP)
def volume_down(self):
"""Decrease volume."""
_LOGGER.debug("%s: volume down", self._logical_address)
self.send_keypress(KEY_VOLUME_DOWN)
@property
def state(self) -> str:
"""Cache state of device."""
return self._state
def update(self):
"""Update device status."""
device = self._device
if device.power_status in [POWER_OFF, 3]:
self._state = STATE_OFF
elif not self.support_pause:
if device.power_status in [POWER_ON, 4]:
self._state = STATE_ON
elif device.status == STATUS_PLAY:
self._state = STATE_PLAYING
elif device.status == STATUS_STOP:
self._state = STATE_IDLE
elif device.status == STATUS_STILL:
self._state = STATE_PAUSED
else:
_LOGGER.warning("Unknown state: %s", device.status)
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.type_id == TYPE_RECORDER or self.type == TYPE_PLAYBACK:
return (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
)
if self.type == TYPE_TUNER:
return (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PAUSE
| SUPPORT_STOP
)
if self.type_id == TYPE_AUDIO:
return (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
)
return SUPPORT_TURN_ON | SUPPORT_TURN_OFF
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
flags.DEFINE_float(
'cb_distortion_range', 0.1, 'Cb distortion range +/-')
flags.DEFINE_float(
'cr_distortion_range', 0.1, 'Cr distortion range +/-')
flags.DEFINE_boolean(
'use_fast_color_distort', True,
    'apply fast color/chroma distortion if True, else apply '
'brightness/saturation/hue/contrast distortion')
FLAGS = flags.FLAGS
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
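# Illustrative call (a sketch mirroring how preprocess_for_train uses this
# helper below; the 224x224 target size is illustrative): resize with a
# randomly chosen ResizeMethod per example.
#
#     resized = apply_with_random_selector(
#         image,
#         lambda x, method: tf.image.resize_images(x, [224, 224], method),
#         num_cases=4)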
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.minimum(tf.maximum(image, 0.0), 1.0)
def distort_color_fast(image, scope=None):
"""Distort the color of a Tensor image.
Distort brightness and chroma values of input image
Args:
image: 3-D Tensor containing single image in [0, 1].
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
"""
with tf.name_scope(scope, 'distort_color', [image]):
br_delta = random_ops.random_uniform([], -32./255., 32./255., seed=None)
cb_factor = random_ops.random_uniform(
[], -FLAGS.cb_distortion_range, FLAGS.cb_distortion_range, seed=None)
cr_factor = random_ops.random_uniform(
[], -FLAGS.cr_distortion_range, FLAGS.cr_distortion_range, seed=None)
channels = tf.split(axis=2, num_or_size_splits=3, value=image)
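    # The factors below follow the ITU-R BT.601 YCbCr-to-RGB conversion
    # (R = Y + 1.402*Cr, G = Y - 0.344136*Cb - 0.714136*Cr, B = Y + 1.772*Cb),
    # so one (brightness, Cb, Cr) draw is applied as per-channel RGB offsets.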
red_offset = 1.402 * cr_factor + br_delta
green_offset = -0.344136 * cb_factor - 0.714136 * cr_factor + br_delta
blue_offset = 1.772 * cb_factor + br_delta
channels[0] += red_offset
channels[1] += green_offset
channels[2] += blue_offset
image = tf.concat(axis=2, values=channels)
image = tf.minimum(tf.maximum(image, 0.), 1.)
return image
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3./4., 4./3.),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image, height, width, bbox,
min_object_covered=0.1,
fast_mode=True,
scope=None,
add_image_summaries=True):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Additionally it would create image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if add_image_summaries:
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(
image,
bbox,
min_object_covered=min_object_covered,
area_range=(min_object_covered, 1.0))
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
if add_image_summaries:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
if FLAGS.use_fast_color_distort:
distorted_image = distort_color_fast(distorted_image)
else:
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if add_image_summaries:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image.set_shape([height, width, 3])
return image
def preprocess_image(image,
output_height,
output_width,
is_training=False,
scaled_images=True,
bbox=None,
min_object_covered=0.1,
fast_mode=True,
add_image_summaries=False):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it would be converted
to tf.float32 assuming that the range is [0, MAX], where MAX is largest
positive representable number for int(8/16/32) data type (see
`tf.image.convert_image_dtype` for details).
output_height: integer, image expected height.
output_width: integer, image expected width.
    is_training: Boolean. If true it would transform an image for training,
otherwise it would transform it for evaluation.
scaled_images: Whether to scale pixel values to the range [-1, 1].
If set to false, pixel values are in the range [0, 1].
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
image = preprocess_for_train(
image,
output_height,
output_width,
bbox,
min_object_covered,
fast_mode,
add_image_summaries=add_image_summaries)
else:
image = preprocess_for_eval(image, output_height, output_width)
if scaled_images:
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
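# Minimal usage sketch (names and the 224x224 size are illustrative): decode
# an encoded JPEG and run the evaluation path, yielding a [-1, 1]-scaled
# 224x224x3 tensor when scaled_images is left at its default.
#
#     image = tf.image.decode_jpeg(encoded_jpeg, channels=3)
#     processed = preprocess_image(image, 224, 224, is_training=False)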
|
|
#!/usr/local/bin/python2.6 -tt
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright 2004-present Facebook. All rights reserved.
#
"""
This file contains the main module code for buck python test programs.
By default, this is the main module for all python_test() rules. However,
rules can also specify their own custom main_module. If you write your own
main module, you can import this module as tools.test.stubs.fbpyunit, to access
any of its code to help implement your main module.
"""
from __future__ import print_function
import contextlib
import ctypes
import fnmatch
import json
import logging
import optparse
import os
import platform
import re
import sys
import time
import traceback
import unittest
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import imp
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import coverage
except ImportError:
coverage = None
try:
from importlib.machinery import SourceFileLoader
except ImportError:
SourceFileLoader = None
EXIT_CODE_SUCCESS = 0
EXIT_CODE_TEST_FAILURE = 70
class TestStatus(object):
ABORTED = "FAILURE"
PASSED = "SUCCESS"
FAILED = "FAILURE"
EXPECTED_FAILURE = "SUCCESS"
UNEXPECTED_SUCCESS = "FAILURE"
SKIPPED = "ASSUMPTION_VIOLATION"
class PathMatcher(object):
def __init__(self, include_patterns, omit_patterns):
self.include_patterns = include_patterns
self.omit_patterns = omit_patterns
def omit(self, path):
"""
        Omit iff the path matches any of the omit_patterns, or the
        include_patterns are non-empty and none of them matches.
"""
path = os.path.realpath(path)
return any(fnmatch.fnmatch(path, p) for p in self.omit_patterns) or (
self.include_patterns
and not any(fnmatch.fnmatch(path, p) for p in self.include_patterns)
)
def include(self, path):
return not self.omit(path)
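    # Illustrative semantics (a sketch; the paths and patterns are hypothetical):
    #     PathMatcher([], ["*/vendor/*"]).omit("/src/vendor/x.py")      -> True
    #     PathMatcher(["*/pkg/*"], []).include("/src/pkg/mod.py")       -> True
    #     PathMatcher(["*/pkg/*"], []).include("/src/other/mod.py")     -> False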
class DebugWipeFinder(object):
"""
PEP 302 finder that uses a DebugWipeLoader for all files which do not need
coverage
"""
def __init__(self, matcher):
self.matcher = matcher
def find_module(self, fullname, path=None):
_, _, basename = fullname.rpartition(".")
try:
fd, pypath, (_, _, kind) = imp.find_module(basename, path)
except Exception:
# Maybe it's a top level module
try:
fd, pypath, (_, _, kind) = imp.find_module(basename, None)
except Exception:
return None
if hasattr(fd, "close"):
fd.close()
if kind != imp.PY_SOURCE:
return None
if self.matcher.include(pypath):
return None
"""
This is defined to match CPython's PyVarObject struct
"""
class PyVarObject(ctypes.Structure):
_fields_ = [
("ob_refcnt", ctypes.c_long),
("ob_type", ctypes.c_void_p),
("ob_size", ctypes.c_ulong),
]
class DebugWipeLoader(SourceFileLoader):
"""
PEP302 loader that zeros out debug information before execution
"""
def get_code(self, fullname):
code = super(DebugWipeLoader, self).get_code(fullname)
if code:
# Ideally we'd do
# code.co_lnotab = b''
# But code objects are READONLY. Not to worry though; we'll
# directly modify CPython's object
code_impl = PyVarObject.from_address(id(code.co_lnotab))
code_impl.ob_size = 0
return code
return DebugWipeLoader(fullname, pypath)
def optimize_for_coverage(cov, include_patterns, omit_patterns):
"""
We get better performance if we zero out debug information for files which
we're not interested in. Only available in CPython 3.3+
"""
matcher = PathMatcher(include_patterns, omit_patterns)
if SourceFileLoader and platform.python_implementation() == "CPython":
sys.meta_path.insert(0, DebugWipeFinder(matcher))
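# Illustrative call (a sketch; the patterns are hypothetical): wipe line-number
# tables for everything outside the covered package so that tracing files we
# do not report on stays cheap.
#
#     optimize_for_coverage(cov, ["*/mypkg/*"], ["*/tests/*"])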
class TeeStream(object):
def __init__(self, *streams):
self._streams = streams
def write(self, data):
for stream in self._streams:
stream.write(data)
def flush(self):
for stream in self._streams:
stream.flush()
def isatty(self):
return False
class CallbackStream(object):
def __init__(self, callback, bytes_callback=None, orig=None):
self._callback = callback
self._fileno = orig.fileno() if orig else None
# Python 3 APIs:
# - `encoding` is a string holding the encoding name
# - `errors` is a string holding the error-handling mode for encoding
# - `buffer` should look like an io.BufferedIOBase object
self.errors = orig.errors if orig else None
if bytes_callback:
# those members are only on the io.TextIOWrapper
self.encoding = orig.encoding if orig else "UTF-8"
self.buffer = CallbackStream(bytes_callback, orig=orig)
def write(self, data):
self._callback(data)
def flush(self):
pass
def isatty(self):
return False
def fileno(self):
return self._fileno
class BuckTestResult(unittest._TextTestResult):
"""
Our own TestResult class that outputs data in a format that can be easily
parsed by buck's test runner.
"""
def __init__(
self, stream, descriptions, verbosity, show_output, main_program, suite
):
super(BuckTestResult, self).__init__(stream, descriptions, verbosity)
self._main_program = main_program
self._suite = suite
self._results = []
self._current_test = None
self._saved_stdout = sys.stdout
self._saved_stderr = sys.stderr
self._show_output = show_output
def getResults(self):
return self._results
def startTest(self, test):
super(BuckTestResult, self).startTest(test)
# Pass in the real stdout and stderr filenos. We can't really do much
# here to intercept callers who directly operate on these fileno
# objects.
sys.stdout = CallbackStream(
self.addStdout, self.addStdoutBytes, orig=sys.stdout
)
sys.stderr = CallbackStream(
self.addStderr, self.addStderrBytes, orig=sys.stderr
)
self._current_test = test
self._test_start_time = time.time()
self._current_status = TestStatus.ABORTED
self._messages = []
self._stacktrace = None
self._stdout = ""
self._stderr = ""
def _find_next_test(self, suite):
"""
Find the next test that has not been run.
"""
for test in suite:
# We identify test suites by tests that are iterable (as is done in
# the builtin python test harness). If we see one, recurse on it.
if hasattr(test, "__iter__"):
test = self._find_next_test(test)
# The builtin python test harness sets test references to `None`
# after they have run, so we know we've found the next test up
# if it's not `None`.
if test is not None:
return test
def stopTest(self, test):
sys.stdout = self._saved_stdout
sys.stderr = self._saved_stderr
super(BuckTestResult, self).stopTest(test)
# If a failure occurred during module/class setup, then this "test" may
# actually be a `_ErrorHolder`, which doesn't contain explicit info
# about the upcoming test. Since we really only care about the test
# name field (i.e. `_testMethodName`), we use that to detect an actual
# test case, and fall back to looking the test up from the suite
# otherwise.
if not hasattr(test, "_testMethodName"):
test = self._find_next_test(self._suite)
self._results.append(
{
"testCaseName": "{0}.{1}".format(
test.__class__.__module__, test.__class__.__name__
),
"testCase": test._testMethodName,
"type": self._current_status,
"time": int((time.time() - self._test_start_time) * 1000),
"message": os.linesep.join(self._messages),
"stacktrace": self._stacktrace,
"stdOut": self._stdout,
"stdErr": self._stderr,
}
)
self._current_test = None
def stopTestRun(self):
cov = self._main_program.get_coverage()
if cov is not None:
self._results.append({"coverage": cov})
@contextlib.contextmanager
def _withTest(self, test):
self.startTest(test)
yield
self.stopTest(test)
def _setStatus(self, test, status, message=None, stacktrace=None):
assert test == self._current_test
self._current_status = status
self._stacktrace = stacktrace
if message is not None:
if message.endswith(os.linesep):
message = message[:-1]
self._messages.append(message)
def setStatus(self, test, status, message=None, stacktrace=None):
# addError() may be called outside of a test if one of the shared
# fixtures (setUpClass/tearDownClass/setUpModule/tearDownModule)
# throws an error.
#
# In this case, create a fake test result to record the error.
if self._current_test is None:
with self._withTest(test):
self._setStatus(test, status, message, stacktrace)
else:
self._setStatus(test, status, message, stacktrace)
def setException(self, test, status, excinfo):
exctype, value, tb = excinfo
self.setStatus(
test,
status,
"{0}: {1}".format(exctype.__name__, value),
"".join(traceback.format_tb(tb)),
)
def addSuccess(self, test):
super(BuckTestResult, self).addSuccess(test)
self.setStatus(test, TestStatus.PASSED)
def addError(self, test, err):
super(BuckTestResult, self).addError(test, err)
self.setException(test, TestStatus.ABORTED, err)
def addFailure(self, test, err):
super(BuckTestResult, self).addFailure(test, err)
self.setException(test, TestStatus.FAILED, err)
def addSkip(self, test, reason):
super(BuckTestResult, self).addSkip(test, reason)
self.setStatus(test, TestStatus.SKIPPED, "Skipped: %s" % (reason,))
def addExpectedFailure(self, test, err):
super(BuckTestResult, self).addExpectedFailure(test, err)
self.setException(test, TestStatus.EXPECTED_FAILURE, err)
def addUnexpectedSuccess(self, test):
super(BuckTestResult, self).addUnexpectedSuccess(test)
self.setStatus(test, TestStatus.UNEXPECTED_SUCCESS, "Unexpected success")
def addStdout(self, val):
self._stdout += val
if self._show_output:
self._saved_stdout.write(val)
self._saved_stdout.flush()
def addStdoutBytes(self, val):
string = val.decode("utf-8", errors="backslashreplace")
self.addStdout(string)
def addStderr(self, val):
self._stderr += val
if self._show_output:
self._saved_stderr.write(val)
self._saved_stderr.flush()
def addStderrBytes(self, val):
string = val.decode("utf-8", errors="backslashreplace")
self.addStderr(string)
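# Illustrative only (not produced verbatim by this module): the shape of one
# entry in the JSON list written via --output. Field names mirror stopTest();
# the values below are hypothetical.
_EXAMPLE_RESULT_RECORD = {
    "testCaseName": "myproj.test_math.MathTest",
    "testCase": "test_add",
    "type": TestStatus.PASSED,
    "time": 12,  # milliseconds
    "message": "",
    "stacktrace": None,
    "stdOut": "",
    "stdErr": "",
}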
class BuckTestRunner(unittest.TextTestRunner):
def __init__(self, main_program, suite, show_output=True, **kwargs):
super(BuckTestRunner, self).__init__(**kwargs)
self.show_output = show_output
self._main_program = main_program
self._suite = suite
def _makeResult(self):
return BuckTestResult(
self.stream,
self.descriptions,
self.verbosity,
self.show_output,
self._main_program,
self._suite,
)
def _format_test_name(test_class, attrname):
"""
Format the test name in buck style: <module>.<class>#<method>.
"""
return "{0}.{1}#{2}".format(test_class.__module__, test_class.__name__, attrname)
class StderrLogHandler(logging.StreamHandler):
"""
This class is very similar to logging.StreamHandler, except that it
always uses the current sys.stderr object.
StreamHandler caches the current sys.stderr object when it is constructed.
This makes it behave poorly in unit tests, which may replace sys.stderr
with a StringIO buffer during tests. The StreamHandler will continue using
the old sys.stderr object instead of the desired StringIO buffer.
"""
def __init__(self):
logging.Handler.__init__(self)
@property
def stream(self):
return sys.stderr
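# A minimal sketch (not part of the original runner): because the handler
# resolves sys.stderr on every emit, log records land in whatever object
# currently holds sys.stderr -- including a CallbackStream installed by
# startTest().
def _example_stderr_log_handler():
    handler = StderrLogHandler()
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    demo_logger = logging.getLogger("example.stderr_handler")
    demo_logger.addHandler(handler)
    demo_logger.warning("this goes to the *current* sys.stderr")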
class RegexTestLoader(unittest.TestLoader):
def __init__(self, regex=None):
self.regex = regex
super(RegexTestLoader, self).__init__()
def getTestCaseNames(self, testCaseClass):
"""
Return a sorted sequence of method names found within testCaseClass
"""
testFnNames = super(RegexTestLoader, self).getTestCaseNames(testCaseClass)
if self.regex is None:
return testFnNames
robj = re.compile(self.regex)
matched = []
for attrname in testFnNames:
fullname = _format_test_name(testCaseClass, attrname)
if robj.search(fullname):
matched.append(attrname)
return matched
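# A minimal sketch (not part of the original runner), using a hypothetical
# test case: only methods whose buck-style full name matches the regex
# survive the filter.
def _example_regex_test_loader():
    class _MathTest(unittest.TestCase):
        def test_add(self):
            pass

        def test_mul(self):
            pass

    loader = RegexTestLoader(regex=r"#test_add$")
    return loader.getTestCaseNames(_MathTest)  # ['test_add']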
class Loader(object):
def __init__(self, modules, regex=None):
self.modules = modules
self.regex = regex
def load_all(self):
loader = RegexTestLoader(self.regex)
test_suite = unittest.TestSuite()
for module_name in self.modules:
__import__(module_name, level=0)
module = sys.modules[module_name]
module_suite = loader.loadTestsFromModule(module)
test_suite.addTest(module_suite)
return test_suite
def load_args(self, args):
loader = RegexTestLoader(self.regex)
suites = []
for arg in args:
suite = loader.loadTestsFromName(arg)
# loadTestsFromName() can only process names that refer to
# individual test functions or modules. It can't process package
# names. If there were no module/function matches, check to see if
# this looks like a package name.
if suite.countTestCases() != 0:
suites.append(suite)
continue
# Load all modules whose name is <arg>.<something>
prefix = arg + "."
for module in self.modules:
if module.startswith(prefix):
suite = loader.loadTestsFromName(module)
suites.append(suite)
return loader.suiteClass(suites)
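# A minimal sketch (not part of the original runner), assuming hypothetical
# test modules "myproj.test_math" and "myproj.test_io" are importable: load
# every test, or only the names given on the command line.
def _example_loader(argv_tests=None):
    loader = Loader(["myproj.test_math", "myproj.test_io"], regex=None)
    if argv_tests:
        return loader.load_args(argv_tests)
    return loader.load_all()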
class MainProgram(object):
"""
This class implements the main program. It can be subclassed by
users who wish to customize some parts of the main program.
(Adding additional command line options, customizing test loading, etc.)
"""
DEFAULT_VERBOSITY = 2
def __init__(self, argv):
self.init_option_parser()
self.parse_options(argv)
self.setup_logging()
def init_option_parser(self):
usage = "%prog [options] [TEST] ..."
op = optparse.OptionParser(usage=usage, add_help_option=False)
self.option_parser = op
op.add_option(
"--hide-output",
dest="show_output",
action="store_false",
default=True,
help="Suppress data that tests print to stdout/stderr, and only "
"show it if the test fails.",
)
op.add_option(
"-o",
"--output",
help="Write results to a file in a JSON format to be read by Buck",
)
op.add_option(
"-f",
"--failfast",
action="store_true",
default=False,
help="Stop after the first failure",
)
op.add_option(
"-l",
"--list-tests",
action="store_true",
dest="list",
default=False,
help="List tests and exit",
)
op.add_option(
"-L",
"--list-format",
dest="list_format",
choices=["buck", "python"],
default="python",
help="List tests format",
)
op.add_option(
"-r",
"--regex",
default=None,
help="Regex to apply to tests, to only run those tests",
)
op.add_option(
"--collect-coverage",
action="store_true",
default=False,
help="Collect test coverage information",
)
op.add_option(
"--coverage-include",
default="*",
help='File globs to include in coverage (split by ",")',
)
op.add_option(
"--coverage-omit",
default="",
help='File globs to omit from coverage (split by ",")',
)
op.add_option(
"--logger",
action="append",
metavar="<category>=<level>",
default=[],
help="Configure log levels for specific logger categories",
)
op.add_option(
"-q",
"--quiet",
action="count",
default=0,
help="Decrease the verbosity (may be specified multiple times)",
)
op.add_option(
"-v",
"--verbosity",
action="count",
default=self.DEFAULT_VERBOSITY,
help="Increase the verbosity (may be specified multiple times)",
)
op.add_option(
"-?", "--help", action="help", help="Show this help message and exit"
)
def parse_options(self, argv):
self.options, self.test_args = self.option_parser.parse_args(argv[1:])
self.options.verbosity -= self.options.quiet
if self.options.collect_coverage and coverage is None:
self.option_parser.error("coverage module is not available")
self.options.coverage_include = self.options.coverage_include.split(",")
if self.options.coverage_omit == "":
self.options.coverage_omit = []
else:
self.options.coverage_omit = self.options.coverage_omit.split(",")
def setup_logging(self):
# Configure the root logger to log at INFO level.
# This is similar to logging.basicConfig(), but uses our
# StderrLogHandler instead of a StreamHandler.
fmt = logging.Formatter("%(pathname)s:%(lineno)s: %(message)s")
log_handler = StderrLogHandler()
log_handler.setFormatter(fmt)
root_logger = logging.getLogger()
root_logger.addHandler(log_handler)
root_logger.setLevel(logging.INFO)
level_names = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warn": logging.WARNING,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
"fatal": logging.FATAL,
}
for value in self.options.logger:
parts = value.rsplit("=", 1)
if len(parts) != 2:
self.option_parser.error(
"--logger argument must be of the "
"form <name>=<level>: %s" % value
)
name = parts[0]
level_name = parts[1].lower()
level = level_names.get(level_name)
if level is None:
self.option_parser.error(
"invalid log level %r for log category %s" % (parts[1], name)
)
logging.getLogger(name).setLevel(level)
def create_loader(self):
import __test_modules__
return Loader(__test_modules__.TEST_MODULES, self.options.regex)
def load_tests(self):
loader = self.create_loader()
if self.options.collect_coverage:
self.start_coverage()
include = self.options.coverage_include
omit = self.options.coverage_omit
if include and "*" not in include:
optimize_for_coverage(self.cov, include, omit)
if self.test_args:
suite = loader.load_args(self.test_args)
else:
suite = loader.load_all()
if self.options.collect_coverage:
self.cov.start()
return suite
def get_tests(self, test_suite):
tests = []
for test in test_suite:
if isinstance(test, unittest.TestSuite):
tests.extend(self.get_tests(test))
else:
tests.append(test)
return tests
def run(self):
test_suite = self.load_tests()
if self.options.list:
for test in self.get_tests(test_suite):
if self.options.list_format == "python":
name = str(test)
elif self.options.list_format == "buck":
method_name = getattr(test, "_testMethodName", "")
name = _format_test_name(test.__class__, method_name)
else:
raise Exception(
"Bad test list format: %s" % (self.options.list_format,)
)
print(name)
return EXIT_CODE_SUCCESS
else:
result = self.run_tests(test_suite)
if self.options.output is not None:
with open(self.options.output, "w") as f:
json.dump(result.getResults(), f, indent=4, sort_keys=True)
if not result.wasSuccessful():
return EXIT_CODE_TEST_FAILURE
return EXIT_CODE_SUCCESS
def run_tests(self, test_suite):
# Install a signal handler to catch Ctrl-C and display the results
# (but only if running >2.6).
if sys.version_info[0] > 2 or sys.version_info[1] > 6:
unittest.installHandler()
# Run the tests
runner = BuckTestRunner(
self,
test_suite,
verbosity=self.options.verbosity,
show_output=self.options.show_output,
)
result = runner.run(test_suite)
if self.options.collect_coverage and self.options.show_output:
self.cov.stop()
if self.cov.html_report:
self.cov.html_report()
else:
self.cov.report(file=sys.stdout)
return result
def start_coverage(self):
if not self.options.collect_coverage:
return
# Keep the original working dir in case tests use os.chdir
self._original_working_dir = os.getcwd()
self.cov = coverage.Coverage(
include=self.options.coverage_include, omit=self.options.coverage_omit
)
self.cov.erase()
self.cov.start()
def get_coverage(self):
if not self.options.collect_coverage:
return None
result = {}
# Switch back to the original working directory.
os.chdir(self._original_working_dir)
self.cov.stop()
try:
f = StringIO()
self.cov.report(file=f)
lines = f.getvalue().split("\n")
except coverage.misc.CoverageException:
# Nothing was covered. That's fine by us
return result
# N.B.: the format of the coverage library's output differs
# depending on whether one or more files are in the results
for line in lines[2:]:
if line.strip("-") == "":
break
r = line.split()[0]
analysis = self.cov.analysis2(r)
covString = self.convert_to_diff_cov_str(analysis)
if covString:
result[r] = covString
return result
def convert_to_diff_cov_str(self, analysis):
# Info on the format of analysis:
# http://nedbatchelder.com/code/coverage/api.html
if not analysis:
return None
numLines = max(
analysis[1][-1] if len(analysis[1]) else 0,
analysis[2][-1] if len(analysis[2]) else 0,
analysis[3][-1] if len(analysis[3]) else 0,
)
lines = ["N"] * numLines
for l in analysis[1]:
lines[l - 1] = "C"
for l in analysis[2]:
lines[l - 1] = "X"
for l in analysis[3]:
lines[l - 1] = "U"
return "".join(lines)
def main(argv):
return MainProgram(sys.argv).run()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides ShapeDiffFinder, which finds differences in OTF/TTF glyph shapes.
ShapeDiffFinder takes in two paths to font binaries. It then provides methods
that compare these fonts, storing results in a report dictionary. These methods
are `find_area_diffs`, which compares glyph areas, `find_rendered_diffs`, which
compares harfbuzz output using PIL, and `find_shape_diffs`, which takes the
difference of shapes and calculates the area.
Some caveats: glyph areas can be the same even if the shapes are wildly
different (though they're useful for shapes that should be identical except
for some offset). Image comparison is usually either slow (hi-res) or inaccurate
(lo-res). Still, these are usually useful for raising red flags and catching
large errors.
"""
from __future__ import division
import os
from PIL import Image
import re
from io import BytesIO
import subprocess
import booleanOperations
from defcon import Glyph
from fontTools.pens.basePen import BasePen
from fontTools.ttLib import TTFont
from fontTools.pens.pointPen import PointToSegmentPen
from nototools.glyph_area_pen import GlyphAreaPen
from nototools import hb_input
from nototools.py23 import unichr
GDEF_UNDEF = 0
GDEF_MARK = 3
GDEF_LABELS = ["no class", "base", "ligature", "mark", "component"]
class ShapeDiffFinder:
"""Provides methods to report diffs in glyph shapes between OT Fonts."""
def __init__(self, file_a, file_b, stats, ratio_diffs=False, diff_threshold=0):
self.path_a = file_a
self.font_a = TTFont(self.path_a)
self.glyph_set_a = self.font_a.getGlyphSet()
self.gdef_a = {}
if (
"GDEF" in self.font_a
and not self.font_a["GDEF"].table.GlyphClassDef is None
):
self.gdef_a = self.font_a["GDEF"].table.GlyphClassDef.classDefs
self.path_b = file_b
self.font_b = TTFont(self.path_b)
self.glyph_set_b = self.font_b.getGlyphSet()
self.gdef_b = {}
if (
"GDEF" in self.font_b
and not self.font_b["GDEF"].table.GlyphClassDef is None
):
self.gdef_b = self.font_b["GDEF"].table.GlyphClassDef.classDefs
for stat_type in (
"compared",
"untested",
"unmatched",
"unicode_mismatch",
"gdef_mark_mismatch",
"zero_width_mismatch",
"input_mismatch",
):
if stat_type not in stats:
stats[stat_type] = []
self.stats = stats
self.ratio_diffs = ratio_diffs
self.diff_threshold = diff_threshold
self.basepath = os.path.basename(file_a)
def find_area_diffs(self):
"""Report differences in glyph areas."""
self.build_names()
pen_a = GlyphAreaPen(self.glyph_set_a)
pen_b = GlyphAreaPen(self.glyph_set_b)
mismatched = {}
for name in self.names:
self.glyph_set_a[name].draw(pen_a)
area_a = pen_a.pop()
self.glyph_set_b[name].draw(pen_b)
area_b = pen_b.pop()
if area_a != area_b:
mismatched[name] = (area_a, area_b)
stats = self.stats["compared"]
calc = self._calc_ratio if self.ratio_diffs else self._calc_diff
for name, areas in mismatched.items():
stats.append((calc(areas), name, self.basepath, areas[0], areas[1]))
def find_rendered_diffs(self, font_size=128, render_path=None):
"""Find diffs of glyphs as rendered by harfbuzz."""
hb_input_generator_a = hb_input.HbInputGenerator(self.font_a)
hb_input_generator_b = hb_input.HbInputGenerator(self.font_b)
if render_path:
font_name, _ = os.path.splitext(self.basepath)
render_path = os.path.join(render_path, font_name)
if not os.path.exists(render_path):
os.makedirs(render_path)
self.build_names()
diffs = []
for name in self.names:
class_a = self.gdef_a.get(name, GDEF_UNDEF)
class_b = self.gdef_b.get(name, GDEF_UNDEF)
if GDEF_MARK in (class_a, class_b) and class_a != class_b:
self.stats["gdef_mark_mismatch"].append(
(self.basepath, name, GDEF_LABELS[class_a], GDEF_LABELS[class_b])
)
continue
width_a = self.glyph_set_a[name].width
width_b = self.glyph_set_b[name].width
zwidth_a = width_a == 0
zwidth_b = width_b == 0
if zwidth_a != zwidth_b:
self.stats["zero_width_mismatch"].append(
(self.basepath, name, width_a, width_b)
)
continue
hb_args_a = hb_input_generator_a.input_from_name(name, pad=zwidth_a)
hb_args_b = hb_input_generator_b.input_from_name(name, pad=zwidth_b)
if hb_args_a != hb_args_b:
self.stats["input_mismatch"].append(
(self.basepath, name, hb_args_a, hb_args_b)
)
continue
# ignore unreachable characters
if not hb_args_a:
self.stats["untested"].append((self.basepath, name))
continue
features, text = hb_args_a
# ignore null character
if unichr(0) in text:
continue
img_file_a = BytesIO(
subprocess.check_output(
[
"hb-view",
"--font-size=%d" % font_size,
"--features=%s" % ",".join(features),
self.path_a,
text,
]
)
)
img_file_b = BytesIO(
subprocess.check_output(
[
"hb-view",
"--font-size=%d" % font_size,
"--features=%s" % ",".join(features),
self.path_b,
text,
]
)
)
img_a = Image.open(img_file_a)
img_b = Image.open(img_file_b)
width_a, height_a = img_a.size
width_b, height_b = img_b.size
data_a = img_a.getdata()
data_b = img_b.getdata()
img_file_a.close()
img_file_b.close()
width, height = max(width_a, width_b), max(height_a, height_b)
offset_ax = (width - width_a) // 2
offset_ay = (height - height_a) // 2
offset_bx = (width - width_b) // 2
offset_by = (height - height_b) // 2
diff = 0
for y in range(height):
for x in range(width):
ax, ay = x - offset_ax, y - offset_ay
bx, by = x - offset_bx, y - offset_by
if (
ax < 0
or bx < 0
or ax >= width_a
or bx >= width_b
or ay < 0
or by < 0
or ay >= height_a
or by >= height_b
):
diff += 1
else:
diff += (
abs(data_a[ax + ay * width_a] - data_b[bx + by * width_b])
/ 255
)
if self.ratio_diffs:
diff /= width * height
if render_path and diff > self.diff_threshold:
img_cmp = Image.new("RGB", (width, height))
data_cmp = list(img_cmp.getdata())
self._project(data_a, width_a, height_a, data_cmp, width, height, 1)
self._project(data_b, width_b, height_b, data_cmp, width, height, 0)
for y in range(height):
for x in range(width):
i = x + y * width
r, g, b = data_cmp[i]
assert b == 0
data_cmp[i] = r, g, min(r, g)
img_cmp.putdata(data_cmp)
img_cmp.save(self._rendered_png(render_path, name))
diffs.append((name, diff))
mismatched = {}
for name, diff in diffs:
if diff > self.diff_threshold:
mismatched[name] = diff
stats = self.stats["compared"]
for name, diff in mismatched.items():
stats.append((diff, name, self.basepath))
def _project(
self, src_data, src_width, src_height, dst_data, width, height, channel
):
"""Project a single-channel image onto a channel of an RGB image."""
offset_x = (width - src_width) // 2
offset_y = (height - src_height) // 2
for y in range(src_height):
for x in range(src_width):
src_i = x + y * src_width
dst_i = x + offset_x + (y + offset_y) * width
pixel = list(dst_data[dst_i])
pixel[channel] = src_data[src_i]
dst_data[dst_i] = tuple(pixel)
def find_shape_diffs(self):
"""Report differences in glyph shapes, using BooleanOperations."""
self.build_names()
area_pen = GlyphAreaPen(None)
pen = PointToSegmentPen(area_pen)
mismatched = {}
for name in self.names:
glyph_a = Glyph()
glyph_b = Glyph()
self.glyph_set_a[name].draw(Qu2CuPen(glyph_a.getPen(), self.glyph_set_a))
self.glyph_set_b[name].draw(Qu2CuPen(glyph_b.getPen(), self.glyph_set_b))
booleanOperations.xor(list(glyph_a), list(glyph_b), pen)
area = abs(area_pen.pop())
if area:
mismatched[name] = area
stats = self.stats["compared"]
for name, area in mismatched.items():
stats.append((area, name, self.basepath))
def find_area_shape_diff_products(self):
"""Report product of differences in glyph areas and glyph shapes."""
self.find_area_diffs()
old_compared = self.stats["compared"]
self.stats["compared"] = []
self.find_shape_diffs()
new_compared = {n: d for d, n, _ in self.stats["compared"]}
for i, (diff, name, font, area_a, area_b) in enumerate(old_compared):
if font != self.basepath:
continue
new_diff = diff * new_compared.get(name, 0)
old_compared[i] = new_diff, name, font, area_a, area_b
self.stats["compared"] = old_compared
def build_names(self):
"""Build a list of glyph names shared between the fonts."""
if hasattr(self, "names"):
return
stats = self.stats["unmatched"]
names_a = set(self.font_a.getGlyphOrder())
names_b = set(self.font_b.getGlyphOrder())
if names_a != names_b:
stats.append((self.basepath, names_a - names_b, names_b - names_a))
self.names = names_a & names_b
stats = self.stats["unicode_mismatch"]
reverse_cmap_a = hb_input.build_reverse_cmap(self.font_a)
reverse_cmap_b = hb_input.build_reverse_cmap(self.font_b)
mismatched = {}
for name in self.names:
unival_a = reverse_cmap_a.get(name)
unival_b = reverse_cmap_b.get(name)
if unival_a != unival_b:
mismatched[name] = (unival_a, unival_b)
if mismatched:
stats.append((self.basepath, mismatched.items()))
self.names -= set(mismatched.keys())
@staticmethod
def dump(stats, allowlist, out_lines, include_vals, multiple_fonts):
"""Return the results of run diffs.
Args:
stats: List of tuples with diff data which is sorted and printed.
allowlist: Names of glyphs to exclude from report.
out_lines: Number of diff lines to print.
include_vals: Include the values that have been diffed in report.
multiple_fonts: Designates whether stats have been accumulated from
multiple fonts; if so, font names will be printed as well.
"""
report = []
compared = sorted(s for s in stats["compared"] if s[1] not in allowlist)
compared.reverse()
fmt = "%s %s"
if include_vals:
fmt += " (%s vs %s)"
if multiple_fonts:
fmt = "%s " + fmt
report.append("%d differences in glyph shape" % len(compared))
for line in compared[:out_lines]:
# print <font> <glyph> <vals>; stats are sorted in reverse priority
line = tuple(reversed(line[:3])) + tuple(line[3:])
# ignore font name if just one pair of fonts was compared
if not multiple_fonts:
line = line[1:]
report.append(fmt % line)
report.append("")
for font, set_a, set_b in stats["unmatched"]:
report.append("Glyph coverage doesn't match in %s" % font)
report.append(" in A but not B: %s" % sorted(set_a))
report.append(" in B but not A: %s" % sorted(set_b))
report.append("")
for font, mismatches in stats["unicode_mismatch"]:
report.append("Glyph unicode values don't match in %s" % font)
for name, univals in sorted(mismatches):
univals = [(("0x%04X" % v) if v else str(v)) for v in univals]
report.append(" %s: %s in A, %s in B" % (name, univals[0], univals[1]))
report.append("")
ShapeDiffFinder._add_simple_report(
report,
stats["gdef_mark_mismatch"],
"%s: Mark class mismatch for %s (%s vs %s)",
)
ShapeDiffFinder._add_simple_report(
report,
stats["zero_width_mismatch"],
"%s: Zero-width mismatch for %s (%d vs %d)",
)
ShapeDiffFinder._add_simple_report(
report,
stats["input_mismatch"],
"%s: Harfbuzz input mismatch for %s (%s vs %s)",
)
ShapeDiffFinder._add_simple_report(
report, stats["untested"], "%s: %s not tested (unreachable?)"
)
return "\n".join(report)
@staticmethod
def _add_simple_report(report, stats, fmt):
for stat in sorted(stats):
report.append(fmt % stat)
if stats:
report.append("")
def _calc_diff(self, vals):
"""Calculate an area difference."""
a, b = vals
return abs(a - b)
def _calc_ratio(self, vals):
"""Calculate an area ratio."""
a, b = vals
if not (a or b):
return 0
if abs(a) > abs(b):
a, b = b, a
return 1 - a / b
def _rendered_png(self, render_path, glyph_name):
glyph_filename = re.sub(r"([A-Z_])", r"\1_", glyph_name) + ".png"
return os.path.join(render_path, glyph_filename)
class Qu2CuPen(BasePen):
def __init__(self, pen, glyphSet):
BasePen.__init__(self, glyphSet)
self.pen = pen
def _moveTo(self, pt):
self.pen.moveTo(pt)
def _lineTo(self, pt):
self.pen.lineTo(pt)
def _curveToOne(self, pt1, pt2, pt3):
self.pen.curveTo(pt1, pt2, pt3)
def _closePath(self):
self.pen.closePath()
def _endPath(self):
self.pen.endPath()
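# A minimal driving sketch (not part of the original module), with hypothetical
# font paths: run one comparison, accumulating results into a shared stats
# dict, then format the report with the class's own dump() helper.
def _example_shape_diff(path_a="a.ttf", path_b="b.ttf"):
    stats = {}
    finder = ShapeDiffFinder(path_a, path_b, stats, ratio_diffs=True)
    finder.find_area_diffs()
    return ShapeDiffFinder.dump(
        stats, allowlist=set(), out_lines=20, include_vals=True,
        multiple_fonts=False)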
|
|
# dvcz/user.py
"""
User, PubUser, Committer, and PubCommitter objects and related classes
and functions.
"""
import os
# import re
import sys
import time
import hashlib
from buildlist import (check_dirs_in_path, generate_rsa_key,
read_rsa_key, rm_f_dir_contents)
from dvcz import DvczError
from dvcz.project import Project
from xlattice import HashTypes
from xlu import UDir
from Crypto.PublicKey import RSA
if sys.version_info < (3, 6):
# pylint: disable=unused-import
import sha3
assert sha3 # suppress warning
# == adduser ========================================================
def make_committer_id(pubkey, hashtype=HashTypes.SHA2):
"""
Create a unique committer ID derived from the user's RSA public key
using this SHA type.
This implementation adds the current time to the hash.
Returns a 40- or 64-character hex value.
"""
if hashtype == HashTypes.SHA1:
sha = hashlib.sha1()
elif hashtype == HashTypes.SHA2:
sha = hashlib.sha256()
elif hashtype == HashTypes.SHA3:
sha = hashlib.sha3_256()
elif hashtype == HashTypes.BLAKE2B:
sha = hashlib.blake2b(digest_size=32)
else:
raise NotImplementedError
sha.update(pubkey.exportKey()) # PEM format
sha.update(str(time.time()).encode('utf-8'))
return sha.hexdigest()
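# A minimal sketch (not part of the original module): derive a committer ID
# from a freshly generated key. The 1024-bit size is only to keep the example
# fast; SHA-256 yields a 64-character hex digest.
def _example_make_committer_id():
    key = RSA.generate(1024)
    committer_id = make_committer_id(key.publickey(), HashTypes.SHA2)
    assert len(committer_id) == 64
    return committer_id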
def do_add_user(options):
"""
Carry out the configuration.
"""
if options.testing:
if os.path.exists('tmp'):
rm_f_dir_contents('tmp') # empties the directory
os.makedirs(options.home, exist_ok=True, mode=0o755)
# user_dvcz_path ------------------------------------------------
# this is $HOME/.dvcz unless testing
if os.path.exists(options.user_dvcz_path) and options.force:
rm_f_dir_contents(options.user_dvcz_path) # empties the directory
if not os.path.exists(options.user_dvcz_path):
os.makedirs(options.user_dvcz_path, exist_ok=True, mode=0o755)
# write RSA private key to $DVCZ_DIR/node/sk_priv.pem
if not os.path.exists(options.key_path):
check_dirs_in_path(options.key_path)
if options.testing:
generate_rsa_key(options.key_path, 1024)
else:
generate_rsa_key(options.key_path, options.key_bits)
# Read the RSA private key from disk ------------------
privkey = read_rsa_key(options.key_path)
pubkey = privkey.publickey()
# Generate and write a unique committer ID to $DVCZ_DIR/id.
path_to_id = os.path.join(options.user_dvcz_path, 'id')
if os.path.exists(path_to_id):
with open(path_to_id, 'r') as file:
committer_id = file.read()
else:
committer_id = make_committer_id(pubkey, options.hashtype)
with open(path_to_id, 'w+') as file:
file.write(committer_id)
# DEBUG
print("committer ID: %s" % committer_id)
# END
# proj_dvcz_path ------------------------------------------------
# if testing, remove it; otherwise just make sure that it exists
if options.testing and os.path.exists(options.proj_dvcz_path):
# DEBUG
print("deleting %s" % options.proj_dvcz_path)
# END
rm_f_dir_contents(options.proj_dvcz_path) # empties directory
os.makedirs(options.proj_dvcz_path, 0o755, exist_ok=True)
# u_path --------------------------------------------------------
hashtype = options.hashtype
if options.testing and options.u_path and os.path.exists(options.u_path):
rm_f_dir_contents(options.u_path)
if options.u_path:
# if necessary create $U_DIR with requisite DIR_STRUC and hashtype
# u_dir =
UDir.discover(options.u_path, hashtype=hashtype)
# can get SHA type from u_dir
# create $U_DIR/in/$ID/ which is DIR_FLAT with the correct hashtype
my_in_path = os.path.join(options.u_path, 'in', committer_id)
# my_in_dir =
UDir.discover(my_in_path, hashtype=hashtype)
# CLASSES ===========================================================
class _User(object):
"""
Abstract version of the User class.
Includes secret RSA keys.
"""
def __init__(self, login=os.environ['LOGNAME'],
sk_priv=None, ck_priv=None, key_bits=2048):
# The login must always be a valid name, one including no
# delimiters or other odd characters. At least for the moment
# we use the same rules for user names as Project names.
if not Project.valid_proj_name(login):
raise DvczError("not a valid login: '%s'" % login)
self._login = login
# Caller can supply keys with different sizes.
if sk_priv:
if sk_priv.size() + 1 != key_bits:
sk_priv = None
elif ck_priv.size() + 1 != key_bits:
ck_priv = None
if sk_priv is None:
sk_priv = RSA.generate(key_bits)
ck_priv = None
if ck_priv is None:
ck_priv = RSA.generate(key_bits)
# To write use
# with open(path, 'wb+') as file:
# file.write(sk_priv.exportKey('PEM'))
# To read use
# with open(path, 'rb') as file: sk_priv = RSA.importKey(file.read())
self._sk_priv = sk_priv
self._ck_priv = ck_priv
self._key_bits = sk_priv.size() + 1
@property
def login(self):
""" Return the user's login. """
return self._login
@property
def sk_priv(self):
""" Return the RSA key used for making digital signatures. """
return self._sk_priv
@property
def ck_priv(self):
""" Return the RSA key used for encryption. """
return self._ck_priv
@property
def key_bits(self):
"""
Return the size of the RSA key.
Note that PyCrypto's RSA size() is 1 less than key_bits.
"""
return self._key_bits
class User(_User):
"""
Descriptor for DVCZ users.
Includes private keys and serialization/deserialization methods.
"""
START_LINE = '-----START DVCZ USER-----'
END_LINE = '-----END DVCZ USER-----'
def __init__(self, login=os.environ['LOGNAME'],
sk_priv=None, ck_priv=None, key_bits=2048):
# pylint: disable=useless-super-delegation
super().__init__(login, sk_priv, ck_priv, key_bits)
def __eq__(self, other):
if not isinstance(other, User):
return False
return self._login == other.login and \
self._sk_priv == other.sk_priv and \
self._ck_priv == other.ck_priv
def __str__(self):
# possible ValueErrors here
sk_exp_ = self.sk_priv.exportKey('PEM')
# pylint is confused here
# pylint: disable=no-member
sk_exp = sk_exp_.decode('utf-8')
ck_exp_ = self.ck_priv.exportKey('PEM')
# pylint is confused here
# pylint: disable=no-member
ck_exp = ck_exp_.decode('utf-8')
return """{0}
{1}
{2}
{3}
{4}
""".format(User.START_LINE,
self.login,
sk_exp,
ck_exp,
User.END_LINE)
@classmethod
def create_from_file(cls, path):
""" Parse the serialized User object. """
with open(path, 'r') as file:
text = file.read()
return cls.create_from_string(text)
@classmethod
def create_from_string(cls, string):
""" Parse the serialized User object. """
if not string:
raise DvczError('empty string')
strings = string.split('\n')
if strings[-1] == '':
strings = strings[:-1]
return cls.create_from_string_array(strings)
@classmethod
def create_from_string_array(cls, strings):
""" Parse the serialized User object from a list of strings. """
if not strings:
raise DvczError('empty string array')
def collect_priv(lines, offset, line_count):
"""
Interpret a list of lines of text as a PEM-formatted
RSA private key.
"""
# find the end of the PEM-formatted RSA private key
found = False
ndx = -1
for ndx in range(offset, line_count):
if lines[ndx] == '-----END RSA PRIVATE KEY-----':
found = True
break
if not found:
raise DvczError("can't find end of PEM-formatted RSA key")
text = '\n'.join(lines[offset:ndx + 1])
priv = RSA.importKey(text)
return (ndx + 1, priv)
line_count = len(strings)
if line_count < 5:
raise DvczError(
"too few parts (%d) in User string array" % line_count)
if strings[0] != cls.START_LINE:
raise DvczError("found '%s' instead of '%s'" % (
strings[0], cls.START_LINE))
login = strings[1]
offset = 2
offset, sk_priv = collect_priv(strings, offset, line_count)
offset, ck_priv = collect_priv(strings, offset, line_count)
if strings[offset] != cls.END_LINE:
raise DvczError("found '%s' instead of '%s'" % (
strings[offset], cls.END_LINE))
# XXX Ignoring the possibility of different key sizes
key_bits = sk_priv.size() + 1 # XXX TILT: PyCrypto returns eg 1023
# DEBUG
# print("create_from_string_array: found sk_priv size is %d" %
# key_bits)
# END
return User(login, sk_priv, ck_priv, key_bits)
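# A minimal round-trip sketch (not part of the original module): serialize a
# User and parse it back. The login and the small key size are hypothetical,
# chosen only to keep the example quick.
def _example_user_round_trip():
    user = User(login='alice', key_bits=1024)
    clone = User.create_from_string(str(user))
    assert clone.login == user.login
    assert clone.key_bits == user.key_bits
    return clone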
class _PubUser(object):
"""
This version of the user descriptor includes the public part of RSA keys.
"""
def __init__(self, login, sk_, ck_):
# The login must always be a valid name, one including no
# delimiters or other odd characters.
if not Project.valid_proj_name(login):
raise DvczError("not a valid login: '%s'" % login)
self._login = login
# To write use
# with open(path, 'wb+') as file: file.write(sk.exportKey('PEM'))
# To read use
# with open(path, 'rb') as file: sk = RSA.importKey(file.read())
self._sk = sk_
self._ck = ck_
@property
def login(self):
""" Return the User's login, a valid file name. """
return self._login
@property
def sk_(self):
""" Return the public part of the RSA key used for digital sigs. """
return self._sk
@property
def ck_(self):
""" Return the public part of the RSA key used for encryption. """
return self._ck
def __str__(self):
return "NOT IMPLEMENTED: User.__str__()"
class PubUser(_PubUser):
""" The public view of a User, one which conains no secret keys. """
@classmethod
def create_from_user(cls, user):
""" Replaces each private RSA key with its public part. """
return PubUser(user.login,
user.sk_priv.publickey(),
user.ck_priv.publickey())
def create_from_file(self, path):
""" Parse the serialized PubUser object. """
raise NotImplementedError()
class Committer(User):
"""
Specifies a Committer, a User which can commit changes to a
content-keyed store.
In addition to the attributes of a User, a Committer has a
handle, a valid file name that is unique within the cluster of
cooperating servers housing content-keyed data stores.
"""
START_LINE = '-----START DVCZ COMMITTER-----'
END_LINE = '-----END DVCZ COMMITTER-----'
def __init__(self, handle, login=os.environ['LOGNAME'],
sk_priv=None, ck_priv=None, key_bits=2048):
if not Project.valid_proj_name(handle):
raise DvczError("'%s' is not a valid handle" % handle)
super().__init__(login, sk_priv, ck_priv, key_bits)
self._handle = handle
@property
def handle(self):
""" Return the committer's handle, a valid name. """
return self._handle
def __eq__(self, other):
if not isinstance(other, Committer):
return False
return self._handle == other.handle and \
self._login == other.login and \
self._sk_priv == other.sk_priv and \
self._ck_priv == other.ck_priv
def __str__(self):
return """{0}
{1}
{2}{3}
""".format(Committer.START_LINE,
self.handle,
super().__str__(),
Committer.END_LINE)
@classmethod
def create_from_file(cls, path):
""" Parse the serialized Committer object. """
with open(path, 'r') as file:
text = file.read()
return cls.create_from_string(text)
@classmethod
def create_from_string(cls, string):
""" Parse the serialized Committer object. """
if not string:
raise DvczError('empty string')
# DEBUG
# print("Committer.create_from_string: input is:\n%s" % string)
# END
strings = string.split('\n')
while strings[-1] == '':
strings = strings[:-1]
return cls.create_from_string_array(strings)
@classmethod
def create_from_string_array(cls, strings):
""" Parse the serialized Committer object from a list of strings. """
if not strings:
raise DvczError("empty string array")
line_count = len(strings)
if line_count < 5:
raise DvczError(
"too few lines (%d) in Committer string array" % line_count)
if strings[0] != cls.START_LINE:
raise DvczError("found '%s' instead of '%s'" % (
strings[0], cls.START_LINE))
handle = strings[1]
if strings[-1] != cls.END_LINE:
raise DvczError("found '%s' instead of '%s'" % (
strings[-1], cls.END_LINE))
user = User.create_from_string_array(strings[2:-1])
return Committer(handle,
user.login,
user.sk_priv,
user.ck_priv,
user.key_bits)
class PubCommitter(_PubUser):
"""
The public view of a Committer, one which contains no secret keys.
"""
def __init__(self, handle, login=os.environ['LOGNAME'],
sk_=None, ck_=None):
if not Project.valid_proj_name(handle):
raise DvczError("'%s' is not a valid handle" % handle)
super().__init__(login, sk_, ck_)
self._handle = handle
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## UAI Train related
'''
Default work dir. The working dir for the training job; it will contain:
/data/data --data_dir
/data/output --output_dir
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_string("work_dir", "/data", "Default work path")
'''
Default data path used in Training, all data will be downloaded into this path
Please use data in this path as input for Training
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
#flags.DEFINE_string("data_dir", "/data/data", "Default data path")
'''
Default output path used in Training, files in this path will be uploaded to UFile
after training finished.
You can also place your checkpoint files inside output_path (if you provided
them in the UCloud console); those files will be downloaded into this path
before training starts.
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
#flags.DEFINE_string("output_dir", "/data/output", "Default output path")
'''
Default tensorboard output path used in Training; files in this path will be uploaded to UFile
after training finished.
This dir is same as output_dir
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_string("log_dir", "/data/output", "Default log path")
'''
Define num_gpus for training
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_integer("num_gpus", 0, "Num of avaliable gpus")
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
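# A minimal sketch (not part of the original script), assuming a hypothetical
# vocab file path: turn one sentence-pair example into fixed-length features.
def _example_convert_single_example(vocab_file="/path/to/vocab.txt"):
    tokenizer = tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=True)
    example = InputExample(
        guid="demo-1", text_a="the cat sat", text_b="on the mat",
        label="entailment")
    feature = convert_single_example(
        0, example, ["contradiction", "entailment", "neutral"], 32, tokenizer)
    assert len(feature.input_ids) == 32
    return feature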
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
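# A minimal sketch (not part of the original script), with a hypothetical
# TFRecord path: build an input_fn and materialize the dataset the way
# TPUEstimator would, by passing `batch_size` through `params`.
def _example_input_fn(record_file="/path/to/train.tf_record"):
    input_fn = file_based_input_fn_builder(
        input_file=record_file, seq_length=128, is_training=True,
        drop_remainder=True)
    dataset = input_fn({"batch_size": 32})
    return dataset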
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
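# Editor's sketch (not part of the original script): how the truncation
# heuristic above behaves on plain Python lists. The token strings are made up.
def _example_truncate_seq_pair():
  tokens_a = ["the", "quick", "brown", "fox", "jumps"]
  tokens_b = ["lazy", "dog"]
  _truncate_seq_pair(tokens_a, tokens_b, max_length=5)
  # Only the longer sequence loses tokens, one at a time from the end:
  # tokens_a -> ["the", "quick", "brown"], tokens_b is untouched.
  assert tokens_a == ["the", "quick", "brown"]
  assert tokens_b == ["lazy", "dog"]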
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import pipes
import shutil
import subprocess
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
def SetEnvironmentAndGetRuntimeDllDirs():
"""Sets up os.environ to use the depot_tools VS toolchain with gyp, and
returns the location of the VS runtime DLLs so they can be copied into
the output directory after gyp generation.
"""
vs2013_runtime_dll_dirs = None
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
# When running on a non-Windows host, only do this if the SDK has explicitly
# been downloaded before (in which case json_data_file will exist).
if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
and depot_tools_win_toolchain):
if not os.path.exists(json_data_file):
Update()
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
toolchain = toolchain_data['path']
version = toolchain_data['version']
win_sdk = toolchain_data.get('win_sdk')
if not win_sdk:
win_sdk = toolchain_data['win8sdk']
wdk = toolchain_data['wdk']
# TODO(scottmg): The order unfortunately matters in these. They should be
# split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
# below). http://crbug.com/345992
vs2013_runtime_dll_dirs = toolchain_data['runtime_dirs']
# We may set DEPOT_TOOLS_WIN_TOOLCHAIN = 2 in gn-standalone build
if os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN') != '2':
os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
os.environ['GYP_MSVS_VERSION'] = version
# We need to make sure windows_sdk_path is set to the automated
# toolchain values in GYP_DEFINES, but don't want to override any
# other values there.
import gyp
gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES'))
gyp_defines_dict['windows_sdk_path'] = win_sdk
os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v)))
for k, v in gyp_defines_dict.iteritems())
os.environ['WINDOWSSDKDIR'] = win_sdk
os.environ['WDK_DIR'] = wdk
# Include the VS runtime in the PATH in case it's not machine-installed.
runtime_path = ';'.join(vs2013_runtime_dll_dirs)
os.environ['PATH'] = runtime_path + ';' + os.environ['PATH']
return vs2013_runtime_dll_dirs
def _VersionNumber():
"""Gets the standard version number ('120', '140', etc.) based on
GYP_MSVS_VERSION."""
if os.environ['GYP_MSVS_VERSION'] == '2013':
return '120'
elif os.environ['GYP_MSVS_VERSION'] == '2015':
return '140'
else:
raise ValueError('Unexpected GYP_MSVS_VERSION')
def _CopyRuntimeImpl(target, source):
"""Copy |source| to |target| if it doesn't already exist or if it
needs to be updated.
"""
if (os.path.isdir(os.path.dirname(target)) and
(not os.path.isfile(target) or
os.stat(target).st_mtime != os.stat(source).st_mtime)):
print 'Copying %s to %s...' % (source, target)
if os.path.exists(target):
os.unlink(target)
shutil.copy2(source, target)
def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
"""Copy both the msvcr and msvcp runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for file_part in ('p', 'r'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
def _CopyRuntime2015(target_dir, source_dir, dll_pattern):
"""Copy both the msvcp and vccorlib runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for file_part in ('msvcp', 'vccorlib'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
def CopyVsRuntimeDlls(output_dir, runtime_dirs):
"""Copies the VS runtime DLLs from the given |runtime_dirs| to the output
directory so that even if not system-installed, built binaries are likely to
be able to run.
This needs to be run after gyp has been run so that the expected target
output directories are already created.
"""
x86, x64 = runtime_dirs
out_debug = os.path.join(output_dir, 'Debug')
out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
out_release = os.path.join(output_dir, 'Release')
out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
out_release_x64 = os.path.join(output_dir, 'Release_x64')
if os.path.exists(out_debug) and not os.path.exists(out_debug_nacl64):
os.makedirs(out_debug_nacl64)
if os.path.exists(out_release) and not os.path.exists(out_release_nacl64):
os.makedirs(out_release_nacl64)
if os.environ.get('GYP_MSVS_VERSION') == '2015':
_CopyRuntime2015(out_debug, x86, '%s140d.dll')
_CopyRuntime2015(out_release, x86, '%s140.dll')
_CopyRuntime2015(out_debug_x64, x64, '%s140d.dll')
_CopyRuntime2015(out_release_x64, x64, '%s140.dll')
_CopyRuntime2015(out_debug_nacl64, x64, '%s140d.dll')
_CopyRuntime2015(out_release_nacl64, x64, '%s140.dll')
else:
# VS2013 is the default.
_CopyRuntime2013(out_debug, x86, 'msvc%s120d.dll')
_CopyRuntime2013(out_release, x86, 'msvc%s120.dll')
_CopyRuntime2013(out_debug_x64, x64, 'msvc%s120d.dll')
_CopyRuntime2013(out_release_x64, x64, 'msvc%s120.dll')
_CopyRuntime2013(out_debug_nacl64, x64, 'msvc%s120d.dll')
_CopyRuntime2013(out_release_nacl64, x64, 'msvc%s120.dll')
# Copy the PGO runtime library to the release directories.
if os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
'VC', 'bin')
pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x86):
_CopyRuntimeImpl(os.path.join(out_release, pgo_runtime_dll), source_x86)
source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x64):
_CopyRuntimeImpl(os.path.join(out_release_x64, pgo_runtime_dll),
source_x64)
def CopyDlls(target_dir, configuration, target_cpu):
"""Copy the VS runtime DLLs into the requested directory as needed.
configuration is one of 'Debug' or 'Release'.
target_cpu is one of 'x86' or 'x64'.
The debug configuration gets both the debug and release DLLs; the
release config only the latter.
"""
vs2013_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
if not vs2013_runtime_dll_dirs:
return
x64_runtime, x86_runtime = vs2013_runtime_dll_dirs
runtime_dir = x64_runtime if target_cpu == 'x64' else x86_runtime
_CopyRuntime2013(
target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + '.dll')
if configuration == 'Debug':
_CopyRuntime2013(
target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + 'd.dll')
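# Editor's sketch (not part of the original script): the dll_pattern strings
# used above expand per file_part. For the default VS2013 toolchain the
# release runtime names come out as msvcp120.dll and msvcr120.dll.
def _ExampleRuntimeDllNames():
  pattern = 'msvc%s' + '120' + '.dll'  # what CopyDlls builds for VS2013
  return [pattern % file_part for file_part in ('p', 'r')]
  # -> ['msvcp120.dll', 'msvcr120.dll']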
def _GetDesiredVsToolchainHashes():
"""Load a list of SHA1s corresponding to the toolchains that we want installed
to build with."""
if os.environ.get('GYP_MSVS_VERSION') == '2015':
return ['49ae4b60d898182fc3f521c2fcda82c453915011']
else:
# Default to VS2013.
return ['ee7d718ec60c2dc5d255bbe325909c2021a7efef']
def Update(force=False):
"""Requests an update of the toolchain to the specific hashes we have at
this revision. The update outputs a .json of the various configuration
information required to pass to gyp which we use in |GetToolchainDir()|.
"""
if force != False and force != '--force':
print >>sys.stderr, 'Unknown parameter "%s"' % force
return 1
if force == '--force' or os.path.exists(json_data_file):
force = True
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
if ((sys.platform in ('win32', 'cygwin') or force) and
depot_tools_win_toolchain):
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
get_toolchain_args = [
sys.executable,
os.path.join(depot_tools_path,
'win_toolchain',
'get_toolchain_if_necessary.py'),
'--output-json', json_data_file,
] + _GetDesiredVsToolchainHashes()
if force:
get_toolchain_args.append('--force')
subprocess.check_call(get_toolchain_args)
return 0
def GetToolchainDir():
"""Gets location information about the current toolchain (must have been
previously updated by 'update'). This is used for the GN build."""
runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
# If WINDOWSSDKDIR is not set, search the default SDK path and set it.
if 'WINDOWSSDKDIR' not in os.environ:
default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\8.1'
if os.path.isdir(default_sdk_path):
os.environ['WINDOWSSDKDIR'] = default_sdk_path
print '''vs_path = "%s"
sdk_path = "%s"
vs_version = "%s"
wdk_dir = "%s"
runtime_dirs = "%s"
''' % (
os.environ['GYP_MSVS_OVERRIDE_PATH'],
os.environ['WINDOWSSDKDIR'],
os.environ['GYP_MSVS_VERSION'],
os.environ.get('WDK_DIR', ''),
';'.join(runtime_dll_dirs or ['None']))
def main():
commands = {
'update': Update,
'get_toolchain_dir': GetToolchainDir,
'copy_dlls': CopyDlls,
}
if len(sys.argv) < 2 or sys.argv[1] not in commands:
print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands)
return 1
return commands[sys.argv[1]](*sys.argv[2:])
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright (c) 2013 The SAYCBridge Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from z3b import enum
from core import suit
# The ordering of these values does not matter. We only use Enum so that
# python throws a lookup error when we typo the annotation name.
annotations = enum.Enum(
"Opening",
# FIXME: It's a bit odd that 1C, 1S, 2N can end up with both
# OneLevelSuitOpening and NotrumpSystemsOn.
# e.g. Does ResponderJumpShift apply after 2N?
"OneLevelSuitOpening", # 1-level suited response opening book.
"StrongTwoClubOpening", # 2C response opening book.
"NotrumpSystemsOn", # NT response opening book.
"StandardOvercall", # Overcall opening book.
"Preemptive", # Preemptive opening book.
"BidClubs",
"BidDiamonds",
"BidHearts",
"BidSpades",
"LimitRaise",
"OpenerReverse",
# Not all Cappelletti bids are artificial, some can be treated as to-play.
"Cappelletti",
# Quantitative 4N is odd, but not artificial. :)
"QuantitativeFourNotrumpJump",
"Artificial",
# NOTE: RuleCompiler._compile_annotations will automatically imply
# "Artificial" when encountering any annotations > Artificial.
# This is a hack to avoid "forgot to add Artifical" bugs.
"Blackwood",
"FeatureRequest",
"FourthSuitForcing",
"Gerber",
"Jacoby2N",
"MichaelsCuebid",
"MichaelsMinorRequest",
"CappellettiMinorRequest",
"NegativeDouble",
"Stayman",
"TakeoutDouble",
"Transfer",
"Unusual2N",
"GrandSlamForce",
)
# Used by RuleCompiler._compile_annotations.
implies_artificial = set([value for value in annotations if value > annotations.Artificial])
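def _example_implies_artificial():
    # Editor's illustrative sketch, not part of the original module: because
    # the enum above is declared in order, every annotation listed after
    # Artificial is treated as implicitly artificial by RuleCompiler, while
    # natural annotations declared earlier are not.
    assert annotations.Blackwood in implies_artificial
    assert annotations.LimitRaise not in implies_artificial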
def did_bid_annotation(suit):
return (
annotations.BidClubs,
annotations.BidDiamonds,
annotations.BidHearts,
annotations.BidSpades,
)[suit.index]
# FIXME: Consider adding CallPrecondition and HistoryPrecondition subclasses
# which could then easily be filtered to the front of the preconditions list
# for faster matching, or asserting about unreachable call_names, etc.
class Precondition(object):
repr_name = None
def __repr__(self):
name = self.repr_name or self.__class__.__name__
return "%s(%s)" % (name, ", ".join(map(repr, self.repr_args)))
@property
def repr_args(self):
return []
def fits(self, history, call):
raise NotImplementedError
class InvertedPrecondition(Precondition):
repr_name = "Not"
def __init__(self, precondition):
self.precondition = precondition
@property
def repr_args(self):
return [self.precondition]
def fits(self, history, call):
return not self.precondition.fits(history, call)
class SummaryPrecondition(Precondition):
def __init__(self, *preconditions):
self.preconditions = preconditions
@property
def repr_args(self):
return self.preconditions
class EitherPrecondition(SummaryPrecondition):
repr_name = "Either"
def fits(self, history, call):
return any(precondition.fits(history, call) for precondition in self.preconditions)
class AndPrecondition(SummaryPrecondition):
repr_name = "And"
def fits(self, history, call):
return all(precondition.fits(history, call) for precondition in self.preconditions)
class NoOpening(Precondition):
def fits(self, history, call):
return annotations.Opening not in history.annotations
class Opened(Precondition):
def __init__(self, position):
self.position = position
@property
def repr_args(self):
return [self.position.key]
def fits(self, history, call):
return annotations.Opening in history.annotations_for_position(self.position)
class TheyOpened(Precondition):
def fits(self, history, call):
return annotations.Opening in history.them.annotations
# FIXME: Rename to NotrumpOpeningBook?
class NotrumpSystemsOn(Precondition):
def fits(self, history, call):
return annotations.NotrumpSystemsOn in history.us.annotations
class OneLevelSuitedOpeningBook(Precondition):
def fits(self, history, call):
return annotations.OneLevelSuitOpening in history.us.annotations
class StrongTwoClubOpeningBook(Precondition):
def fits(self, history, call):
return annotations.StrongTwoClubOpening in history.us.annotations
class HasBid(Precondition):
def __init__(self, position):
self.position = position
@property
def repr_args(self):
return [self.position.key]
def fits(self, history, call):
for view in history.view_for(self.position).walk:
if view.last_call and not view.last_call.is_pass():
return True
return False
class ForcedToBid(Precondition):
def fits(self, history, call):
# preconditions.py depends on forcing.py, but forcing.py needs to know annotations.
from forcing import SAYCForcingOracle
return SAYCForcingOracle().forced_to_bid(history)
class IsGame(Precondition):
def _game_level(self, strain):
if strain in suit.MINORS:
return 5
if strain in suit.MAJORS:
return 4
return 3
def fits(self, history, call):
return call.is_contract() and call.level == self._game_level(call.strain)
class LastBidWasBelowGame(IsGame):
def fits(self, history, call):
last_contract = history.last_contract
return last_contract.level < self._game_level(last_contract.strain)
class LastBidWasGameOrAbove(IsGame):
def fits(self, history, call):
last_contract = history.last_contract
return last_contract.level >= self._game_level(last_contract.strain)
class LastBidWasBelowSlam(Precondition):
def fits(self, history, call):
last_contract = history.last_contract
return last_contract.level < 6
class LastBidHasAnnotation(Precondition):
def __init__(self, position, annotation):
self.position = position
self.annotation = annotation
# This assert is likely incompatible with module based development, but is nice for catching typos.
assert self.annotation in annotations
@property
def repr_args(self):
return [self.position.key, self.annotation.key]
def fits(self, history, call):
return self.annotation in history.view_for(self.position).annotations_for_last_call
class LastBidHasStrain(Precondition):
def __init__(self, position, strain_or_strains):
self.position = position
if strain_or_strains in suit.STRAINS:
self.strains = [strain_or_strains]
else:
self.strains = strain_or_strains
@property
def repr_args(self):
return [self.position.key, self.strains]
def fits(self, history, call):
last_call = history.view_for(self.position).last_call
return last_call and last_call.strain in self.strains
class LastBidHasSuit(Precondition):
def __init__(self, position=None):
self.position = position
@property
def repr_args(self):
position_string = repr(self.position.key) if self.position else None
return [position_string]
def fits(self, history, call):
last_call = history.last_contract if not self.position else history.view_for(self.position).last_call
return last_call and last_call.strain in suit.SUITS
class LastBidHasLevel(Precondition):
def __init__(self, position, level):
self.position = position
self.level = level
@property
def repr_args(self):
return [self.position.key, self.level]
def fits(self, history, call):
last_call = history.view_for(self.position).last_call
return last_call and last_call.level == self.level
class LastBidWas(Precondition):
def __init__(self, position, call_name):
self.position = position
self.call_name = call_name
@property
def repr_args(self):
return [self.position.key, self.call_name]
def fits(self, history, call):
last_call = history.view_for(self.position).last_call
return last_call and last_call.name == self.call_name
class RaiseOfPartnersLastSuit(Precondition):
def fits(self, history, call):
partner_last_call = history.partner.last_call
if not partner_last_call or partner_last_call.strain not in suit.SUITS:
return False
return call.strain == partner_last_call.strain and history.partner.min_length(partner_last_call.strain) >= 3
class CueBid(Precondition):
def __init__(self, position, use_first_suit=False):
self.position = position
self.use_first_suit = use_first_suit
def fits(self, history, call):
if self.use_first_suit:
target_call = None
for view in history.view_for(self.position).walk:
target_call = view.last_call
else:
target_call = history.view_for(self.position).last_call
if not target_call or target_call.strain not in suit.SUITS:
return False
return call.strain == target_call.strain and history.view_for(self.position).min_length(target_call.strain) >= 3
class SuitLowerThanMyLastSuit(Precondition):
def fits(self, history, call):
if call.strain not in suit.SUITS:
return False
last_call = history.me.last_call
if last_call.strain not in suit.SUITS:
return False
return call.strain < last_call.strain
class RebidSameSuit(Precondition):
def fits(self, history, call):
if call.strain not in suit.SUITS:
return False
return history.me.last_call and call.strain == history.me.last_call.strain and call.strain in history.me.bid_suits
class PartnerHasAtLeastLengthInSuit(Precondition):
def __init__(self, length):
self.length = length
@property
def repr_args(self):
return [self.length]
def fits(self, history, call):
if call.strain not in suit.SUITS:
return False
return history.partner.min_length(call.strain) >= self.length
class MaxShownLength(Precondition):
def __init__(self, position, max_length, suit=None):
self.position = position
self.max_length = max_length
self.suit = suit
@property
def repr_args(self):
return [self.position.key, self.max_length, self.suit]
def fits(self, history, call):
strain = call.strain if self.suit is None else self.suit
return strain in suit.SUITS and history.view_for(self.position).min_length(strain) <= self.max_length
class DidBidSuit(Precondition):
def __init__(self, position):
self.position = position
def fits(self, history, call):
if call.strain not in suit.SUITS:
return False
return history.is_bid_suit(call.strain, self.position)
class UnbidSuit(Precondition):
def fits(self, history, call):
if call.strain not in suit.SUITS:
return False
return history.is_unbid_suit(call.strain)
class SuitUnbidByOpponents(Precondition):
def fits(self, history, call):
if call.strain not in suit.SUITS:
return False
return call.strain in history.them.unbid_suits
class UnbidSuitCountRange(Precondition):
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
@property
def repr_args(self):
return [self.lower, self.upper]
def fits(self, history, call):
count = len(history.unbid_suits)
return count >= self.lower and count <= self.upper
class Strain(Precondition):
def __init__(self, strain):
self.strain = strain
@property
def repr_args(self):
return [self.strain]
def fits(self, history, call):
return call.strain == self.strain
class Level(Precondition):
def __init__(self, level):
self.level = level
@property
def repr_args(self):
return [self.level]
def fits(self, history, call):
if call.is_double():
return history.last_contract.level == self.level
return call.is_contract() and call.level == self.level
class MaxLevel(Precondition):
def __init__(self, max_level):
self.max_level = max_level
@property
def repr_args(self):
return [self.max_level]
def fits(self, history, call):
if call.is_double():
return history.last_contract.level <= self.max_level
return call.is_contract() and call.level <= self.max_level
class HaveFit(Precondition):
def fits(self, history, call):
for strain in suit.SUITS:
if history.partner.min_length(strain) + history.me.min_length(strain) >= 8:
return True
return False
class Jump(Precondition):
def __init__(self, exact_size=None):
self.exact_size = exact_size
@property
def repr_args(self):
return [self.exact_size]
def _jump_size(self, last_call, call):
if call.strain <= last_call.strain:
# If the new suit is less than the last bid one, then we need to change more than one level for it to be a jump.
return call.level - last_call.level - 1
# Otherwise any bid not at the current level is a jump.
return call.level - last_call.level
def fits(self, history, call):
if call.is_pass():
return False
if call.is_double() or call.is_redouble():
call = history.call_history.last_contract()
last_call = self._last_call(history)
if not last_call or not last_call.is_contract(): # If we don't have a previous bid to compare to, this can't be a jump.
return False
jump_size = self._jump_size(last_call, call)
if self.exact_size is None:
return jump_size != 0
return self.exact_size == jump_size
def _last_call(self, history):
raise NotImplementedError
class JumpFromLastContract(Jump):
def _last_call(self, history):
return history.call_history.last_contract()
class JumpFromMyLastBid(Jump):
def _last_call(self, history):
return history.me.last_call
class JumpFromPartnerLastBid(Jump):
def _last_call(self, history):
return history.partner.last_call
class NotJumpFromLastContract(JumpFromLastContract):
def __init__(self):
JumpFromLastContract.__init__(self, exact_size=0)
class NotJumpFromMyLastBid(JumpFromMyLastBid):
def __init__(self):
JumpFromMyLastBid.__init__(self, exact_size=0)
class NotJumpFromPartnerLastBid(JumpFromPartnerLastBid):
def __init__(self):
JumpFromPartnerLastBid.__init__(self, exact_size=0)
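def _example_jump_size():
    # Editor's illustrative sketch, not part of the original module. Stand-in
    # bids with integer levels and strains are enough to exercise
    # Jump._jump_size: 1S over 1C is not a jump (size 0), while 2S over 1C
    # skips a level (size 1).
    import collections
    FakeCall = collections.namedtuple('FakeCall', ['level', 'strain'])
    one_club, one_spade, two_spades = FakeCall(1, 0), FakeCall(1, 3), FakeCall(2, 3)
    assert Jump()._jump_size(one_club, one_spade) == 0
    assert Jump()._jump_size(one_club, two_spades) == 1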
|
|
#! /usr/bin/env python
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
from ConfigParser import SafeConfigParser
from datetime import timedelta
from datetime import datetime
from urllib2 import urlopen
import HTMLParser
import urlparse
import urllib2
import urllib
import random
import time
import sys
import os
import re
try:
import json
except ImportError:
import simplejson as json
class BDBase(object):
"""
Base class that outlines our BassDrive Plugin Components
"""
def __init__(self):
# Load config.ini
self.bd_config = SafeConfigParser()
self.bd_config.read(os.path.join(os.path.dirname(__file__), "config.ini"))
# Plugin Constants & Profile Path
self.bd_addon = xbmcaddon.Addon(id=self.bd_config.get('plugin', 'id'))
self.bd_ppath = xbmc.translatePath(self.bd_addon.getAddonInfo('profile')).decode('utf-8')
self.bd_handle = int(sys.argv[1])
self.base_url = sys.argv[0]
# Mode Arguments
self.args = urlparse.parse_qs(sys.argv[2][1:])
self.mode = urlparse.parse_qs(sys.argv[2][1:]).get('mode', None)
# Ensure our Cache directory exists
self.cachedir = os.path.join(self.bd_ppath, 'cache')
if not os.path.exists(self.cachedir):
os.makedirs(self.cachedir)
def log(self, msg):
xbmc.log("[Bassdrive Plugin] %s" % (msg), xbmc.LOGNOTICE)
def error(self, message):
adname = self.bd_addon.getAddonInfo('name')
xbmcgui.Dialog().ok(adname, message)
def cache_file_expired(self, filepath, days=7):
"""
Super simple function that returns a boolean
Args:
days (int) The number of days old the file can be before it's considered expired
filepath (str) Full filepath of our cache file
Return:
True if the cache file is expired or does not exist
False if the cache file has not expired
"""
self.log("Checking to see if `%s` cache file has expired" % filepath)
if os.path.exists(filepath):
tstamp = time.ctime(os.path.getmtime(filepath))
# There's an issue here, though I'm not 100% sure what; possibly the datetime lib on some versions of Python.
# http://forum.kodi.tv/showthread.php?tid=112916&pid=1212394#pid1212394
try:
tstamp = datetime.strptime(tstamp, '%a %b %d %H:%M:%S %Y')
except TypeError:
tstamp = datetime.fromtimestamp(time.mktime(time.strptime(tstamp, '%a %b %d %H:%M:%S %Y')))
if datetime.now() - tstamp > timedelta(days=days):  # file mtime is older than the allowed age
self.log("Cache file %s has expired" % filepath)
return True
self.log("Cache file %s has NOT expired" % filepath)
return False
self.log("Cache file %s does not exist! Returning as if expired" % filepath)
return True
def load_cache_file(self, filepath):
"""
Load a json cache file and return its object
Args:
filepath (str) Full filepath of our cache file
Return:
object Loaded object
False Error/Exception
"""
self.log("Loading cache file %s" % filepath)
try:
with open(filepath) as handle:
cache = json.load(handle)
return cache
except Exception as e:
self.log(e.message)
return False
class BassDrive(BDBase):
def __init__(self):
# Initialize our parent
super(self.__class__, self).__init__()
# Cache file infos
self.cachefile = self.bd_config.get('cachefiles', 'streams')
self.arcachefile = self.bd_config.get('cachefiles', 'archives')
self.cache_streams_path = os.path.join(self.cachedir, self.cachefile)
self.arcache_streams_path = os.path.join(self.cachedir, self.arcachefile)
def build_xbmc_url(self, url):
"""
Given a dict, urlencode it to give us a 'path' that XBMC will understand
"""
return self.base_url + '?' + urllib.urlencode(url)
def update_streams(self):
"""
Get all of the m3u files from BassDrive
parse them
shove them into our json cache, with format
{
'32k' : [ url1, url2, ... ],
'56k' : [ url1, url2, ... ],
'128k' : [ url1, url2, ... ]
}
"""
def get_m3us(url):
try:
data = urlopen(url)
return [line.strip() for line in data]
except Exception as e:
self.log(e.message)
self.error(e.message)
return False
self.log("Pulling m3u's from bassdrive.com and building our stream cache")
streams = {}
for key, url in self.bd_config.items('streams'):
urls = get_m3us(url)
if not urls:
continue
streams[key] = urls
self.log("Writing stream cache to file: %s" % self.cache_streams_path)
with open(self.cache_streams_path, 'w+') as handle:
json.dump(streams, handle)
return True
def update_archives(self):
"""
- Parse bassdrive archives
- Build dict of structure
- Write json to disk
The datastructure returned looks like this
{
u'1 - Monday': {
u'Deep In The Jungle - Illusionist and Darm': {
'_files': [u'[2014.01.20] Deep In The Jungle - The Illusionist and Darm.mp3', ... ]
},
u'Fokuz Recordings Show': {
'_files': [u'[2016.01.11] Fokuz Recordings Show - SATL.mp3', ... ]
},
...
},
u'2 - Tuesday': {
...
},
...
}
We opted to go with a data structure like this as it gives us quite a bit of flexibility
Additionally, it allows us to track any level and mix of files and folders in a logical
and easily accessed format. The tl;dr version is that _all_ key names are folder names,
with the exception of the '_files' keyname, which is an explicit list of files contained
in that folder. In the example structure above, '_files' under the 'Deep In the Jungle' key
are all of the files contained in the 'Deep In The Jungle' folder.
"""
self.log("Building object of all archives from archives.bassdrive.com and writing cache file")
def recursive_fetch(url):
results = {}
# We don't want to be going back up a level
blacklisted_labels = [ 'Parent Directory' ]
# Regex that we're searching for in our html
anchor_re = re.compile('<a href=\".*</a>')
hrefs = re.compile('(?<=href=\"(?!http)(?!/\")).*(?=\">)')
text = re.compile('(?<=\">).*(?=</a>)')
pars = HTMLParser.HTMLParser()
url = pars.unescape(url)
urlpath = urllib2.urlopen(url)
req_data = urlpath.read().decode('utf-8')
# Get all of our named anchors
anchors = anchor_re.findall(req_data)
# Traverse our anchors / web structure
for item in anchors:
# separate href value from the label of the anchor and strip all leading/trailing whitespace
try:
url_path = re.search(hrefs, item).group(0).strip()
url_label = re.search(text, item).group(0).strip()
# Handle edge cases, like when nothing matches
except:
continue
# Avoid infinite recursion
if url_label in blacklisted_labels:
continue
# If the path doesn't end in a slash, it's a file
if re.search('/$', url_path) is None:
if '_files' not in results:
results['_files'] = []
results['_files'].append(url_label)
else:
# URL-decode the directory name and drop the trailing slash
dir_name = urllib.unquote(url_path).replace("/", "")
# Get this folders contents, and add a new folder to results if there is content in it
dir_contents = recursive_fetch(url + url_path)
if len(dir_contents) > 0:
results[dir_name] = dir_contents
return results
# Doing the whole structure under the 'Archives' key is a short-cut for us, so our fetch method is simple
results = {'Archives':recursive_fetch('http://archives.bassdrivearchive.com')}
with open(self.arcache_streams_path, 'w+') as handle:
json.dump(results, handle)
def get_archives_display_page(self, foldername):
"""
Return a list that contains folders and files found in the foldername
Params:
foldername (list) The result of self.args['foldername'].split('/')
encountered during self.run() This list is the key tree
that gets us to the current folder we're looking at
Return:
list, in format as follows
[
[file1, file2, file3],
[foldername1, foldername2]
]
"""
"""
Get the dict for the passed nested value
eg: if we pass foldername = ['1 - Monday', 'Fokuz Recordings Show']
we'll get a dictionary of {'_files':[...]} back
"""
data = reduce(lambda d, k: d[k], foldername, self.load_cache_file(self.arcache_streams_path) )
ret = [[]]
if '_files' in data:
ret[0] = data['_files']
del(data['_files'])
ret.append(data.keys())
return ret
def get_stream_to_play(self, quality):
""" Return a random URL for a given bitrate requested to play
:param quality: string of bitrate we're after, as a keyname in our json cachefile
:return str: A URL to be played :D
"""
self.log("Getting random %s stream to build 'playlist' with" % quality)
cache = self.load_cache_file(self.cache_streams_path)
return random.choice(cache[quality])
def get_archive_url(self, foldername, filename):
"""
Built a full URL to a file in the Archives
Params:
foldername (list) The result of self.args['foldername'].split('/')
encountered during self.run() This list is the key tree
that gets us to the current folder we're looking at
filename (str) The actual filename we're after
Return:
str URL encoded string we can stream from directly
"""
if foldername[0] == 'Archives':
del(foldername[0])
url = 'http://archives.bassdrivearchive.com/' + urllib.quote('/'.join(foldername) + '/' + filename)
self.log('Built archive URL %s' % url)
return url
def maintenance_stream_cache(self):
"""
Convenience function we call from run() to keep run tidy
Checks if our stream cache exists, if it needs to be updated, etc
"""
cachedays = int(self.bd_addon.getSetting("stream_cache_expiry_days"))
# Ensure file exists / not expired. This returns as expired if the file doesn't exist!
if self.cache_file_expired(filepath=self.cache_streams_path, days=cachedays) \
or self.bd_addon.getSetting("forceupdate") == "true":
self.bd_addon.setSetting(id="forceupdate", value="false")
self.log("Maintenance request to update stream cache")
self.update_streams()
return
def maintenance_archive_cache(self):
"""
Convenience function we call from run() to keep run tidy
Checks if our archives cache exists, if it needs to be updated, etc
"""
cachedays = int(self.bd_addon.getSetting("archives_cache_expiry_days"))
# Ensure file exists / not expired. This returns as expired if the file doesn't exist!
if self.cache_file_expired(filepath=self.arcache_streams_path, days=cachedays) \
or self.bd_addon.getSetting("archives_forceupdate") == "true":
self.bd_addon.setSetting(id="archives_forceupdate", value="false")
self.log("Maintenance request to update archive cache")
self.update_archives()
def run(self):
self.log(self.args)
# Check to see if our cache has expired
self.maintenance_stream_cache()
# List of values we're to display in the menu we're on
directory_items = []
# We're at the top level menu
# - Bassdrive @32k
# - Bassdrive @56k
# - Bassdrive @128k
# - Archives
if self.mode is None:
# Build playable bitrate menu items
for key, _x in self.bd_config.items('streams'):
# This currently displays a menu item with a bound URL behind it
# Ideally, we wouldn't generate a URL until the item was selected
# This would allow multiple clicks to get multiple stream URLs,
# basically letting you cycle through streams without having to reload
# the bassdrive plugin
url = self.get_stream_to_play(key)
# Generate a list item for Kodi
li = xbmcgui.ListItem(label="Bassdrive @ %s" % key, thumbnailImage="%s" % os.path.join(self.bd_ppath,
'icon.png'))
# Set our stream quality, per Bassdrives website
li.setProperty("mimetype", "audio/aac")
if key == '128k':
li.setProperty("mimetype", "audio/mpeg")
# Set player info
li.setInfo(type="Music", infoLabels={"Genre": "Drum & Bass",
"Comment": "World Wide Drum & Bass",
"Size": int(key[:-1]) * 1024})
li.setProperty("IsPlayable", "true")
isFolder=False
directory_items.append((url, li, isFolder))
xbmcplugin.addDirectoryItem(handle=self.bd_handle, url=url, listitem=li, isFolder=False)
# Add in our 'Archives' folder menu item
archive_url = self.build_xbmc_url({'mode': 'folder', 'foldername': 'Archives'})
test = xbmcgui.ListItem(label="Archives")
xbmcplugin.addDirectoryItem(handle=self.bd_handle, url=archive_url, listitem=test, isFolder=True)
xbmcplugin.endOfDirectory(self.bd_handle, succeeded=True)
# We're in a sub-menu
elif self.mode[0] == 'folder':
# Handle our archive cache, since we're now in it
self.maintenance_archive_cache()
# The path of the directory that called for this folder, relative to the plugin's root (./)
calling_folder = self.args['foldername'][0]
# Create a list of the full filepath of our current run
self.foldername = self.args['foldername'][0].split('/')
# Get our files and folders to display
display_data = self.get_archives_display_page(self.foldername)
# Display files/streams to play
for playable in sorted(display_data[0]):
# Build our URL and add the item!
url = self.get_archive_url(foldername=self.foldername, filename=playable)
# Generate a list item for Kodi
li = xbmcgui.ListItem(label=playable, thumbnailImage="%s" % os.path.join(self.bd_ppath,'icon.png'))
# Set player info
li.setInfo(type="Music", infoLabels={"Genre": "Drum & Bass",
"Comment": "World Wide Drum & Bass" })
li.setProperty("IsPlayable", "true")
directory_items.append((url, li, False))
xbmcplugin.addDirectoryItem(handle=self.bd_handle, url=url, listitem=li, isFolder=False)
# Display folders
for folder in sorted(display_data[1]):
# Build the relative URL for this item (this is XBMC URL lingo)
archive_url = self.build_xbmc_url({'mode': 'folder', 'foldername': '%s/%s' % (calling_folder, folder) })
item = xbmcgui.ListItem(label=folder)
xbmcplugin.addDirectoryItem(handle=self.bd_handle, url=archive_url, listitem=item, isFolder=True)
# when we're done adding items to the directory, finish drawing it.
xbmcplugin.endOfDirectory(self.bd_handle, succeeded=True)
# Success
return True
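def _example_nested_folder_lookup():
    # Editor's illustrative sketch, not part of the plugin: the reduce() call
    # in get_archives_display_page simply walks the cached archive dict by a
    # list of folder keys. The folder names below are made up.
    cache = {'Archives': {'1 - Monday': {'Some Show': {'_files': ['a.mp3']}}}}
    foldername = ['Archives', '1 - Monday', 'Some Show']
    node = reduce(lambda d, k: d[k], foldername, cache)
    assert node == {'_files': ['a.mp3']}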
MusicAddonInstance = BassDrive()
MusicAddonInstance.run()
|
|
#!/usr/bin/env python
"""
Project-wide application configuration.
DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE.
They will be exposed to users. Use environment variables instead.
See get_secrets() below for a fast way to access them.
"""
import os
from authomatic.providers import oauth2
from authomatic import Authomatic
"""
NAMES
"""
# Project name to be used in urls
# Use dashes, not underscores!
PROJECT_SLUG = 'piers_actual'
# Project name to be used in file paths
PROJECT_FILENAME = 'piers_actual'
# The name of the repository containing the source
REPOSITORY_NAME = 'piers_actual'
GITHUB_USERNAME = 'aitkend'
REPOSITORY_URL = '[email protected]:%s/%s.git' % (GITHUB_USERNAME, REPOSITORY_NAME)
REPOSITORY_ALT_URL = None # '[email protected]:aitkend/%s.git' % REPOSITORY_NAME'
# Project name used for assets rig
# Should stay the same, even if PROJECT_SLUG changes
ASSETS_SLUG = 'piers_actual'
"""
DEPLOYMENT
"""
PRODUCTION_S3_BUCKET = {
'bucket_name': 'apps.daitken',
'region': 'us-west-1'
}
STAGING_S3_BUCKET = {
'bucket_name': 'apps-staging.daitken',
'region': 'us-west-1'
}
ASSETS_S3_BUCKET = {
'bucket_name': 'assets.daitken',
'region': 'us-west-1'
}
DEFAULT_MAX_AGE = 20
PRODUCTION_SERVERS = ['cron.nprapps.org']
STAGING_SERVERS = ['cron-staging.nprapps.org']
# Should code be deployed to the web/cron servers?
DEPLOY_TO_SERVERS = False
SERVER_USER = 'ubuntu'
SERVER_PYTHON = 'python2.7'
SERVER_PROJECT_PATH = '/home/%s/apps/%s' % (SERVER_USER, PROJECT_FILENAME)
SERVER_REPOSITORY_PATH = '%s/repository' % SERVER_PROJECT_PATH
SERVER_VIRTUALENV_PATH = '%s/virtualenv' % SERVER_PROJECT_PATH
# Should the crontab file be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_CRONTAB = False
# Should the service configurations be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_SERVICES = False
UWSGI_SOCKET_PATH = '/tmp/%s.uwsgi.sock' % PROJECT_FILENAME
# Services are the server-side services we want to enable and configure.
# A three-tuple following this format:
# (service name, service deployment path, service config file extension)
SERVER_SERVICES = [
('app', SERVER_REPOSITORY_PATH, 'ini'),
('uwsgi', '/etc/init', 'conf'),
('nginx', '/etc/nginx/locations-enabled', 'conf'),
]
# These variables will be set at runtime. See configure_targets() below
S3_BUCKET = None
S3_BASE_URL = None
S3_DEPLOY_URL = None
SERVERS = []
SERVER_BASE_URL = None
SERVER_LOG_PATH = None
DEBUG = True
"""
COPY EDITING
"""
COPY_GOOGLE_DOC_KEY = '1BPyp6pO3I6Ncllf0Zb46zqe__aBLQjzSHKslwMH9roQ'
COPY_PATH = 'data/copy.xlsx'
### 1mYdd8jp2FKUpkMoy9PHvD0kLcfTuAxqrhKizOCTzW_Y -- border-map COPY
### 0Auf4XJCko_vBdFJ2UVF4RTA0S1ZGd0F3eDJCSmpXaVE -- barkedu template
### 1Ej12L83GhZhvJn0VDO7grcH-Fcx5rS4__CQI1Km_tQY -- piers testing
### 1BPyp6pO3I6Ncllf0Zb46zqe__aBLQjzSHKslwMH9roQ -- piers_actual
"""
SHARING
"""
SHARE_URL = 'http://%s/%s/' % (PRODUCTION_S3_BUCKET['bucket_name'], PROJECT_SLUG)
"""
ADS
"""
# NPR_DFP = {
# 'STORY_ID': '1002',
# 'TARGET': 'homepage',
# 'ENVIRONMENT': 'NPRTEST',
# 'TESTSERVER': 'false'
# }
"""
SERVICES
"""
# NPR_GOOGLE_ANALYTICS = {
# 'ACCOUNT_ID': 'UA-5828686-4',
# 'DOMAIN': PRODUCTION_S3_BUCKET['bucket_name'],
# 'TOPICS': '' # e.g. '[1014,3,1003,1002,1001]'
# }
#VIZ_GOOGLE_ANALYTICS = {
# 'ACCOUNT_ID': 'UA-5828686-75'
# }
# DISQUS_API_KEY = 'tIbSzEhGBE9NIptbnQWn4wy1gZ546CsQ2IHHtxJiYAceyyPoAkDkVnQfCifmCaQW'
# DISQUS_UUID = '23cdb597-f8f7-11e4-87d4-d49a20c1c340'
"""
OAUTH
"""
GOOGLE_OAUTH_CREDENTIALS_PATH = '~/.google_oauth_credentials'
authomatic_config = {
'google': {
'id': 1,
'class_': oauth2.Google,
'consumer_key': os.environ.get('GOOGLE_OAUTH_CLIENT_ID'),
'consumer_secret': os.environ.get('GOOGLE_OAUTH_CONSUMER_SECRET'),
'scope': ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/userinfo.email'],
'offline': True,
},
}
authomatic = Authomatic(authomatic_config, os.environ.get('AUTHOMATIC_SALT'))
"""
Utilities
"""
def get_secrets():
"""
A method for accessing our secrets.
"""
secrets_dict = {}
for k,v in os.environ.items():
if k.startswith(PROJECT_SLUG):
k = k[len(PROJECT_SLUG) + 1:]
secrets_dict[k] = v
return secrets_dict
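def _example_get_secrets():
    """
    Editor's illustrative sketch, not part of the original config: an
    environment variable named '<PROJECT_SLUG>_EXAMPLE_TOKEN' (hypothetical)
    comes back from get_secrets() with the slug prefix stripped.
    """
    os.environ['%s_EXAMPLE_TOKEN' % PROJECT_SLUG] = 'not-a-real-secret'
    assert get_secrets()['EXAMPLE_TOKEN'] == 'not-a-real-secret'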
def configure_targets(deployment_target):
"""
Configure deployment targets. Abstracted so this can be
overridden for rendering before deployment.
"""
global S3_BUCKET
global S3_BASE_URL
global S3_DEPLOY_URL
global SERVERS
global SERVER_BASE_URL
global SERVER_LOG_PATH
global DEBUG
global DEPLOYMENT_TARGET
global DISQUS_SHORTNAME
global ASSETS_MAX_AGE
if deployment_target == 'production':
S3_BUCKET = PRODUCTION_S3_BUCKET
S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
SERVERS = PRODUCTION_SERVERS
SERVER_BASE_URL = 'http://%s/%s' % (SERVERS[0], PROJECT_SLUG)
SERVER_LOG_PATH = '/var/log/%s' % PROJECT_FILENAME
DISQUS_SHORTNAME = 'npr-news'
DEBUG = False
ASSETS_MAX_AGE = 86400
elif deployment_target == 'staging':
S3_BUCKET = STAGING_S3_BUCKET
S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
SERVERS = STAGING_SERVERS
SERVER_BASE_URL = 'http://%s/%s' % (SERVERS[0], PROJECT_SLUG)
SERVER_LOG_PATH = '/var/log/%s' % PROJECT_FILENAME
DISQUS_SHORTNAME = 'nprviz-test'
DEBUG = True
ASSETS_MAX_AGE = 20
else:
S3_BUCKET = None
S3_BASE_URL = 'http://127.0.0.1:8000'
S3_DEPLOY_URL = None
SERVERS = []
SERVER_BASE_URL = 'http://127.0.0.1:8001/%s' % PROJECT_SLUG
SERVER_LOG_PATH = '/tmp'
DISQUS_SHORTNAME = 'nprviz-test'
DEBUG = True
ASSETS_MAX_AGE = 20
DEPLOYMENT_TARGET = deployment_target
"""
Run automated configuration
"""
DEPLOYMENT_TARGET = os.environ.get('DEPLOYMENT_TARGET', None)
configure_targets(DEPLOYMENT_TARGET)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
import numpy as np
from nose.tools import raises
from copy import deepcopy
import warnings
def test_parameter():
p = gluon.Parameter('weight', shape=(10, 10))
p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
assert len(p.list_data()) == 2
assert len(p.list_grad()) == 2
assert p.data(mx.cpu(1)).context == mx.cpu(1)
assert p.data(mx.cpu(0)).shape == (10, 10)
assert p.var().name == 'weight'
p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)])
assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)]
def test_paramdict():
params = gluon.ParameterDict('net_')
params.get('weight', shape=(10, 10))
assert list(params.keys()) == ['net_weight']
params.initialize(ctx=mx.cpu())
params.save('test.params')
params.load('test.params', mx.cpu())
def test_parameter_sharing():
class Net(gluon.Block):
def __init__(self, **kwargs):
super(Net, self).__init__(**kwargs)
with self.name_scope():
self.dense0 = nn.Dense(5, in_units=5)
self.dense1 = nn.Dense(5, in_units=5)
def forward(self, x):
return self.dense1(self.dense0(x))
net1 = Net(prefix='net1_')
net2 = Net(prefix='net2_', params=net1.collect_params())
net1.collect_params().initialize()
net2(mx.nd.zeros((3, 5)))
net1.save_params('net1.params')
net3 = Net(prefix='net3_')
net3.load_params('net1.params', mx.cpu())
def test_basic():
model = nn.Sequential()
model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
model.add(nn.Dropout(0.5))
model.add(nn.Dense(64, activation='tanh', in_units=256),
nn.Dense(32, in_units=64))
model.add(nn.Activation('relu'))
# symbol
x = mx.sym.var('data')
y = model(x)
assert len(y.list_arguments()) == 7
# ndarray
model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
x = model(mx.nd.zeros((32, 2, 10)))
assert x.shape == (32, 32)
x.wait_to_read()
model.collect_params().setattr('grad_req', 'null')
assert list(model.collect_params().values())[0]._grad is None
model.collect_params().setattr('grad_req', 'write')
assert list(model.collect_params().values())[0]._grad is not None
def test_dense():
model = nn.Dense(128, activation='tanh', in_units=10, flatten=False, prefix='test_')
inputs = mx.sym.Variable('data')
outputs = model(inputs)
assert set(model.collect_params().keys()) == set(['test_weight', 'test_bias'])
assert outputs.list_outputs() == ['test_tanh_fwd_output']
args, outs, auxs = outputs.infer_shape(data=(2, 3, 10))
assert outs == [(2, 3, 128)]
model = nn.Dense(128, activation='relu', in_units=30, flatten=True, prefix='test2_')
inputs = mx.sym.Variable('data')
outputs = model(inputs)
assert set(model.collect_params().keys()) == set(['test2_weight', 'test2_bias'])
assert outputs.list_outputs() == ['test2_relu_fwd_output']
args, outs, auxs = outputs.infer_shape(data=(17, 2, 5, 3))
assert outs == [(17, 128)]
def test_symbol_block():
model = nn.HybridSequential()
model.add(nn.Dense(128, activation='tanh'))
model.add(nn.Dropout(0.5))
model.add(nn.Dense(64, activation='tanh'),
nn.Dense(32, in_units=64))
model.add(nn.Activation('relu'))
model.initialize()
inputs = mx.sym.var('data')
outputs = model(inputs).get_internals()
smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
assert len(smodel(mx.nd.zeros((16, 10)))) == 14
out = smodel(mx.sym.var('in'))
assert len(out) == len(outputs.list_outputs())
class Net(nn.HybridBlock):
def __init__(self, model):
super(Net, self).__init__()
self.model = model
def hybrid_forward(self, F, x):
out = self.model(x)
return F.add_n(*[i.sum() for i in out])
net = Net(smodel)
net.hybridize()
assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
inputs = mx.sym.var('data')
outputs = model(inputs)
smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
net = Net(smodel)
net.hybridize()
assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
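# (Added note) check_layer_forward below runs a layer twice on the same
# all-ones input -- first imperatively, then after hybridize() -- and checks
# that the outputs and the input gradients of both passes agree within tolerance.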
def check_layer_forward(layer, dshape):
layer.collect_params().initialize()
x = mx.nd.ones(shape=dshape)
x.attach_grad()
with mx.autograd.record():
out = layer(x)
out.backward()
np_out = out.asnumpy()
np_dx = x.grad.asnumpy()
layer.hybridize()
x = mx.nd.ones(shape=dshape)
x.attach_grad()
with mx.autograd.record():
out = layer(x)
out.backward()
mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-5, atol=1e-6)
mx.test_utils.assert_almost_equal(np_dx, x.grad.asnumpy(), rtol=1e-5, atol=1e-6)
def test_conv():
layers1d = [
nn.Conv1D(16, 3, in_channels=4),
nn.Conv1D(16, 3, groups=2, in_channels=4),
nn.Conv1D(16, 3, strides=3, groups=2, in_channels=4),
]
for layer in layers1d:
check_layer_forward(layer, (1, 4, 10))
layers2d = [
nn.Conv2D(16, (3, 4), in_channels=4),
nn.Conv2D(16, (5, 4), in_channels=4),
nn.Conv2D(16, (3, 4), groups=2, in_channels=4),
nn.Conv2D(16, (3, 4), strides=4, in_channels=4),
nn.Conv2D(16, (3, 4), dilation=4, in_channels=4),
nn.Conv2D(16, (3, 4), padding=4, in_channels=4),
]
for layer in layers2d:
check_layer_forward(layer, (1, 4, 20, 20))
layers3d = [
nn.Conv3D(16, (1, 8, 4), in_channels=4, activation='relu'),
nn.Conv3D(16, (5, 4, 3), in_channels=4),
nn.Conv3D(16, (3, 3, 3), groups=2, in_channels=4),
nn.Conv3D(16, 4, strides=4, in_channels=4),
nn.Conv3D(16, (3, 3, 3), padding=4, in_channels=4),
]
for layer in layers3d:
check_layer_forward(layer, (1, 4, 10, 10, 10))
layer = nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4)
# check_layer_forward(layer, (1, 10, 10, 4))
layer = nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4)
# check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_deconv():
# layers1d = [
# nn.Conv1DTranspose(16, 3, in_channels=4),
# nn.Conv1DTranspose(16, 3, groups=2, in_channels=4),
# nn.Conv1DTranspose(16, 3, strides=3, groups=2, in_channels=4),
# ]
# for layer in layers1d:
# check_layer_forward(layer, (1, 4, 10))
layers2d = [
nn.Conv2DTranspose(16, (3, 4), in_channels=4),
nn.Conv2DTranspose(16, (5, 4), in_channels=4),
nn.Conv2DTranspose(16, (3, 4), groups=2, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), strides=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), dilation=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), padding=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), strides=4, output_padding=3, in_channels=4),
]
for layer in layers2d:
check_layer_forward(layer, (1, 4, 20, 20))
# layers3d = [
# nn.Conv3DTranspose(16, (1, 8, 4), in_channels=4),
# nn.Conv3DTranspose(16, (5, 4, 3), in_channels=4),
# nn.Conv3DTranspose(16, (3, 3, 3), groups=2, in_channels=4),
# nn.Conv3DTranspose(16, 4, strides=4, in_channels=4),
# nn.Conv3DTranspose(16, (3, 3, 3), padding=4, in_channels=4),
# ]
# for layer in layers3d:
# check_layer_forward(layer, (1, 4, 10, 10, 10))
#
#
# layer = nn.Conv2DTranspose(16, (3, 3), layout='NHWC', in_channels=4)
# # check_layer_forward(layer, (1, 10, 10, 4))
#
# layer = nn.Conv3DTranspose(16, (3, 3, 3), layout='NDHWC', in_channels=4)
# # check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_pool():
layers1d = [
nn.MaxPool1D(),
nn.MaxPool1D(3),
nn.MaxPool1D(3, 2),
nn.AvgPool1D(),
nn.GlobalAvgPool1D(),
]
for layer in layers1d:
check_layer_forward(layer, (1, 2, 10))
layers2d = [
nn.MaxPool2D(),
nn.MaxPool2D((3, 3)),
nn.MaxPool2D(3, 2),
nn.AvgPool2D(),
nn.GlobalAvgPool2D(),
]
for layer in layers2d:
check_layer_forward(layer, (1, 2, 10, 10))
layers3d = [
nn.MaxPool3D(),
nn.MaxPool3D((3, 3, 3)),
nn.MaxPool3D(3, 2),
nn.AvgPool3D(),
nn.GlobalAvgPool3D(),
]
for layer in layers3d:
check_layer_forward(layer, (1, 2, 10, 10, 10))
# test ceil_mode
x = mx.nd.zeros((2, 2, 10, 10))
layer = nn.MaxPool2D(3, ceil_mode=False)
layer.collect_params().initialize()
assert (layer(x).shape==(2, 2, 3, 3))
layer = nn.MaxPool2D(3, ceil_mode=True)
layer.collect_params().initialize()
assert (layer(x).shape==(2, 2, 4, 4))
def test_batchnorm():
layer = nn.BatchNorm(in_channels=10)
check_layer_forward(layer, (2, 10, 10, 10))
def test_reshape():
x = mx.nd.ones((2, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x.reshape((-1,))
x = x + 10
x.backward()
def test_slice():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x[1:3]
x = x + 10
x.backward()
def test_at():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x[1]
x = x + 10
x.backward()
def test_deferred_init():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2)
layer.collect_params().initialize()
layer(x)
def check_split_data(x, num_slice, batch_axis, **kwargs):
res = gluon.utils.split_data(x, num_slice, batch_axis, **kwargs)
assert len(res) == num_slice
mx.test_utils.assert_almost_equal(mx.nd.concat(*res, dim=batch_axis).asnumpy(),
x.asnumpy())
def test_split_data():
x = mx.nd.random.uniform(shape=(128, 33, 64))
check_split_data(x, 8, 0)
check_split_data(x, 3, 1)
check_split_data(x, 4, 1, even_split=False)
check_split_data(x, 15, 1, even_split=False)
try:
check_split_data(x, 4, 1)
except ValueError:
return
assert False, "Should have failed"
def test_flatten():
flatten = nn.Flatten()
x = mx.nd.zeros((3,4,5,6))
assert flatten(x).shape == (3, 4*5*6)
x = mx.nd.zeros((3,6))
assert flatten(x).shape == (3, 6)
x = mx.nd.zeros((3,))
assert flatten(x).shape == (3, 1)
def test_trainer():
def dict_equ(a, b):
assert set(a) == set(b)
for k in a:
assert (a[k].asnumpy() == b[k].asnumpy()).all()
x = gluon.Parameter('x', shape=(10,))
x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
with mx.autograd.record():
for w in x.list_data():
y = w + 1
y.backward()
trainer.step(1)
assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
x.lr_mult = 0.5
with mx.autograd.record():
for w in x.list_data():
y = w + 1
y.backward()
trainer.step(1)
assert (x.data(mx.cpu(1)).asnumpy() == -4).all()
trainer.save_states('test.states')
states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
else deepcopy(trainer._updaters[0].states)
trainer.load_states('test.states')
if trainer._update_on_kvstore:
dict_equ(trainer._kvstore._updater.states, states)
assert trainer._optimizer == trainer._kvstore._updater.optimizer
else:
for updater in trainer._updaters:
dict_equ(updater.states, states)
assert trainer._optimizer == trainer._updaters[0].optimizer
def test_block_attr_hidden():
b = gluon.Block()
# regular attributes can change types
b.a = None
b.a = 1
@raises(TypeError)
def test_block_attr_block():
b = gluon.Block()
# regular variables can't change types
b.b = gluon.Block()
b.b = (2,)
@raises(TypeError)
def test_block_attr_param():
b = gluon.Block()
# regular variables can't change types
b.b = gluon.Parameter()
b.b = (2,)
def test_block_attr_regular():
b = gluon.Block()
# set block attribute also sets _children
b.c = gluon.Block()
c2 = gluon.Block()
b.c = c2
assert b.c is c2 and b._children[0] is c2
def test_sequential_warning():
with warnings.catch_warnings(record=True) as w:
b = gluon.nn.Sequential()
b.add(gluon.nn.Dense(20))
b.hybridize()
assert len(w) == 1
def test_global_norm_clip():
x1 = mx.nd.ones((3,3))
x2 = mx.nd.ones((4,4))
norm = gluon.utils.clip_global_norm([x1, x2], 1.0)
assert norm == 5.0
assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)
x3 = mx.nd.array([1.0, 2.0, float('nan')])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
gluon.utils.clip_global_norm([x1, x3], 2.0)
assert len(w) == 1
def test_embedding():
layer = gluon.nn.Embedding(10, 100)
layer.initialize()
x = mx.nd.array([3,4,2,0,1])
with mx.autograd.record():
y = layer(x)
y.backward()
assert (layer.weight.grad()[:5] == 1).asnumpy().all()
assert (layer.weight.grad()[5:] == 0).asnumpy().all()
def test_export():
ctx = mx.context.current_context()
model = gluon.model_zoo.vision.resnet18_v1(
prefix='resnet', ctx=ctx, pretrained=True)
model.hybridize()
data = mx.nd.random.normal(shape=(1, 3, 224, 224))
out = model(data)
model.export('gluon')
module = mx.mod.Module.load('gluon', 0, label_names=None, context=ctx)
module.bind(data_shapes=[('data', data.shape)])
module.forward(mx.io.DataBatch([data], None), is_train=False)
mod_out, = module.get_outputs()
assert_almost_equal(out.asnumpy(), mod_out.asnumpy())
model2 = gluon.model_zoo.vision.resnet18_v1(prefix='resnet', ctx=ctx)
model2.collect_params().load('gluon-0000.params', ctx)
out2 = model2(data)
assert_almost_equal(out.asnumpy(), out2.asnumpy())
def test_hybrid_stale_cache():
net = mx.gluon.nn.HybridSequential()
with net.name_scope():
net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros', bias_initializer='ones', flatten=False))
net.hybridize()
net.initialize()
net(mx.nd.ones((2,3,5)))
net.add(mx.gluon.nn.Flatten())
assert net(mx.nd.ones((2,3,5))).shape == (2, 30)
net = mx.gluon.nn.HybridSequential()
with net.name_scope():
net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
bias_initializer='ones', flatten=False)
net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
bias_initializer='ones', flatten=False)
net.hybridize()
net.initialize()
net(mx.nd.ones((2,3,5)))
net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
bias_initializer='ones', flatten=True)
net.initialize()
assert net(mx.nd.ones((2,3,5))).shape == (2, 10)
def test_lambda():
net1 = mx.gluon.nn.HybridSequential()
net1.add(nn.Activation('tanh'),
nn.LeakyReLU(0.1))
net2 = mx.gluon.nn.HybridSequential()
op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
net2.add(nn.HybridLambda('tanh'),
nn.HybridLambda(op3))
op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
net3 = mx.gluon.nn.Sequential()
net3.add(nn.Lambda('tanh'),
nn.Lambda(op4))
input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
assert_almost_equal(out1.asnumpy(), out2.asnumpy())
assert_almost_equal(out1.asnumpy(), out3.asnumpy())
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Tools and helper functions for abinit calculations"""
import os
import re
import collections
import shutil
import operator
import numpy as np
from fnmatch import fnmatch
from monty.collections import dict2namedtuple
from monty.string import list_strings
from monty.fnmatch import WildCard
from monty.shutil import copy_r
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def as_bool(s):
"""
Convert a string into a boolean.
>>> assert as_bool(True) is True and as_bool("Yes") is True and as_bool("false") is False
"""
if s in (False, True): return s
# Assume string
s = s.lower()
if s in ("yes", "true"):
return True
elif s in ("no", "false"):
return False
else:
raise ValueError("Don't know how to convert type %s: %s into a boolean" % (type(s), s))
class File:
"""
Very simple class used to store file basenames, absolute paths and directory names.
Provides wrappers for the most commonly used functions defined in os.path.
"""
def __init__(self, path):
self._path = os.path.abspath(path)
def __repr__(self):
return "<%s at %s, %s>" % (self.__class__.__name__, id(self), self.path)
def __str__(self):
return "<%s, %s>" % (self.__class__.__name__, self.path)
def __eq__(self, other):
return False if other is None else self.path == other.path
def __ne__(self, other):
return not self.__eq__(other)
@property
def path(self):
"""Absolute path of the file."""
return self._path
@property
def basename(self):
"""File basename."""
return os.path.basename(self.path)
@property
def relpath(self):
"""Relative path."""
try:
return os.path.relpath(self.path)
except OSError:
# current working directory may not be defined!
return self.path
@property
def dirname(self):
"""Absolute path of the directory where the file is located."""
return os.path.dirname(self.path)
@property
def exists(self):
"""True if file exists."""
return os.path.exists(self.path)
@property
def isncfile(self):
"""True if self is a NetCDF file"""
return self.basename.endswith(".nc")
def chmod(self, mode):
"""Change the access permissions of a file."""
os.chmod(self.path, mode)
def read(self):
"""Read data from file."""
with open(self.path, "r") as f:
return f.read()
def readlines(self):
"""Read lines from files."""
with open(self.path, "r") as f:
return f.readlines()
def write(self, string):
"""Write string to file."""
self.make_dir()
with open(self.path, "w") as f:
if not string.endswith("\n"):
return f.write(string + "\n")
else:
return f.write(string)
def writelines(self, lines):
"""Write a list of strings to file."""
self.make_dir()
with open(self.path, "w") as f:
return f.writelines(lines)
def make_dir(self):
"""Make the directory where the file is located."""
if not os.path.exists(self.dirname):
os.makedirs(self.dirname)
def remove(self):
"""Remove the file."""
try:
os.remove(self.path)
except:
pass
def move(self, dst):
"""
Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
"""
shutil.move(self.path, dst)
def get_stat(self):
"""Results from os.stat"""
return os.stat(self.path)
def getsize(self):
"""
Return the size, in bytes, of path.
Return 0 if the file is empty or it does not exist.
"""
if not self.exists: return 0
return os.path.getsize(self.path)
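# Illustrative usage sketch (added, not part of the original module): wrap a
# path in File and query it. The filename below is hypothetical.
def _file_usage_example():  # pragma: no cover
    wfk = File("run/out_WFK.nc")
    if wfk.exists and wfk.isncfile:
        print(wfk.basename, wfk.getsize())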
class Directory:
"""
Very simple class that provides helper functions
wrapping the most commonly used functions defined in os.path.
"""
def __init__(self, path):
self._path = os.path.abspath(path)
def __repr__(self):
return "<%s at %s, %s>" % (self.__class__.__name__, id(self), self.path)
def __str__(self):
return self.path
def __eq__(self, other):
return False if other is None else self.path == other.path
def __ne__(self, other):
return not self.__eq__(other)
@property
def path(self):
"""Absolute path of the directory."""
return self._path
@property
def relpath(self):
"""Relative path."""
return os.path.relpath(self.path)
@property
def basename(self):
"""Directory basename."""
return os.path.basename(self.path)
def path_join(self, *p):
"""
Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components will be discarded.
"""
return os.path.join(self.path, *p)
@property
def exists(self):
"""True if file exists."""
return os.path.exists(self.path)
def makedirs(self):
"""
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist.
"""
if not self.exists:
os.makedirs(self.path)
def rmtree(self):
"""Recursively delete the directory tree"""
shutil.rmtree(self.path, ignore_errors=True)
def copy_r(self, dst):
"""
Implements a recursive copy function similar to Unix's "cp -r" command.
"""
return copy_r(self.path, dst)
def clean(self):
"""Remove all files in the directory tree while preserving the directory"""
for path in self.list_filepaths():
try:
os.remove(path)
except:
pass
def path_in(self, file_basename):
"""Return the absolute path of filename in the directory."""
return os.path.join(self.path, file_basename)
def list_filepaths(self, wildcard=None):
"""
Return the list of absolute filepaths in the directory.
Args:
wildcard: String of tokens separated by "|". Each token represents a pattern.
If wildcard is not None, we return only those files that match the given shell pattern (uses fnmatch).
Example:
wildcard="*.nc|*.pdf" selects only those files that end with .nc or .pdf
"""
# Select the files in the directory.
fnames = [f for f in os.listdir(self.path)]
filepaths = filter(os.path.isfile, [os.path.join(self.path, f) for f in fnames])
# Filter using the shell patterns.
if wildcard is not None:
filepaths = WildCard(wildcard).filter(filepaths)
return filepaths
def has_abiext(self, ext, single_file=True):
"""
Returns the absolute path of the ABINIT file with extension ext.
        Supports both Fortran files and netcdf files. In the latter case,
        we check whether a file with extension ext + ".nc" is present
        in the directory. Returns an empty string if the file is not present.
Raises:
`ValueError` if multiple files with the given ext are found.
This implies that this method is not compatible with multiple datasets.
"""
if ext != "abo":
ext = ext if ext.startswith('_') else '_' + ext
files = []
for f in self.list_filepaths():
# For the time being, we ignore DDB files in nc format.
if ext == "_DDB" and f.endswith(".nc"): continue
# Ignore BSE text files e.g. GW_NLF_MDF
if ext == "_MDF" and not f.endswith(".nc"): continue
            # Ignore DDK.nc files (temporary workaround for v8.8.2 in which
            # the DFPT code produces a new file with the DDK.nc extension that
            # conflicts with the AbiPy convention).
if ext == "_DDK" and f.endswith(".nc"): continue
if f.endswith(ext) or f.endswith(ext + ".nc"):
files.append(f)
# This should fix the problem with the 1WF files in which the file extension convention is broken
if not files:
files = [f for f in self.list_filepaths() if fnmatch(f, "*%s*" % ext)]
if not files:
return ""
if len(files) > 1 and single_file:
# ABINIT users must learn that multiple datasets are bad!
raise ValueError("Found multiple files with the same extensions:\n %s\n" % files +
"Please avoid using multiple datasets!")
return files[0] if single_file else files
def symlink_abiext(self, inext, outext):
"""
        Create a symbolic link (outext --> inext). The file names are implicitly
given by the ABINIT file extension.
Example:
outdir.symlink_abiext('1WF', 'DDK')
creates the link out_DDK that points to out_1WF
Return: 0 if success.
Raise: RuntimeError
"""
infile = self.has_abiext(inext)
if not infile:
raise RuntimeError('no file with extension %s in %s' % (inext, self))
for i in range(len(infile) - 1, -1, -1):
if infile[i] == '_':
break
else:
raise RuntimeError('Extension %s could not be detected in file %s' % (inext, infile))
outfile = infile[:i] + '_' + outext
if os.path.exists(outfile):
if os.path.islink(outfile):
if os.path.realpath(outfile) == infile:
logger.debug("link %s already exists but it's ok because it points to the correct file" % outfile)
return 0
else:
raise RuntimeError("Expecting link at %s already exists but it does not point to %s" % (outfile, infile))
else:
raise RuntimeError('Expecting link at %s but found file.' % outfile)
os.symlink(infile, outfile)
return 0
def rename_abiext(self, inext, outext):
"""Rename the Abinit file with extension inext with the new extension outext"""
infile = self.has_abiext(inext)
if not infile:
raise RuntimeError('no file with extension %s in %s' % (inext, self))
for i in range(len(infile) - 1, -1, -1):
if infile[i] == '_':
break
else:
raise RuntimeError('Extension %s could not be detected in file %s' % (inext, infile))
outfile = infile[:i] + '_' + outext
shutil.move(infile, outfile)
return 0
def copy_abiext(self, inext, outext):
"""Copy the Abinit file with extension inext to a new file withw extension outext"""
infile = self.has_abiext(inext)
if not infile:
raise RuntimeError('no file with extension %s in %s' % (inext, self))
for i in range(len(infile) - 1, -1, -1):
if infile[i] == '_':
break
else:
raise RuntimeError('Extension %s could not be detected in file %s' % (inext, infile))
outfile = infile[:i] + '_' + outext
shutil.copy(infile, outfile)
return 0
def remove_exts(self, exts):
"""
Remove the files with the given extensions. Unlike rmtree, this function preserves the directory path.
Return list with the absolute paths of the files that have been removed.
"""
paths = []
for ext in list_strings(exts):
path = self.has_abiext(ext)
if not path: continue
try:
os.remove(path)
paths.append(path)
except IOError:
logger.warning("Exception while trying to remove file %s" % path)
return paths
def find_last_timden_file(self):
"""
        ABINIT produces lots of out_TIM1_DEN files for each step and we need to find the last
        one in order to prepare the restart or to connect other tasks to the structural relaxation.
        This function finds all the TIM?_DEN files in self and returns a namedtuple (path, step)
        where `path` is the path of the last TIM?_DEN file and step is the iteration number.
        Returns None if the directory does not contain TIM?_DEN files.
"""
regex = re.compile(r"out_TIM(\d+)_DEN(.nc)?$")
timden_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not timden_paths: return None
# Build list of (step, path) tuples.
stepfile_list = []
for path in timden_paths:
name = os.path.basename(path)
match = regex.match(name)
step, ncext = match.groups()
stepfile_list.append((int(step), path))
# DSU sort.
last = sorted(stepfile_list, key=lambda t: t[0])[-1]
return dict2namedtuple(step=last[0], path=last[1])
def find_1wf_files(self):
"""
        Abinit adds the idir-ipert index at the end of the 1WF file and this breaks the extension
        e.g. out_1WF4. This method scans the files in the directory and returns a list of namedtuples.
        Each namedtuple gives the `path` of the 1WF file and the `pertcase` index.
"""
regex = re.compile(r"out_1WF(\d+)(\.nc)?$")
wf_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not wf_paths: return None
# Build list of (pertcase, path) tuples.
pertfile_list = []
for path in wf_paths:
name = os.path.basename(path)
match = regex.match(name)
pertcase, ncext = match.groups()
pertfile_list.append((int(pertcase), path))
# DSU sort.
pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
return [dict2namedtuple(pertcase=item[0], path=item[1]) for item in pertfile_list]
def find_1den_files(self):
"""
        Abinit adds the idir-ipert index at the end of the 1DEN file and this breaks the extension
        e.g. out_DEN1. This method scans the files in the directory and returns a list of namedtuples.
        Each namedtuple gives the `path` of the 1DEN file and the `pertcase` index.
"""
regex = re.compile(r"out_DEN(\d+)(\.nc)?$")
den_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not den_paths: return None
# Build list of (pertcase, path) tuples.
pertfile_list = []
for path in den_paths:
name = os.path.basename(path)
match = regex.match(name)
pertcase, ncext = match.groups()
pertfile_list.append((int(pertcase), path))
# DSU sort.
pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
return [dict2namedtuple(pertcase=item[0], path=item[1]) for item in pertfile_list]
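# Illustrative usage sketch (added, not part of the original module): filter
# the files in a directory with shell patterns or locate an ABINIT output
# file by its extension. The directory name below is hypothetical.
def _directory_usage_example():  # pragma: no cover
    outdir = Directory("run/outdata")
    nc_or_abo = outdir.list_filepaths(wildcard="*.nc|*.abo")
    den_path = outdir.has_abiext("DEN")  # "" if no out_DEN(.nc) file is found
    return nc_or_abo, den_path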
# This dictionary maps ABINIT file extensions to the variables that must be used to read the file in input.
#
# TODO: It would be nice to pass absolute paths to abinit with getden_path
# so that I can avoid creating symbolic links before running but
# the presence of the C-bindings complicates the implementation
# (gfortran SIGFAULTs if I add strings to dataset_type!
_EXT2VARS = {
"DEN": {"irdden": 1},
"WFK": {"irdwfk": 1},
"WFQ": {"irdwfq": 1},
"SCR": {"irdscr": 1},
"QPS": {"irdqps": 1},
"1WF": {"ird1wf": 1},
"1DEN": {"ird1den": 1},
"BSR": {"irdbsreso": 1},
"BSC": {"irdbscoup": 1},
"HAYDR_SAVE": {"irdhaydock": 1},
"DDK": {"irdddk": 1},
"DDB": {},
"DVDB": {},
"GKK": {},
"DKK": {},
}
def irdvars_for_ext(ext):
"""
Returns a dictionary with the ABINIT variables
that must be used to read the file with extension ext.
"""
return _EXT2VARS[ext].copy()
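# Illustrative example (added, not part of the original module): the input
# variables injected when a dependency provides a file with a given extension.
def _irdvars_example():  # pragma: no cover
    assert irdvars_for_ext("DEN") == {"irdden": 1}
    assert irdvars_for_ext("DDB") == {}  # no input variable is needed for DDB files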
def abi_extensions():
"""List with all the ABINIT extensions that are registered."""
return list(_EXT2VARS.keys())[:]
def abi_splitext(filename):
"""
Split the ABINIT extension from a filename.
"Extension" are found by searching in an internal database.
Returns "(root, ext)" where ext is the registered ABINIT extension
The final ".nc" is included (if any)
>>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')
>>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
"""
filename = os.path.basename(filename)
is_ncfile = False
if filename.endswith(".nc"):
is_ncfile = True
filename = filename[:-3]
known_extensions = abi_extensions()
    # This algorithm fails if we have two files
# e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE
for i in range(len(filename)-1, -1, -1):
ext = filename[i:]
if ext in known_extensions:
break
else:
raise ValueError("Cannot find a registered extension in %s" % filename)
root = filename[:i]
if is_ncfile:
ext += ".nc"
return root, ext
class FilepathFixer:
"""
This object modifies the names of particular output files
produced by ABINIT so that the file extension is preserved.
Having a one-to-one mapping between file extension and data format
is indeed fundamental for the correct behaviour of abinit since:
- We locate the output file by just inspecting the extension
- We select the variables that must be added to the input file
on the basis of the extension specified by the user during
the initialization of the `AbinitFlow`.
Unfortunately, ABINIT developers like to append extra stuff
to the initial extension and therefore we have to call
`FilepathFixer` to fix the output files produced by the run.
Example:
>>> fixer = FilepathFixer()
>>> assert fixer.fix_paths('/foo/out_1WF17') == {'/foo/out_1WF17': '/foo/out_1WF'}
>>> assert fixer.fix_paths('/foo/out_1WF5.nc') == {'/foo/out_1WF5.nc': '/foo/out_1WF.nc'}
"""
def __init__(self):
# dictionary mapping the *official* file extension to
# the regular expression used to tokenize the basename of the file
# To add a new file it's sufficient to add a new regexp and
# a static method _fix_EXTNAME
self.regs = regs = {}
import re
regs["1WF"] = re.compile(r"(\w+_)1WF(\d+)(\.nc)?$")
regs["1DEN"] = re.compile(r"(\w+_)1DEN(\d+)(\.nc)?$")
@staticmethod
def _fix_1WF(match):
root, pert, ncext = match.groups()
if ncext is None: ncext = ""
return root + "1WF" + ncext
@staticmethod
def _fix_1DEN(match):
root, pert, ncext = match.groups()
if ncext is None: ncext = ""
return root + "1DEN" + ncext
def _fix_path(self, path):
for ext, regex in self.regs.items():
head, tail = os.path.split(path)
match = regex.match(tail)
if match:
newtail = getattr(self, "_fix_" + ext)(match)
newpath = os.path.join(head, newtail)
return newpath, ext
return None, None
def fix_paths(self, paths):
"""
Fix the filenames in the iterable paths
Returns:
old2new: Mapping old_path --> new_path
"""
old2new, fixed_exts = {}, []
for path in list_strings(paths):
newpath, ext = self._fix_path(path)
if newpath is not None:
#if ext not in fixed_exts:
# if ext == "1WF": continue
# raise ValueError("Unknown extension %s" % ext)
#print(ext, path, fixed_exts)
#if ext != '1WF':
# assert ext not in fixed_exts
if ext not in fixed_exts:
if ext == "1WF": continue
raise ValueError("Unknown extension %s" % ext)
fixed_exts.append(ext)
old2new[path] = newpath
return old2new
def _bop_not(obj):
"""Boolean not."""
return not bool(obj)
def _bop_and(obj1, obj2):
"""Boolean and."""
return bool(obj1) and bool(obj2)
def _bop_or(obj1, obj2):
"""Boolean or."""
return bool(obj1) or bool(obj2)
def _bop_divisible(num1, num2):
"""Return True if num1 is divisible by num2."""
return (num1 % num2) == 0.0
# Mapping string --> operator.
_UNARY_OPS = {
"$not": _bop_not,
}
_BIN_OPS = {
"$eq": operator.eq,
"$ne": operator.ne,
"$gt": operator.gt,
"$ge": operator.ge,
"$gte": operator.ge,
"$lt": operator.lt,
"$le": operator.le,
"$lte": operator.le,
"$divisible": _bop_divisible,
"$and": _bop_and,
"$or": _bop_or,
}
_ALL_OPS = list(_UNARY_OPS.keys()) + list(_BIN_OPS.keys())
def map2rpn(map, obj):
"""
Convert a Mongodb-like dictionary to a RPN list of operands and operators.
Reverse Polish notation (RPN) is a mathematical notation in which every
operator follows all of its operands, e.g.
3 - 4 + 5 --> 3 4 - 5 +
>>> d = {2.0: {'$eq': 1.0}}
>>> assert map2rpn(d, None) == [2.0, 1.0, '$eq']
"""
rpn = []
for k, v in map.items():
if k in _ALL_OPS:
if isinstance(v, collections.abc.Mapping):
# e.g "$not": {"$gt": "one"}
# print("in op_vmap",k, v)
values = map2rpn(v, obj)
rpn.extend(values)
rpn.append(k)
elif isinstance(v, (list, tuple)):
# e.g "$and": [{"$not": {"one": 1.0}}, {"two": {"$lt": 3}}]}
# print("in_op_list",k, v)
                for d in v:
                    rpn.extend(map2rpn(d, obj))
                rpn.append(k)
else:
# Examples
# 1) "$eq"": "attribute_name"
# 2) "$eq"": 1.0
try:
#print("in_otherv",k, v)
rpn.append(getattr(obj, v))
rpn.append(k)
except TypeError:
#print("in_otherv, raised",k, v)
rpn.extend([v, k])
else:
try:
k = getattr(obj, k)
except TypeError:
k = k
if isinstance(v, collections.abc.Mapping):
# "one": {"$eq": 1.0}}
values = map2rpn(v, obj)
rpn.append(k)
rpn.extend(values)
else:
#"one": 1.0
rpn.extend([k, v, "$eq"])
return rpn
def evaluate_rpn(rpn):
"""
    Evaluates the RPN form produced by map2rpn.
Returns:
bool
"""
vals_stack = []
for item in rpn:
if item in _ALL_OPS:
            # Apply the operator and push the result onto the stack.
v2 = vals_stack.pop()
if item in _UNARY_OPS:
res = _UNARY_OPS[item](v2)
elif item in _BIN_OPS:
v1 = vals_stack.pop()
res = _BIN_OPS[item](v1, v2)
else:
raise ValueError("%s not in unary_ops or bin_ops" % str(item))
vals_stack.append(res)
else:
# Push the operand
vals_stack.append(item)
#print(vals_stack)
assert len(vals_stack) == 1
assert isinstance(vals_stack[0], bool)
return vals_stack[0]
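# Illustrative example (added, not part of the original module): a Mongodb-like
# dictionary is first translated to RPN with map2rpn and then reduced to a bool
# with evaluate_rpn. The Record class and its attribute are hypothetical.
def _rpn_example():  # pragma: no cover
    class Record:
        nkpt = 8
    rpn = map2rpn({"nkpt": {"$divisible": 4}}, obj=Record())
    # rpn is [8, 4, '$divisible']
    assert evaluate_rpn(rpn) is True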
class Condition:
"""
This object receives a dictionary that defines a boolean condition whose syntax is similar
to the one used in mongodb (albeit not all the operators available in mongodb are supported here).
Example:
$gt: {field: {$gt: value} }
$gt selects those documents where the value of the field is greater than (i.e. >) the specified value.
$and performs a logical AND operation on an array of two or more expressions (e.g. <expression1>, <expression2>, etc.)
and selects the documents that satisfy all the expressions in the array.
{ $and: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
Consider the following example:
db.inventory.find( { qty: { $gt: 20 } } )
This query will select all documents in the inventory collection where the qty field value is greater than 20.
    The $and example below selects the documents that match all three expressions:
    db.inventory.find({ $and: [ { price: 1.99 }, { qty: { $lt: 20 } }, { sale: true } ] } )
"""
@classmethod
def as_condition(cls, obj):
"""Convert obj into :class:`Condition`"""
if isinstance(obj, cls):
return obj
else:
return cls(cmap=obj)
def __init__(self, cmap=None):
self.cmap = {} if cmap is None else cmap
def __str__(self):
return str(self.cmap)
def __bool__(self):
return bool(self.cmap)
__nonzero__ = __bool__
def __call__(self, obj):
if not self: return True
try:
return evaluate_rpn(map2rpn(self.cmap, obj))
except Exception as exc:
logger.warning("Condition(%s) raised Exception:\n %s" % (type(obj), str(exc)))
return False
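# Illustrative example (added, not part of the original module): Condition
# evaluates its dictionary against an arbitrary object, reading attributes by
# name. The Task class below is hypothetical.
def _condition_example():  # pragma: no cover
    class Task:
        num_warnings = 3
    cond = Condition.as_condition({"num_warnings": {"$gt": 0}})
    assert cond(Task())         # True because 3 > 0
    assert Condition()(Task())  # an empty condition always evaluates to True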
class Editor:
"""
Wrapper class that calls the editor specified by the user
or the one specified in the $EDITOR env variable.
"""
def __init__(self, editor=None):
"""If editor is None, $EDITOR is used."""
self.editor = os.getenv("EDITOR", "vi") if editor is None else str(editor)
def edit_files(self, fnames, ask_for_exit=True):
exit_status = 0
for idx, fname in enumerate(fnames):
exit_status = self.edit_file(fname)
if ask_for_exit and idx != len(fnames)-1 and self.user_wants_to_exit():
break
return exit_status
def edit_file(self, fname):
from subprocess import call
retcode = call([self.editor, fname])
if retcode != 0:
import warnings
warnings.warn("Error while trying to edit file: %s" % fname)
return retcode
@staticmethod
def user_wants_to_exit():
"""Show an interactive prompt asking if exit is wanted."""
# Fix python 2.x.
try:
answer = input("Do you want to continue [Y/n]")
except EOFError:
return True
return answer.lower().strip() in ["n", "no"]
class SparseHistogram:
def __init__(self, items, key=None, num=None, step=None):
if num is None and step is None:
raise ValueError("Either num or step must be specified")
from collections import defaultdict, OrderedDict
values = [key(item) for item in items] if key is not None else items
start, stop = min(values), max(values)
if num is None:
num = int((stop - start) / step)
if num == 0: num = 1
mesh = np.linspace(start, stop, num, endpoint=False)
from monty.bisect import find_le
hist = defaultdict(list)
for item, value in zip(items, values):
# Find rightmost value less than or equal to x.
# hence each bin contains all items whose value is >= value
pos = find_le(mesh, value)
hist[mesh[pos]].append(item)
#new = OrderedDict([(pos, hist[pos]) for pos in sorted(hist.keys(), reverse=reverse)])
self.binvals = sorted(hist.keys())
self.values = [hist[pos] for pos in self.binvals]
self.start, self.stop, self.num = start, stop, num
@add_fig_kwargs
def plot(self, ax=None, **kwargs):
"""
Plot the histogram with matplotlib, returns `matplotlib` figure.
"""
ax, fig, plt = get_ax_fig_plt(ax)
yy = [len(v) for v in self.values]
ax.plot(self.binvals, yy, **kwargs)
return fig
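# Illustrative example (added, not part of the original module): bin a list of
# floats with a fixed step; only non-empty bins are stored.
def _sparse_histogram_example():  # pragma: no cover
    hist = SparseHistogram([0.1, 0.15, 0.7, 2.3], step=0.5)
    # hist.binvals holds the start of each non-empty bin, hist.values the items
    # that fall into it.
    return list(zip(hist.binvals, hist.values))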
class Dirviz:
#file_color = np.array((255, 0, 0)) / 255
#dir_color = np.array((0, 0, 255)) / 255
def __init__(self, top):
#if not os.path.isdir(top):
# raise TypeError("%s should be a directory!" % str(top))
self.top = os.path.abspath(top)
def get_cluster_graph(self, engine="fdp", graph_attr=None, node_attr=None, edge_attr=None):
"""
Generate directory graph in the DOT language. Directories are shown as clusters
.. warning::
This function scans the entire directory tree starting from top so the resulting
graph can be really big.
Args:
engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
graph_attr: Mapping of (attribute, value) pairs for the graph.
node_attr: Mapping of (attribute, value) pairs set for all nodes.
edge_attr: Mapping of (attribute, value) pairs set for all edges.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
"""
# https://www.graphviz.org/doc/info/
from graphviz import Digraph
g = Digraph("directory", #filename="flow_%s.gv" % os.path.basename(self.relworkdir),
engine=engine) # if engine == "automatic" else engine)
# Set graph attributes.
#g.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))
g.attr(label=self.top)
#g.attr(fontcolor="white", bgcolor='purple:pink')
#g.attr(rankdir="LR", pagedir="BL")
#g.attr(constraint="false", pack="true", packMode="clust")
g.node_attr.update(color='lightblue2', style='filled')
#g.node_attr.update(ranksep='equally')
# Add input attributes.
        if graph_attr is not None:
            g.graph_attr.update(**graph_attr)
        if node_attr is not None:
            g.node_attr.update(**node_attr)
        if edge_attr is not None:
            g.edge_attr.update(**edge_attr)
def node_kwargs(path):
return dict(
#shape="circle",
#shape="none",
#shape="plaintext",
#shape="point",
shape="record",
#color=node.color_hex,
fontsize="8.0",
label=os.path.basename(path),
)
        edge_kwargs = dict(arrowhead="vee", style="solid", minlen="1")
cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
# TODO: Write other method without clusters if not walk.
exclude_top_node = False
for root, dirs, files in os.walk(self.top):
if exclude_top_node and root == self.top: continue
cluster_name = "cluster_%s" % root
#print("root", root, cluster_name, "dirs", dirs, "files", files, sep="\n")
with g.subgraph(name=cluster_name) as d:
d.attr(**cluster_kwargs)
d.attr(rank="source" if (files or dirs) else "sink")
d.attr(label=os.path.basename(root))
for f in files:
filepath = os.path.join(root, f)
d.node(filepath, **node_kwargs(filepath))
if os.path.islink(filepath):
# Follow the link and use the relpath wrt link as label.
realp = os.path.realpath(filepath)
realp = os.path.relpath(realp, filepath)
#realp = os.path.relpath(realp, self.top)
#print(filepath, realp)
#g.node(realp, **node_kwargs(realp))
g.edge(filepath, realp, **edge_kwargs)
for dirname in dirs:
dirpath = os.path.join(root, dirname)
#head, basename = os.path.split(dirpath)
new_cluster_name = "cluster_%s" % dirpath
#rank = "source" if os.listdir(dirpath) else "sink"
#g.node(dirpath, rank=rank, **node_kwargs(dirpath))
#g.edge(dirpath, new_cluster_name, **edge_kwargs)
#d.edge(cluster_name, new_cluster_name, minlen="2", **edge_kwargs)
d.edge(cluster_name, new_cluster_name, **edge_kwargs)
return g
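# Illustrative usage sketch (added, not part of the original module): render a
# directory tree with the optional graphviz package. The output filename is
# hypothetical.
def _dirviz_example():  # pragma: no cover
    graph = Dirviz(".").get_cluster_graph(engine="fdp")
    graph.render("dirtree", view=False)  # writes "dirtree" and, by default, "dirtree.pdf"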
|
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ACL2016 Multimodal Machine Translation. Please see this website for more
details: http://www.statmt.org/wmt16/multimodal-task.html#task1
If you use the dataset created for your task, please cite the following paper:
Multi30K: Multilingual English-German Image Descriptions.
@article{elliott-EtAl:2016:VL16,
author = {{Elliott}, D. and {Frank}, S. and {Sima'an}, K. and {Specia}, L.},
title = {Multi30K: Multilingual English-German Image Descriptions},
booktitle = {Proceedings of the 6th Workshop on Vision and Language},
year = {2016},
pages = {70--74},
}
"""
from __future__ import print_function
import os
import six
import tarfile
import gzip
from collections import defaultdict
import paddle.dataset.common
import paddle.compat as cpt
__all__ = [
"train",
"test",
"validation",
"convert",
"fetch",
"get_dict",
]
DATA_URL = ("http://cloud.dlnel.org/filepub/"
"?uuid=46a0808e-ddd8-427c-bacd-0dbc6d045fed")
DATA_MD5 = "0c38be43600334966403524a40dcd81e"
TOTAL_EN_WORDS = 11250
TOTAL_DE_WORDS = 19220
START_MARK = "<s>"
END_MARK = "<e>"
UNK_MARK = "<unk>"
def __build_dict(tar_file, dict_size, save_path, lang):
word_dict = defaultdict(int)
with tarfile.open(tar_file, mode="r") as f:
for line in f.extractfile("wmt16/train"):
line_split = line.strip().split(six.b("\t"))
if len(line_split) != 2: continue
sen = line_split[0] if lang == "en" else line_split[1]
for w in sen.split():
word_dict[w] += 1
with open(save_path, "w") as fout:
fout.write("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK))
for idx, word in enumerate(
sorted(
six.iteritems(word_dict), key=lambda x: x[1],
reverse=True)):
if idx + 3 == dict_size: break
fout.write("%s\n" % (word[0]))
def __load_dict(tar_file, dict_size, lang, reverse=False):
dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
"wmt16/%s_%d.dict" % (lang, dict_size))
if not os.path.exists(dict_path) or (
len(open(dict_path, "rb").readlines()) != dict_size):
__build_dict(tar_file, dict_size, dict_path, lang)
word_dict = {}
with open(dict_path, "rb") as fdict:
for idx, line in enumerate(fdict):
if reverse:
word_dict[idx] = cpt.to_text(line.strip())
else:
word_dict[cpt.to_text(line.strip())] = idx
return word_dict
def __get_dict_size(src_dict_size, trg_dict_size, src_lang):
src_dict_size = min(src_dict_size, (TOTAL_EN_WORDS if src_lang == "en" else
TOTAL_DE_WORDS))
trg_dict_size = min(trg_dict_size, (TOTAL_DE_WORDS if src_lang == "en" else
TOTAL_EN_WORDS))
return src_dict_size, trg_dict_size
def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang):
def reader():
src_dict = __load_dict(tar_file, src_dict_size, src_lang)
trg_dict = __load_dict(tar_file, trg_dict_size,
("de" if src_lang == "en" else "en"))
        # The indices for the start mark, end mark, and unk token are the same
        # in the source and target languages. Here we use the source language
        # dictionary to determine them.
start_id = src_dict[START_MARK]
end_id = src_dict[END_MARK]
unk_id = src_dict[UNK_MARK]
src_col = 0 if src_lang == "en" else 1
trg_col = 1 - src_col
with tarfile.open(tar_file, mode="r") as f:
for line in f.extractfile(file_name):
line_split = line.strip().split(six.b("\t"))
if len(line_split) != 2:
continue
src_words = line_split[src_col].split()
src_ids = [start_id] + [
src_dict.get(w, unk_id) for w in src_words
] + [end_id]
trg_words = line_split[trg_col].split()
trg_ids = [trg_dict.get(w, unk_id) for w in trg_words]
trg_ids_next = trg_ids + [end_id]
trg_ids = [start_id] + trg_ids
yield src_ids, trg_ids, trg_ids_next
return reader
def train(src_dict_size, trg_dict_size, src_lang="en"):
"""
WMT16 train set reader.
This function returns the reader for train data. Each sample the reader
returns is made up of three fields: the source language word index sequence,
target language word index sequence and next word index sequence.
NOTE:
        The original link for the training data is:
http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz
paddle.dataset.wmt16 provides a tokenized version of the original dataset by
using moses's tokenization script:
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
Args:
src_dict_size(int): Size of the source language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
trg_dict_size(int): Size of the target language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
src_lang(string): A string indicating which language is the source
language. Available options are: "en" for English
and "de" for Germany.
Returns:
callable: The train reader.
"""
if src_lang not in ["en", "de"]:
raise ValueError("An error language type. Only support: "
"en (for English); de(for Germany).")
src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
src_lang)
return reader_creator(
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/train",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang)
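# Illustrative usage sketch (added, not part of the original module): iterate
# over a few training samples. The dictionary sizes are hypothetical and the
# dataset is downloaded on first use.
def _train_reader_example():  # pragma: no cover
    reader = train(src_dict_size=30000, trg_dict_size=30000, src_lang="en")
    for i, (src_ids, trg_ids, trg_ids_next) in enumerate(reader()):
        print(len(src_ids), len(trg_ids), len(trg_ids_next))
        if i == 2:
            break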
def test(src_dict_size, trg_dict_size, src_lang="en"):
"""
WMT16 test set reader.
This function returns the reader for test data. Each sample the reader
returns is made up of three fields: the source language word index sequence,
target language word index sequence and next word index sequence.
NOTE:
        The original link for the test data is:
http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz
paddle.dataset.wmt16 provides a tokenized version of the original dataset by
using moses's tokenization script:
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
Args:
src_dict_size(int): Size of the source language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
trg_dict_size(int): Size of the target language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
src_lang(string): A string indicating which language is the source
language. Available options are: "en" for English
and "de" for Germany.
Returns:
callable: The test reader.
"""
if src_lang not in ["en", "de"]:
raise ValueError("An error language type. "
"Only support: en (for English); de(for Germany).")
src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
src_lang)
return reader_creator(
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/test",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang)
def validation(src_dict_size, trg_dict_size, src_lang="en"):
"""
WMT16 validation set reader.
This function returns the reader for validation data. Each sample the reader
returns is made up of three fields: the source language word index sequence,
target language word index sequence and next word index sequence.
NOTE:
        The original link for the validation data is:
http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz
paddle.dataset.wmt16 provides a tokenized version of the original dataset by
using moses's tokenization script:
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
Args:
src_dict_size(int): Size of the source language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
trg_dict_size(int): Size of the target language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
src_lang(string): A string indicating which language is the source
language. Available options are: "en" for English
and "de" for Germany.
Returns:
callable: The validation reader.
"""
if src_lang not in ["en", "de"]:
raise ValueError("An error language type. "
"Only support: en (for English); de(for Germany).")
src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
src_lang)
return reader_creator(
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/val",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang)
def get_dict(lang, dict_size, reverse=False):
"""
return the word dictionary for the specified language.
Args:
        lang(string): A string indicating which language dictionary to
                      return. Available options are: "en" for English
                      and "de" for German.
dict_size(int): Size of the specified language dictionary.
reverse(bool): If reverse is set to False, the returned python
dictionary will use word as key and use index as value.
If reverse is set to True, the returned python
dictionary will use index as key and word as value.
Returns:
dict: The word dictionary for the specific language.
"""
if lang == "en": dict_size = min(dict_size, TOTAL_EN_WORDS)
else: dict_size = min(dict_size, TOTAL_DE_WORDS)
dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
"wmt16/%s_%d.dict" % (lang, dict_size))
    assert os.path.exists(dict_path), (
        "Word dictionary does not exist. "
        "Please invoke paddle.dataset.wmt16.train/test/validation first "
        "to build the dictionary.")
tar_file = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16.tar.gz")
return __load_dict(tar_file, dict_size, lang, reverse)
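# Illustrative usage sketch (added, not part of the original module): look up
# the English dictionary built by the readers above. train/test/validation must
# have been called beforehand so that the dictionary file exists.
def _get_dict_example():  # pragma: no cover
    en_dict = get_dict("en", dict_size=30000)
    print(len(en_dict), en_dict["<unk>"])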
def fetch():
"""download the entire dataset.
"""
paddle.v4.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz")
def convert(path, src_dict_size, trg_dict_size, src_lang):
"""Converts dataset to recordio format.
"""
paddle.dataset.common.convert(
path,
train(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang),
1000,
"wmt16_train")
paddle.dataset.common.convert(
path,
test(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang),
1000,
"wmt16_test")
paddle.dataset.common.convert(
path,
validation(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang),
1000,
"wmt16_validation")
|
|
from __future__ import annotations
import contextlib
import enum
import math
import typing
from abc import ABCMeta, abstractmethod
import cairocffi
from libqtile import drawer, pangocffi, utils
from libqtile.command.base import CommandError, CommandObject
from libqtile.log_utils import logger
if typing.TYPE_CHECKING:
from typing import Any
from libqtile import config
from libqtile.command.base import ItemT
from libqtile.core.manager import Qtile
from libqtile.group import _Group
from libqtile.utils import ColorsType
class Core(CommandObject, metaclass=ABCMeta):
painter: Any
supports_restarting: bool = True
@property
@abstractmethod
def name(self) -> str:
"""The name of the backend"""
pass
def _items(self, name: str) -> ItemT:
return None
def _select(self, name, sel):
return None
@abstractmethod
def finalize(self):
"""Destructor/Clean up resources"""
@property
@abstractmethod
def display_name(self) -> str:
pass
@abstractmethod
def setup_listener(self, qtile: Qtile) -> None:
"""Setup a listener for the given qtile instance"""
@abstractmethod
def remove_listener(self) -> None:
"""Setup a listener for the given qtile instance"""
def update_desktops(self, groups: list[_Group], index: int) -> None:
"""Set the current desktops of the window manager"""
@abstractmethod
def get_screen_info(self) -> list[tuple[int, int, int, int]]:
"""Get the screen information"""
@abstractmethod
def grab_key(self, key: config.Key | config.KeyChord) -> tuple[int, int]:
"""Configure the backend to grab the key event"""
@abstractmethod
def ungrab_key(self, key: config.Key | config.KeyChord) -> tuple[int, int]:
"""Release the given key event"""
@abstractmethod
def ungrab_keys(self) -> None:
"""Release the grabbed key events"""
@abstractmethod
def grab_button(self, mouse: config.Mouse) -> int:
"""Configure the backend to grab the mouse event"""
@abstractmethod
def ungrab_buttons(self) -> None:
"""Release the grabbed button events"""
@abstractmethod
def grab_pointer(self) -> None:
"""Configure the backend to grab mouse events"""
@abstractmethod
def ungrab_pointer(self) -> None:
"""Release grabbed pointer events"""
def distribute_windows(self, initial: bool) -> None:
"""Distribute windows to groups. `initial` will be `True` if Qtile just started."""
def warp_pointer(self, x: int, y: int) -> None:
"""Warp the pointer to the given coordinates relative."""
def update_client_list(self, windows_map: dict[int, WindowType]) -> None:
"""Update the list of windows being managed"""
@contextlib.contextmanager
def masked(self):
"""A context manager to suppress window events while operating on many windows."""
yield
def create_internal(self, x: int, y: int, width: int, height: int) -> Internal:
"""Create an internal window controlled by Qtile."""
raise NotImplementedError # Only error when called, not when instantiating class
def flush(self) -> None:
"""If needed, flush the backend's event queue."""
def graceful_shutdown(self):
"""Try to close windows gracefully before exiting"""
def simulate_keypress(self, modifiers: list[str], key: str) -> None:
"""Simulate a keypress with given modifiers"""
def keysym_from_name(self, name: str) -> int:
"""Get the keysym for a key from its name"""
raise NotImplementedError
def cmd_info(self) -> dict:
"""Get basic information about the running backend."""
return {"backend": self.name, "display_name": self.display_name}
@enum.unique
class FloatStates(enum.Enum):
NOT_FLOATING = 1
FLOATING = 2
MAXIMIZED = 3
FULLSCREEN = 4
TOP = 5
MINIMIZED = 6
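# (Added note) FloatStates enumerates the floating-related states a window can
# be in (tiled, floating, maximized, fullscreen, kept on top, or minimized);
# concrete window implementations can use it to track which state a client is in.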
class _Window(CommandObject, metaclass=ABCMeta):
def __init__(self):
self.borderwidth: int = 0
self.name: str = "<no name>"
self.reserved_space: tuple[int, int, int, int] | None = None
# Window.cmd_static sets this in case it is hooked to client_new to stop the
# Window object from being managed, now that a Static is being used instead
self.defunct: bool = False
@property
@abstractmethod
def wid(self) -> int:
"""The unique window ID"""
@abstractmethod
def hide(self) -> None:
"""Hide the window"""
@abstractmethod
def unhide(self) -> None:
"""Unhide the window"""
@abstractmethod
def kill(self) -> None:
"""Kill the window"""
def get_wm_class(self) -> list | None:
"""Return the class(es) of the window"""
return None
def get_wm_type(self) -> str | None:
"""Return the type of the window"""
return None
def get_wm_role(self) -> str | None:
"""Return the role of the window"""
return None
@property
def can_steal_focus(self):
"""Is it OK for this window to steal focus?"""
return True
def has_fixed_ratio(self) -> bool:
"""Does this window want a fixed aspect ratio?"""
return False
def has_fixed_size(self) -> bool:
"""Does this window want a fixed size?"""
return False
@property
def urgent(self):
"""Whether this window urgently wants focus"""
return False
@property
def opacity(self) -> float:
"""The opacity of this window from 0 (transparent) to 1 (opaque)."""
return self._opacity
@opacity.setter
def opacity(self, opacity: float) -> None:
"""Opacity setter."""
self._opacity = opacity
@abstractmethod
def place(
self,
x,
y,
width,
height,
borderwidth,
bordercolor,
above=False,
margin=None,
respect_hints=False,
):
"""Place the window in the given position."""
def _items(self, name: str) -> ItemT:
return None
def _select(self, name, sel):
return None
@abstractmethod
def info(self) -> dict[str, Any]:
"""
Return information on this window.
        Minimum required keys are:
- name
- x
- y
- width
- height
- group
- id
- wm_class
"""
return {}
def cmd_info(self) -> dict:
"""Return a dictionary of info."""
return self.info()
class Window(_Window, metaclass=ABCMeta):
"""
A regular Window belonging to a client.
Abstract methods are required to be defined as part of a specific backend's
implementation. Non-abstract methods have default implementations here to be shared
across backends.
"""
qtile: Qtile
# If float_x or float_y are None, the window has never floated
float_x: int | None
float_y: int | None
def __repr__(self):
return "Window(name=%r, wid=%i)" % (self.name, self.wid)
@property
@abstractmethod
def group(self) -> _Group | None:
"""The group to which this window belongs."""
@property
def floating(self) -> bool:
"""Whether this window is floating."""
return False
@floating.setter
def floating(self, do_float: bool) -> None:
raise NotImplementedError
@property
def maximized(self) -> bool:
"""Whether this window is maximized."""
return False
@maximized.setter
def maximized(self, do_maximize: bool) -> None:
raise NotImplementedError
@property
def minimized(self) -> bool:
"""Whether this window is minimized."""
return False
@minimized.setter
def minimized(self, do_minimize: bool) -> None:
raise NotImplementedError
@property
def fullscreen(self) -> bool:
"""Whether this window is fullscreened."""
return False
@fullscreen.setter
def fullscreen(self, do_full: bool) -> None:
raise NotImplementedError
@property
def wants_to_fullscreen(self) -> bool:
"""Does this window want to be fullscreen?"""
return False
def match(self, match: config.Match) -> bool:
"""Compare this window against a Match instance."""
return match.compare(self)
@abstractmethod
def focus(self, warp: bool) -> None:
"""Focus this window and optional warp the pointer to it."""
@abstractmethod
def togroup(self, group_name: str | None = None, *, switch_group: bool = False) -> None:
"""Move window to a specified group
Also switch to that group if switch_group is True.
"""
@property
def has_focus(self):
return self == self.qtile.current_window
def has_user_set_position(self) -> bool:
"""Whether this window has user-defined geometry"""
return False
def is_transient_for(self) -> WindowType | None:
"""What window is this window a transient window for?"""
return None
@abstractmethod
def get_pid(self) -> int:
"""Return the PID that owns the window."""
def paint_borders(self, color: ColorsType, width: int) -> None:
"""Paint the window borders with the given color(s) and width"""
@abstractmethod
def cmd_focus(self, warp: bool = True) -> None:
"""Focuses the window."""
def cmd_match(self, *args, **kwargs) -> bool:
return self.match(*args, **kwargs)
@abstractmethod
def cmd_get_position(self) -> tuple[int, int]:
"""Get the (x, y) of the window"""
@abstractmethod
def cmd_get_size(self) -> tuple[int, int]:
"""Get the (width, height) of the window"""
@abstractmethod
def cmd_move_floating(self, dx: int, dy: int) -> None:
"""Move window by dx and dy"""
@abstractmethod
def cmd_resize_floating(self, dw: int, dh: int) -> None:
"""Add dw and dh to size of window"""
@abstractmethod
def cmd_set_position_floating(self, x: int, y: int) -> None:
"""Move window to x and y"""
@abstractmethod
def cmd_set_position(self, x: int, y: int) -> None:
"""
Move floating window to x and y; swap tiling window with the window under the
pointer.
"""
@abstractmethod
def cmd_set_size_floating(self, w: int, h: int) -> None:
"""Set window dimensions to w and h"""
@abstractmethod
def cmd_place(
self, x, y, width, height, borderwidth, bordercolor, above=False, margin=None
) -> None:
"""Place the window with the given position and geometry."""
@abstractmethod
def cmd_toggle_floating(self) -> None:
"""Toggle the floating state of the window."""
@abstractmethod
def cmd_enable_floating(self) -> None:
"""Float the window."""
@abstractmethod
def cmd_disable_floating(self) -> None:
"""Tile the window."""
@abstractmethod
def cmd_toggle_maximize(self) -> None:
"""Toggle the maximize state of the window."""
@abstractmethod
def cmd_toggle_minimize(self) -> None:
"""Toggle the minimize state of the window."""
@abstractmethod
def cmd_toggle_fullscreen(self) -> None:
"""Toggle the fullscreen state of the window."""
@abstractmethod
def cmd_enable_fullscreen(self) -> None:
"""Fullscreen the window"""
@abstractmethod
def cmd_disable_fullscreen(self) -> None:
"""Un-fullscreen the window"""
@abstractmethod
def cmd_bring_to_front(self) -> None:
"""Bring the window to the front"""
def cmd_togroup(
self,
group_name: str | None = None,
groupName: str | None = None, # Deprecated # noqa: N803
switch_group: bool = False,
) -> None:
"""Move window to a specified group
Also switch to that group if `switch_group` is True.
`groupName` is deprecated and will be dropped soon. Please use `group_name`
instead.
"""
if groupName is not None:
logger.warning("Window.cmd_togroup's groupName is deprecated; use group_name")
group_name = groupName
self.togroup(group_name, switch_group=switch_group)
def cmd_toscreen(self, index: int | None = None) -> None:
"""Move window to a specified screen.
If index is not specified, we assume the current screen
Examples
========
Move window to current screen::
toscreen()
Move window to screen 0::
toscreen(0)
"""
if index is None:
screen = self.qtile.current_screen
else:
try:
screen = self.qtile.screens[index]
except IndexError:
raise CommandError("No such screen: %d" % index)
self.togroup(screen.group.name)
def cmd_opacity(self, opacity: float) -> None:
"""Set the window's opacity.
The value is clamped to the range 0.1 (nearly transparent) to 1 (opaque).
"""
if opacity < 0.1:
self.opacity = 0.1
elif opacity > 1:
self.opacity = 1
else:
self.opacity = opacity
def cmd_down_opacity(self) -> None:
"""Decrease the window's opacity by 10%."""
if self.opacity > 0.2:
# don't go completely clear
self.opacity -= 0.1
else:
self.opacity = 0.1
def cmd_up_opacity(self) -> None:
"""Increase the window's opacity by 10%."""
if self.opacity < 0.9:
self.opacity += 0.1
else:
self.opacity = 1
@abstractmethod
def cmd_kill(self) -> None:
"""Kill the window. Try to be polite."""
@abstractmethod
def cmd_static(
self,
screen: int | None = None,
x: int | None = None,
y: int | None = None,
width: int | None = None,
height: int | None = None,
) -> None:
"""Makes this window a static window, attached to a Screen.
Values left unspecified are taken from the existing window state.
"""
self.defunct = True
def cmd_center(self) -> None:
"""Centers a floating window on the screen."""
if not self.floating:
return
if not (self.group and self.group.screen):
return
screen = self.group.screen
x = (screen.width - self.width) // 2 # type: ignore
y = (screen.height - self.height) // 2 # type: ignore
self.place(
x,
y,
self.width, # type: ignore
self.height, # type: ignore
self.borderwidth,
self.bordercolor, # type: ignore
above=True,
respect_hints=True,
)
class Internal(_Window, metaclass=ABCMeta):
"""An Internal window belonging to Qtile."""
def __repr__(self):
return "Internal(wid=%s)" % self.wid
@abstractmethod
def create_drawer(self, width: int, height: int) -> Drawer:
"""Create a Drawer that draws to this window."""
def process_window_expose(self) -> None:
"""Respond to the window being exposed. Required by X11 backend."""
def process_button_click(self, x: int, y: int, button: int) -> None:
"""Handle a pointer button click."""
def process_button_release(self, x: int, y: int, button: int) -> None:
"""Handle a pointer button release."""
def process_pointer_enter(self, x: int, y: int) -> None:
"""Handle the pointer entering the window."""
def process_pointer_leave(self, x: int, y: int) -> None:
"""Handle the pointer leaving the window."""
def process_pointer_motion(self, x: int, y: int) -> None:
"""Handle pointer motion within the window."""
def process_key_press(self, keycode: int) -> None:
"""Handle a key press."""
class Static(_Window, metaclass=ABCMeta):
"""A window bound to a screen rather than a group."""
screen: config.Screen
x: Any
y: Any
width: Any
height: Any
def __repr__(self):
return "Static(name=%r, wid=%s)" % (self.name, self.wid)
def info(self) -> dict:
"""Return a dictionary of info."""
return dict(
name=self.name,
wm_class=self.get_wm_class(),
x=self.x,
y=self.y,
width=self.width,
height=self.height,
id=self.wid,
)
@abstractmethod
def cmd_bring_to_front(self) -> None:
"""Bring the window to the front"""
WindowType = typing.Union[Window, Internal, Static]
class Drawer:
"""A helper class for drawing to Internal windows.
We stage drawing operations locally in memory using a cairo RecordingSurface before
finally drawing all operations to a backend-specific target.
"""
# We need to track extent of drawing to know when to redraw.
previous_rect: tuple[int, int, int | None, int | None]
current_rect: tuple[int, int, int | None, int | None]
def __init__(self, qtile: Qtile, win: Internal, width: int, height: int):
self.qtile = qtile
self._win = win
self._width = width
self._height = height
self.surface: cairocffi.RecordingSurface
self.ctx: cairocffi.Context
self._reset_surface()
self.mirrors: dict[Drawer, bool] = {}
self.current_rect = (0, 0, 0, 0)
self.previous_rect = (-1, -1, -1, -1)
self._enabled = True
def finalize(self):
"""Destructor/Clean up resources"""
self.surface = None
self.ctx = None
def add_mirror(self, mirror: Drawer):
"""Keep details of other drawers that are mirroring this one."""
self.mirrors[mirror] = False
def reset_mirrors(self):
"""Reset the drawn status of mirrors."""
self.mirrors = {m: False for m in self.mirrors}
@property
def mirrors_drawn(self) -> bool:
"""Returns True if all mirrors have been drawn with the current surface."""
return all(v for v in self.mirrors.values())
@property
def width(self) -> int:
return self._width
@width.setter
def width(self, width: int):
self._width = width
@property
def height(self) -> int:
return self._height
@height.setter
def height(self, height: int):
self._height = height
def _reset_surface(self):
"""This creates a fresh surface and cairo context."""
self.surface = cairocffi.RecordingSurface(
cairocffi.CONTENT_COLOR_ALPHA,
None,
)
self.ctx = self.new_ctx()
def _check_surface_reset(self):
"""
Checks to see if the widget is not being reflected and
then clears RecordingSurface of operations.
"""
if not self.mirrors:
self._reset_surface()
@property
def needs_update(self) -> bool:
# We can't rely on the surface's ink_extents alone: a completely transparent
# background shows no extents, yet we may still need to redraw (e.g. if a
# Spacer widget has changed position and/or size).
# Check if the size of the area being drawn has changed
rect_changed = self.current_rect != self.previous_rect
# Check if draw has content (would be False for completely transparent drawer)
ink_changed = any(not math.isclose(0.0, i) for i in self.surface.ink_extents())
return ink_changed or rect_changed
def paint_to(self, drawer: Drawer) -> None:
drawer.ctx.set_source_surface(self.surface)
drawer.ctx.paint()
self.mirrors[drawer] = True
if self.mirrors_drawn:
self._reset_surface()
self.reset_mirrors()
def _rounded_rect(self, x, y, width, height, linewidth):
aspect = 1.0
corner_radius = height / 10.0
radius = corner_radius / aspect
degrees = math.pi / 180.0
self.ctx.new_sub_path()
delta = radius + linewidth / 2
self.ctx.arc(x + width - delta, y + delta, radius, -90 * degrees, 0 * degrees)
self.ctx.arc(x + width - delta, y + height - delta, radius, 0 * degrees, 90 * degrees)
self.ctx.arc(x + delta, y + height - delta, radius, 90 * degrees, 180 * degrees)
self.ctx.arc(x + delta, y + delta, radius, 180 * degrees, 270 * degrees)
self.ctx.close_path()
def rounded_rectangle(self, x: int, y: int, width: int, height: int, linewidth: int):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def rounded_fillrect(self, x: int, y: int, width: int, height: int, linewidth: int):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.fill()
def rectangle(self, x: int, y: int, width: int, height: int, linewidth: int = 2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.stroke()
def fillrect(self, x: int, y: int, width: int, height: int, linewidth: int = 2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.fill()
self.ctx.stroke()
def enable(self):
"""Enable drawing of surface to Internal window."""
self._enabled = True
def disable(self):
"""Disable drawing of surface to Internal window."""
self._enabled = False
def draw(
self,
offsetx: int = 0,
offsety: int = 0,
width: int | None = None,
height: int | None = None,
):
"""
A wrapper for the draw operation.
This draws our cached operations to the Internal window.
Afterwards (whether or not the Drawer is enabled) the RecordingSurface is
cleared, provided no mirrors are registered; otherwise the surface is reset
in paint_to once every mirror has copied it.
Parameters
==========
offsetx :
the X offset to start drawing at.
offsety :
the Y offset to start drawing at.
width :
the X portion of the canvas to draw at the starting point.
height :
the Y portion of the canvas to draw at the starting point.
"""
if self._enabled:
self._draw(offsetx, offsety, width, height)
# Check to see if RecordingSurface can be cleared.
self._check_surface_reset()
def _draw(
self,
offsetx: int = 0,
offsety: int = 0,
width: int | None = None,
height: int | None = None,
):
"""
This draws our cached operations to the Internal window.
Parameters
==========
offsetx :
the X offset to start drawing at.
offsety :
the Y offset to start drawing at.
width :
the X portion of the canvas to draw at the starting point.
height :
the Y portion of the canvas to draw at the starting point.
"""
def new_ctx(self):
return pangocffi.patch_cairo_context(cairocffi.Context(self.surface))
def set_source_rgb(self, colour: ColorsType, ctx: cairocffi.Context | None = None):
# If an alternate context is not provided then we draw to the
# drawer's default context
if ctx is None:
ctx = self.ctx
if isinstance(colour, list):
if len(colour) == 0:
# defaults to black
ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
elif len(colour) == 1:
ctx.set_source_rgba(*utils.rgb(colour[0]))
else:
linear = cairocffi.LinearGradient(0.0, 0.0, 0.0, self.height)
step_size = 1.0 / (len(colour) - 1)
step = 0.0
for c in colour:
linear.add_color_stop_rgba(step, *utils.rgb(c))
step += step_size
ctx.set_source(linear)
else:
ctx.set_source_rgba(*utils.rgb(colour))
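# Note: passing a list of colours produces a vertical gradient spanning the
# drawer's height, e.g. set_source_rgb(["ff0000", "0000ff"]) fades from red at
# the top to blue at the bottom; a string or single-element list gives a solid
# colour, and an empty list falls back to opaque black.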
def clear(self, colour):
self.set_source_rgb(colour)
self.ctx.rectangle(0, 0, self.width, self.height)
self.ctx.fill()
def textlayout(self, text, colour, font_family, font_size, font_shadow, markup=False, **kw):
"""Get a text layout"""
textlayout = drawer.TextLayout(
self, text, colour, font_family, font_size, font_shadow, markup=markup, **kw
)
return textlayout
def max_layout_size(self, texts, font_family, font_size):
sizelayout = self.textlayout("", "ffffff", font_family, font_size, None)
widths, heights = [], []
for i in texts:
sizelayout.text = i
widths.append(sizelayout.width)
heights.append(sizelayout.height)
return max(widths), max(heights)
def text_extents(self, text):
return self.ctx.text_extents(utils.scrub_to_utf8(text))
def font_extents(self):
return self.ctx.font_extents()
def fit_fontsize(self, heightlimit):
"""Try to find a maximum font size that fits any strings within the height"""
self.ctx.set_font_size(heightlimit)
asc, desc, height, _, _ = self.font_extents()
self.ctx.set_font_size(int(heightlimit * heightlimit / height))
return self.font_extents()
def fit_text(self, strings, heightlimit):
"""Try to find a maximum font size that fits all strings within the height"""
self.ctx.set_font_size(heightlimit)
_, _, _, maxheight, _, _ = self.ctx.text_extents("".join(strings))
if not maxheight:
return 0, 0
self.ctx.set_font_size(int(heightlimit * heightlimit / maxheight))
maxwidth, maxheight = 0, 0
for i in strings:
_, _, x, y, _, _ = self.ctx.text_extents(i)
maxwidth = max(maxwidth, x)
maxheight = max(maxheight, y)
return maxwidth, maxheight
def draw_vbar(self, color, x, y1, y2, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x, y1)
self.ctx.line_to(x, y2)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def draw_hbar(self, color, x1, x2, y, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x1, y)
self.ctx.line_to(x2, y)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
|
|
import copy
import pickle
import warnings
import pandas as pd
from .tools.normalize import normalize as normalizer
from .tools.reduce import reduce as reducer
from .tools.align import align as aligner
from .tools.format_data import format_data
from ._shared.helpers import convert_text, get_dtype
from .config import __version__
class DataGeometry(object):
"""
Hypertools data object class
A DataGeometry object contains the data, figure handles and transform
functions used to create a plot. Note: this class should not be called
directly, but is used by the `hyp.plot` function to create a plot object.
Parameters
----------
fig : matplotlib.Figure
The matplotlib figure handle for the plot
ax : matplotlib.Axes
The matplotlib axes handle for the plot
line_ani : matplotlib.animation.FuncAnimation
The matplotlib animation handle (if the plot is an animation)
data : list
A list of numpy arrays representing the raw data
xform_data : list
A list of numpy arrays representing the transformed data
reduce : dict
A dictionary containing the reduction model and parameters
align : dict
A dictionary containing align model and parameters
normalize : str
A string representing the kind of normalization
kwargs : dict
A dictionary containing all kwargs passed to the plot function
version : str
The version of the software used to create the class instance
"""
def __init__(self, fig=None, ax=None, line_ani=None, data=None, xform_data=None,
reduce=None, align=None, normalize=None, semantic=None,
vectorizer=None, corpus=None, kwargs=None, version=__version__,
dtype=None):
# matplotlib figure handle
self.fig = fig
# matplotlib axis handle
self.ax = ax
# matplotlib line_ani handle (if its an animation)
self.line_ani = line_ani
# convert to numpy array if text
if isinstance(data, list):
data = list(map(convert_text, data))
self.data = data
self.dtype = get_dtype(data)
# the transformed data
self.xform_data = xform_data
# dictionary of model and model_params
self.reduce = reduce
# 'hyper', 'SRM' or None
self.align = align
# 'within', 'across', 'row' or False
self.normalize = normalize
# text params
self.semantic = semantic
self.vectorizer = vectorizer
self.corpus = corpus
# dictionary of kwargs
self.kwargs = kwargs
# hypertools version
self.version = version
def get_data(self):
"""Return a copy of the data"""
return copy.copy(self.data)
def get_formatted_data(self):
"""Return a formatted copy of the data"""
return format_data(self.data)
# a function to transform new data
def transform(self, data=None):
"""
Return transformed data, or transform new data using the same model
parameters
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to transform. If no data is passed, the xform_data from
the DataGeometry object will be returned.
Returns
----------
xformed_data : list of numpy arrays
The transformed data
"""
# if no new data passed,
if data is None:
return self.xform_data
else:
formatted = format_data(
data,
semantic=self.semantic,
vectorizer=self.vectorizer,
corpus=self.corpus,
ppca=True)
norm = normalizer(formatted, normalize=self.normalize)
reduction = reducer(
norm,
reduce=self.reduce,
ndims=self.reduce['params']['n_components'])
return aligner(reduction, align=self.align)
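# A hedged usage sketch (illustration only; `geo` is a DataGeometry returned by
# hyp.plot and `new_data` is shaped like the original data):
#
#     xformed = geo.transform()              # the stored transformed data
#     xformed_new = geo.transform(new_data)  # new data pushed through the same
#                                            # normalize -> reduce -> align pipeline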
# a function to plot the data
def plot(self, data=None, **kwargs):
"""
Plot the data
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to plot. If no data is passed, the xform_data stored on
the DataGeometry object will be plotted.
kwargs : keyword arguments
Any keyword arguments supported by `hypertools.plot` are also supported
by this method
Returns
----------
geo : hypertools.DataGeometry
A new data geometry object
"""
# import plot here to avoid circular imports
from .plot.plot import plot as plotter
if data is None:
d = copy.copy(self.data)
transform = copy.copy(self.xform_data)
if any([k in kwargs for k in ['reduce', 'align', 'normalize',
'semantic', 'vectorizer', 'corpus']]):
d = copy.copy(self.data)
transform = None
else:
d = data
transform = None
# get kwargs and update with new kwargs
new_kwargs = copy.copy(self.kwargs)
update_kwargs = dict(transform=transform, reduce=self.reduce,
align=self.align, normalize=self.normalize,
semantic=self.semantic, vectorizer=self.vectorizer,
corpus=self.corpus)
new_kwargs.update(update_kwargs)
for key in kwargs:
new_kwargs.update({key : kwargs[key]})
return plotter(d, **new_kwargs)
def save(self, fname, compression=None):
"""
Save method for the data geometry object
The object will be saved as a 'geo' file: a pickled copy of the DataGeometry
instance, with the matplotlib figure, axis and animation handles stripped out
since they cannot be restored across sessions.
Parameters
----------
fname : str
A name for the file. If the file extension (.geo) is not specified,
it will be appended.
"""
if compression is not None:
warnings.warn("Hypertools has switched from deepdish to pickle "
"for saving DataGeomtry objects. 'compression' "
"argument has no effect and will be removed in a "
"future version",
FutureWarning)
# automatically add extension if not present
if not fname.endswith('.geo'):
fname += '.geo'
# can't save/restore matplotlib objects across sessions
curr_fig = self.fig
curr_ax = self.ax
curr_line_ani = self.line_ani
curr_data = self.data
# convert pandas DataFrames to dicts of
# {column_name: list(column_values)} to fix I/O compatibility
# issues across certain pandas versions. Expected self.data
# format is restored by hypertools.load
if isinstance(curr_data, pd.DataFrame):
data_out_fmt = curr_data.to_dict('list')
else:
data_out_fmt = curr_data
try:
self.fig = self.ax = self.line_ani = None
self.data = data_out_fmt
# save
with open(fname, 'wb') as f:
pickle.dump(self, f)
finally:
# make sure we don't mutate attribute values whether or not
# save was successful
self.fig = curr_fig
self.ax = curr_ax
self.line_ani = curr_line_ani
self.data = curr_data
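# A hedged usage sketch (file name is illustrative):
#
#     geo.save('my_analysis')   # writes my_analysis.geo (extension added if missing)
#
# The in-memory object keeps its figure/axis/animation handles after saving.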
|
|
"""
Adaptive numerical evaluation of SymPy expressions, using mpmath
for mathematical functions.
"""
from sympy.mpmath.libmpf import (from_int, from_rational, fzero, normalize,
bitcount, round_nearest, to_str, fone, fnone, fhalf, to_float,
from_float, to_int, mpf_lt, mpf_sqrt, mpf_cmp, mpf_abs,
mpf_pow_int, mpf_shift, mpf_add, mpf_mul, mpf_neg)
import sympy.mpmath.libmpc as libmpc
from sympy.mpmath.settings import dps_to_prec
from sympy.mpmath import mpf, mpc, quadts, quadosc, mp, make_mpf
from sympy.mpmath.gammazeta import mpf_gamma
from sympy.mpmath.libelefun import mpf_pi, mpf_log, mpf_pow, mpf_sin, mpf_cos, \
mpf_atan, mpf_atan2, mpf_e, mpf_exp
from sympy.mpmath.libmpf import MP_BASE, from_man_exp
from sympy.mpmath.calculus import shanks, richardson, nsum
from sympy.mpmath import inf as mpmath_inf
from sympy.mpmath.gammazeta import mpf_bernoulli
import math
from basic import Basic, C, S
from function import Function
from sympify import sympify
LG10 = math.log(10,2)
# Used in a few places as placeholder values to denote exponents and
# precision levels, e.g. of exact numbers. Must be careful to avoid
# passing these to mpmath functions or returning them in final results.
INF = 1e1000
MINUS_INF = -1e1000
# ~= 100 digits. Real men set this to INF.
DEFAULT_MAXPREC = 333
class PrecisionExhausted(ArithmeticError):
pass
#----------------------------------------------------------------------------#
# #
# Helper functions for arithmetic and complex parts #
# #
#----------------------------------------------------------------------------#
"""
An mpf value tuple is a tuple of integers (sign, man, exp, bc)
representing a floating-point number.
A temporary result is a tuple (re, im, re_acc, im_acc) where
re and im are nonzero mpf value tuples representing approximate
numbers, or None to denote exact zeros.
re_acc, im_acc are integers denoting log2(e) where e is the estimated
relative accuracy of the respective complex part, but may be anything
if the corresponding complex part is None.
"""
def fastlog(x):
"""Fast approximation of log2(x) for an mpf value tuple x."""
if not x or x == fzero:
return MINUS_INF
# log2(x) ~= exponent + width of mantissa
# Note: this actually gives ceil(log2(x)), which is a useful
# feature for interval arithmetic.
return x[2] + x[3]
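# Worked example (illustration only): from_int(10) normalizes to the value tuple
# (sign=0, man=5, exp=1, bc=3), so fastlog returns 1 + 3 = 4 == ceil(log2(10)).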
def complex_accuracy(result):
"""
Returns relative accuracy of a complex number with given accuracies
for the real and imaginary parts. The relative accuracy is defined
in the complex norm sense as ||z|+|error|| / |z| where error
is equal to (real absolute error) + (imag absolute error)*i.
The full expression for the (logarithmic) error can be approximated
easily by using the max norm to approximate the complex norm.
In the worst case (re and im equal), this is wrong by a factor
sqrt(2), or by log2(sqrt(2)) = 0.5 bit.
"""
re, im, re_acc, im_acc = result
if not im:
if not re:
return INF
return re_acc
if not re:
return im_acc
re_size = fastlog(re)
im_size = fastlog(im)
absolute_error = max(re_size-re_acc, im_size-im_acc)
relative_error = absolute_error - max(re_size, im_size)
return -relative_error
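# Worked example (illustration only): with re of size 10 bits accurate to 40 bits
# and im of size 3 bits accurate to 50 bits, absolute_error = max(10-40, 3-50) = -30,
# relative_error = -30 - max(10, 3) = -40, i.e. roughly 40 good bits overall.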
def get_abs(expr, prec, options):
re, im, re_acc, im_acc = evalf(expr, prec+2, options)
if not re:
re, re_acc, im, im_acc = im, im_acc, re, re_acc
if im:
return libmpc.mpc_abs((re, im), prec), None, re_acc, None
else:
return mpf_abs(re), None, re_acc, None
def get_complex_part(expr, no, prec, options):
"""no = 0 for real part, no = 1 for imaginary part"""
workprec = prec
i = 0
while 1:
res = evalf(expr, workprec, options)
value, accuracy = res[no::2]
if (not value) or accuracy >= prec:
return value, None, accuracy, None
workprec += max(30, 2**i)
i += 1
def evalf_abs(expr, prec, options):
return get_abs(expr.args[0], prec, options)
def evalf_re(expr, prec, options):
return get_complex_part(expr.args[0], 0, prec, options)
def evalf_im(expr, prec, options):
return get_complex_part(expr.args[0], 1, prec, options)
def finalize_complex(re, im, prec):
assert re and im
if re == fzero and im == fzero:
raise ValueError("got complex zero with unknown accuracy")
size_re = fastlog(re)
size_im = fastlog(im)
# Convert fzeros to scaled zeros
if re == fzero:
re = mpf_shift(fone, size_im-prec)
size_re = fastlog(re)
elif im == fzero:
im = mpf_shift(fone, size_re-prec)
size_im = fastlog(im)
if size_re > size_im:
re_acc = prec
im_acc = prec + min(-(size_re - size_im), 0)
else:
im_acc = prec
re_acc = prec + min(-(size_im - size_re), 0)
return re, im, re_acc, im_acc
def chop_parts(value, prec):
"""
Chop off tiny real or complex parts.
"""
re, im, re_acc, im_acc = value
# Method 1: chop based on absolute value
if re and (fastlog(re) < -prec+4):
re, re_acc = None, None
if im and (fastlog(im) < -prec+4):
im, im_acc = None, None
# Method 2: chop if inaccurate and relatively small
if re and im:
delta = fastlog(re) - fastlog(im)
if re_acc < 2 and (delta - re_acc <= -prec+4):
re, re_acc = None, None
if im_acc < 2 and (delta - im_acc >= prec-4):
im, im_acc = None, None
return re, im, re_acc, im_acc
def check_target(expr, result, prec):
a = complex_accuracy(result)
if a < prec:
raise PrecisionExhausted("Failed to distinguish the expression: \n\n%s\n\n"
"from zero. Try simplifying the input, using chop=True, or providing "
"a higher maxprec for evalf" % (expr))
def get_integer_part(expr, no, options, return_ints=False):
"""
With no = 1, computes ceiling(expr)
With no = -1, computes floor(expr)
Note: this function either gives the exact result or signals failure.
"""
# The expression is likely less than 2^30 or so
assumed_size = 30
ire, iim, ire_acc, iim_acc = evalf(expr, assumed_size, options)
# We now know the size, so we can calculate how much extra precision
# (if any) is needed to get within the nearest integer
if ire and iim:
gap = max(fastlog(ire)-ire_acc, fastlog(iim)-iim_acc)
elif ire:
gap = fastlog(ire)-ire_acc
elif iim:
gap = fastlog(iim)-iim_acc
else:
# ... or maybe the expression was exactly zero
return None, None, None, None
margin = 10
if gap >= -margin:
ire, iim, ire_acc, iim_acc = evalf(expr, margin+assumed_size+gap, options)
# We can now easily find the nearest integer, but to find floor/ceil, we
# must also calculate whether the difference to the nearest integer is
# positive or negative (which may fail if very close)
def calc_part(expr, nexpr):
nint = int(to_int(nexpr, round_nearest))
expr = C.Add(expr, -nint, evaluate=False)
x, _, x_acc, _ = evalf(expr, 10, options)
check_target(expr, (x, None, x_acc, None), 3)
nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
nint = from_int(nint)
return nint, fastlog(nint) + 10
re, im, re_acc, im_acc = None, None, None, None
if ire:
re, re_acc = calc_part(C.re(expr, evaluate=False), ire)
if iim:
im, im_acc = calc_part(C.im(expr, evaluate=False), iim)
if return_ints:
return int(to_int(re or fzero)), int(to_int(im or fzero))
return re, im, re_acc, im_acc
def evalf_ceiling(expr, prec, options):
return get_integer_part(expr.args[0], 1, options)
def evalf_floor(expr, prec, options):
return get_integer_part(expr.args[0], -1, options)
#----------------------------------------------------------------------------#
# #
# Arithmetic operations #
# #
#----------------------------------------------------------------------------#
def add_terms(terms, prec, target_prec):
"""
Helper for evalf_add. Adds a list of (mpfval, accuracy) terms.
"""
if len(terms) == 1:
if not terms[0]:
# XXX: this is supposed to represent a scaled zero
return mpf_shift(fone, target_prec), -1
return terms[0]
sum_man, sum_exp, absolute_error = 0, 0, MINUS_INF
for x, accuracy in terms:
if not x:
continue
sign, man, exp, bc = x
if sign:
man = -man
absolute_error = max(absolute_error, bc+exp-accuracy)
delta = exp - sum_exp
if exp >= sum_exp:
if delta > 4*prec:
sum_man = man
sum_exp = exp
else:
sum_man += man << delta
else:
if (-delta) > 4*prec:
pass
else:
sum_man = (sum_man << (-delta)) + man
sum_exp = exp
if absolute_error == MINUS_INF:
return None, None
if not sum_man:
# XXX: this is supposed to represent a scaled zero
return mpf_shift(fone, absolute_error), -1
if sum_man < 0:
sum_sign = 1
sum_man = -sum_man
else:
sum_sign = 0
sum_bc = bitcount(sum_man)
sum_accuracy = sum_exp + sum_bc - absolute_error
r = normalize(sum_sign, sum_man, sum_exp, sum_bc, target_prec,
round_nearest), sum_accuracy
#print "returning", to_str(r[0],50), r[1]
return r
def evalf_add(v, prec, options):
args = v.args
target_prec = prec
i = 0
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
options['maxprec'] = min(oldmaxprec, 2*prec)
try:
while 1:
terms = [evalf(arg, prec+10, options) for arg in args]
re, re_accuracy = add_terms([(a[0],a[2]) for a in terms if a[0]], prec, target_prec)
im, im_accuracy = add_terms([(a[1],a[3]) for a in terms if a[1]], prec, target_prec)
accuracy = complex_accuracy((re, im, re_accuracy, im_accuracy))
if accuracy >= target_prec:
if options.get('verbose'):
print "ADD: wanted", target_prec, "accurate bits, got", re_accuracy, im_accuracy
return re, im, re_accuracy, im_accuracy
else:
diff = target_prec - accuracy
if (prec-target_prec) > options.get('maxprec', DEFAULT_MAXPREC):
return re, im, re_accuracy, im_accuracy
prec = prec + max(10+2**i, diff)
options['maxprec'] = min(oldmaxprec, 2*prec)
if options.get('verbose'):
print "ADD: restarting with prec", prec
i += 1
finally:
options['maxprec'] = oldmaxprec
# Helper for complex multiplication
# XXX: should be able to multiply directly, and use complex_accuracy
# to obtain the final accuracy
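# (a + b*i)*(c + d*i) = (a*c - b*d) + (a*d + b*c)*i; the accuracy of each partial
# product is taken as the minimum of the accuracies of its two factors.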
def cmul((a, aacc), (b, bacc), (c, cacc), (d, dacc), prec, target_prec):
A, Aacc = mpf_mul(a,c,prec), min(aacc, cacc)
B, Bacc = mpf_mul(mpf_neg(b),d,prec), min(bacc, dacc)
C, Cacc = mpf_mul(a,d,prec), min(aacc, dacc)
D, Dacc = mpf_mul(b,c,prec), min(bacc, cacc)
re, re_accuracy = add_terms([(A, Aacc), (B, Bacc)], prec, target_prec)
im, im_accuracy = add_terms([(C, Cacc), (D, Dacc)], prec, target_prec)
return re, im, re_accuracy, im_accuracy
def evalf_mul(v, prec, options):
args = v.args
# With guard digits, multiplication in the real case does not destroy
# accuracy. This is also true in the complex case when considering the
# total accuracy; however accuracy for the real or imaginary parts
# separately may be lower.
acc = prec
target_prec = prec
# XXX: big overestimate
prec = prec + len(args) + 5
direction = 0
# Empty product is 1
man, exp, bc = MP_BASE(1), 0, 1
direction = 0
complex_factors = []
# First, we multiply all pure real or pure imaginary numbers.
# direction tells us that the result should be multiplied by
# i**direction
for arg in args:
re, im, a, aim = evalf(arg, prec, options)
if re and im:
complex_factors.append((re, im, a, aim))
continue
elif re:
s, m, e, b = re
elif im:
a = aim
direction += 1
s, m, e, b = im
else:
return None, None, None, None
direction += 2*s
man *= m
exp += e
bc += b
if bc > 3*prec:
man >>= prec
exp += prec
acc = min(acc, a)
sign = (direction & 2) >> 1
v = normalize(sign, man, exp, bitcount(man), prec, round_nearest)
if complex_factors:
# Multiply first complex number by the existing real scalar
re, im, re_acc, im_acc = complex_factors[0]
re = mpf_mul(re, v, prec)
im = mpf_mul(im, v, prec)
re_acc = min(re_acc, acc)
im_acc = min(im_acc, acc)
# Multiply consecutive complex factors
complex_factors = complex_factors[1:]
for wre, wim, wre_acc, wim_acc in complex_factors:
re, im, re_acc, im_acc = cmul((re, re_acc), (im,im_acc),
(wre,wre_acc), (wim,wim_acc), prec, target_prec)
if options.get('verbose'):
print "MUL: obtained accuracy", re_acc, im_acc, "expected", target_prec
# multiply by i
if direction & 1:
return mpf_neg(im), re, re_acc, im_acc
else:
return re, im, re_acc, im_acc
else:
# multiply by i
if direction & 1:
return None, v, None, acc
else:
return v, None, acc, None
def evalf_pow(v, prec, options):
target_prec = prec
base, exp = v.args
# We handle x**n separately. This has two purposes: 1) it is much
# faster, because we avoid calling evalf on the exponent, and 2) it
# allows better handling of real/imaginary parts that are exactly zero
if exp.is_Integer:
p = exp.p
# Exact
if not p:
return fone, None, prec, None
# Exponentiation by p magnifies relative error by |p|, so the
# base must be evaluated with increased precision if p is large
prec += int(math.log(abs(p),2))
re, im, re_acc, im_acc = evalf(base, prec+5, options)
# Real to integer power
if re and not im:
return mpf_pow_int(re, p, target_prec), None, target_prec, None
# (x*I)**n = I**n * x**n
if im and not re:
z = fpowi(im, p, target_prec)
case = p % 4
if case == 0: return z, None, target_prec, None
if case == 1: return None, z, None, target_prec
if case == 2: return mpf_neg(z), None, target_prec, None
if case == 3: return None, mpf_neg(z), None, target_prec
# Zero raised to an integer power
if not re:
return None, None, None, None
# General complex number to arbitrary integer power
re, im = libmpc.mpc_pow_int((re, im), p, prec)
# Assumes full accuracy in input
return finalize_complex(re, im, target_prec)
# Pure square root
if exp is S.Half:
xre, xim, xre_acc, xim_acc = evalf(base, prec+5, options)
# General complex square root
if xim:
re, im = libmpc.mpc_sqrt((xre or fzero, xim), prec)
return finalize_complex(re, im, prec)
if not xre:
return None, None, None, None
# Square root of a negative real number
if mpf_lt(xre, fzero):
return None, mpf_sqrt(mpf_neg(xre), prec), None, prec
# Positive square root
return mpf_sqrt(xre, prec), None, prec, None
# We first evaluate the exponent to find its magnitude
# This determines the working precision that must be used
prec += 10
yre, yim, yre_acc, yim_acc = evalf(exp, prec, options)
# Special cases: x**0
if not (yre or yim):
return fone, None, prec, None
ysize = fastlog(yre)
# Restart if too big
# XXX: prec + ysize might exceed maxprec
if ysize > 5:
prec += ysize
yre, yim, yre_acc, yim_acc = evalf(exp, prec, options)
# Pure exponential function; no need to evalf the base
if base is S.Exp1:
if yim:
re, im = libmpc.mpc_exp((yre or fzero, yim), prec)
return finalize_complex(re, im, target_prec)
return mpf_exp(yre, target_prec), None, target_prec, None
xre, xim, xre_acc, xim_acc = evalf(base, prec+5, options)
# 0**y
if not (xre or xim):
return None, None, None, None
# (real ** complex) or (complex ** complex)
if yim:
re, im = libmpc.mpc_pow((xre or fzero, xim or fzero), (yre or fzero, yim),
target_prec)
return finalize_complex(re, im, target_prec)
# complex ** real
if xim:
re, im = libmpc.mpc_pow_mpf((xre or fzero, xim), yre, target_prec)
return finalize_complex(re, im, target_prec)
# negative ** real
elif mpf_lt(xre, fzero):
re, im = libmpc.mpc_pow_mpf((xre, fzero), yre, target_prec)
return finalize_complex(re, im, target_prec)
# positive ** real
else:
return mpf_pow(xre, yre, target_prec), None, target_prec, None
#----------------------------------------------------------------------------#
# #
# Special functions #
# #
#----------------------------------------------------------------------------#
def evalf_trig(v, prec, options):
"""
This function handles sin and cos of real arguments.
TODO: should also handle tan and complex arguments.
"""
if v.func is C.cos:
func = mpf_cos
elif v.func is C.sin:
func = mpf_sin
else:
raise NotImplementedError
arg = v.args[0]
# 20 extra bits is possibly overkill. It does make the need
# to restart very unlikely
xprec = prec + 20
re, im, re_accuracy, im_accuracy = evalf(arg, xprec, options)
if im:
raise NotImplementedError
if not re:
if v.func is C.cos:
return fone, None, prec, None
elif v.func is C.sin:
return None, None, None, None
else:
raise NotImplementedError
# For trigonometric functions, we are interested in the
# fixed-point (absolute) accuracy of the argument.
xsize = fastlog(re)
# Magnitude <= 1.0. OK to compute directly, because there is no
# danger of hitting the first root of cos (with sin, magnitude
# <= 2.0 would actually be ok)
if xsize < 1:
return func(re, prec, round_nearest), None, prec, None
# Very large
if xsize >= 10:
xprec = prec + xsize
re, im, re_accuracy, im_accuracy = evalf(arg, xprec, options)
# Need to repeat in case the argument is very close to a
# multiple of pi (or pi/2), hitting close to a root
while 1:
y = func(re, prec, round_nearest)
ysize = fastlog(y)
gap = -ysize
accuracy = (xprec - xsize) - gap
if accuracy < prec:
if options.get('verbose'):
print "SIN/COS", accuracy, "wanted", prec, "gap", gap
print to_str(y,10)
if xprec > options.get('maxprec', DEFAULT_MAXPREC):
return y, None, accuracy, None
xprec += gap
re, im, re_accuracy, im_accuracy = evalf(arg, xprec, options)
continue
else:
return y, None, prec, None
def evalf_log(expr, prec, options):
arg = expr.args[0]
workprec = prec+10
xre, xim, xacc, _ = evalf(arg, workprec, options)
if xim:
# XXX: use get_abs etc instead
re = evalf_log(C.log(C.abs(arg, evaluate=False), evaluate=False), prec, options)
im = mpf_atan2(xim, xre or fzero, prec)
return re[0], im, re[2], prec
imaginary_term = (mpf_cmp(xre, fzero) < 0)
re = mpf_log(mpf_abs(xre), prec, round_nearest)
size = fastlog(re)
if prec - size > workprec:
# We actually need to compute 1+x accurately, not x
arg = C.Add(S.NegativeOne,arg,evaluate=False)
xre, xim, xre_acc, xim_acc = evalf_add(arg, prec, options)
prec2 = workprec - fastlog(xre)
re = mpf_log(mpf_add(xre, fone, prec2), prec, round_nearest)
re_acc = prec
if imaginary_term:
return re, mpf_pi(prec), re_acc, prec
else:
return re, None, re_acc, None
def evalf_atan(v, prec, options):
arg = v.args[0]
xre, xim, reacc, imacc = evalf(arg, prec+5, options)
if xim:
raise NotImplementedError
return mpf_atan(xre, prec, round_nearest), None, prec, None
def evalf_piecewise(expr, prec, options):
if 'subs' in options:
expr = expr.subs(options['subs'])
del options['subs']
if hasattr(expr,'func'):
return evalf(expr, prec, options)
if type(expr) == float:
return evalf(C.Real(expr), prec, options)
if type(expr) == int:
return evalf(C.Integer(expr), prec, options)
# We still have undefined symbols
raise NotImplementedError
def evalf_bernoulli(expr, prec, options):
arg = expr.args[0]
if not arg.is_Integer:
raise ValueError("Bernoulli number index must be an integer")
n = int(arg)
b = mpf_bernoulli(n, prec, round_nearest)
if b == fzero:
return None, None, None, None
return b, None, prec, None
#----------------------------------------------------------------------------#
# #
# High-level operations #
# #
#----------------------------------------------------------------------------#
def as_mpmath(x, prec, options):
x = sympify(x)
if isinstance(x, C.Zero):
return mpf(0)
if isinstance(x, C.Infinity):
return mpf('inf')
if isinstance(x, C.NegativeInfinity):
return mpf('-inf')
# XXX
re, im, _, _ = evalf(x, prec, options)
if im:
return mpc(re or fzero, im)
return mpf(re)
def do_integral(expr, prec, options):
func = expr.args[0]
x, (xlow, xhigh) = expr.args[1][0]
orig = mp.prec
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
options['maxprec'] = min(oldmaxprec, 2*prec)
try:
mp.prec = prec+5
xlow = as_mpmath(xlow, prec+15, options)
xhigh = as_mpmath(xhigh, prec+15, options)
# Integration is like summation, and we can phone home from
# the integrand function to update accuracy summation style
# Note that this accuracy is inaccurate, since it fails
# to account for the variable quadrature weights,
# but it is better than nothing
have_part = [False, False]
max_real_term = [MINUS_INF]
max_imag_term = [MINUS_INF]
def f(t):
re, im, re_acc, im_acc = evalf(func, mp.prec, {'subs':{x:t}})
have_part[0] = re or have_part[0]
have_part[1] = im or have_part[1]
max_real_term[0] = max(max_real_term[0], fastlog(re))
max_imag_term[0] = max(max_imag_term[0], fastlog(im))
if im:
return mpc(re or fzero, im)
return mpf(re or fzero)
if options.get('quad') == 'osc':
A = C.Wild('A', exclude=[x])
B = C.Wild('B', exclude=[x])
D = C.Wild('D')
m = func.match(C.cos(A*x+B)*D)
if not m:
m = func.match(C.sin(A*x+B)*D)
if not m:
raise ValueError("An integrand of the form sin(A*x+B)*f(x) "
"or cos(A*x+B)*f(x) is required for oscillatory quadrature")
period = as_mpmath(2*S.Pi/m[A], prec+15, options)
result = quadosc(f, [xlow, xhigh], period=period)
# XXX: quadosc does not do error detection yet
quadrature_error = MINUS_INF
else:
result, quadrature_error = quadts(f, [xlow, xhigh], error=1)
quadrature_error = fastlog(quadrature_error._mpf_)
finally:
options['maxprec'] = oldmaxprec
mp.prec = orig
if have_part[0]:
re = result.real._mpf_
if re == fzero:
re = mpf_shift(fone, min(-prec,-max_real_term[0],-quadrature_error))
re_acc = -1
else:
re_acc = -max(max_real_term[0]-fastlog(re)-prec, quadrature_error)
else:
re, re_acc = None, None
if have_part[1]:
im = result.imag._mpf_
if im == fzero:
im = mpf_shift(fone, min(-prec,-max_imag_term[0],-quadrature_error))
im_acc = -1
else:
im_acc = -max(max_imag_term[0]-fastlog(im)-prec, quadrature_error)
else:
im, im_acc = None, None
result = re, im, re_acc, im_acc
return result
def evalf_integral(expr, prec, options):
workprec = prec
i = 0
maxprec = options.get('maxprec', INF)
while 1:
result = do_integral(expr, workprec, options)
accuracy = complex_accuracy(result)
if accuracy >= prec or workprec >= maxprec:
return result
workprec += prec - max(-2**i, accuracy)
i += 1
def check_convergence(numer, denom, n):
"""
Returns (h, g, p) where
-- h is:
> 0 for convergence of rate 1/factorial(n)**h
< 0 for divergence of rate factorial(n)**(-h)
= 0 for geometric or polynomial convergence or divergence
-- abs(g) is:
> 1 for geometric convergence of rate 1/h**n
< 1 for geometric divergence of rate h**n
= 1 for polynomial convergence or divergence
(g < 0 indicates an alternating series)
-- p is:
> 1 for polynomial convergence of rate 1/n**h
<= 1 for polynomial divergence of rate n**(-h)
"""
npol = C.Poly(numer, n)
dpol = C.Poly(denom, n)
p = npol.degree
q = dpol.degree
rate = q - p
if rate:
return rate, None, None
constant = dpol.lead_term[0] / npol.lead_term[0]
if abs(constant) != 1:
return rate, constant, None
if npol.degree == dpol.degree == 0:
return rate, constant, 0
pc = list(npol.iter_all_terms())[1][0]
qc = list(dpol.iter_all_terms())[1][0]
return rate, constant, qc-pc
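# Worked example (illustration only): for the successive-term ratio 1/(n+1)
# (as arises for sum(1/n!)), numer = 1 has degree 0 and denom = n+1 has degree 1,
# so check_convergence returns (1, None, None): convergence like 1/factorial(n).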
def hypsum(expr, n, start, prec):
"""
Sum a rapidly convergent infinite hypergeometric series with
given general term, e.g. e = hypsum(1/factorial(n), n). The
quotient between successive terms must be a quotient of integer
polynomials.
"""
from sympy import hypersimp, lambdify
if start:
expr = expr.subs(n, n+start)
hs = hypersimp(expr, n)
if hs is None:
raise NotImplementedError("a hypergeometric series is required")
num, den = hs.as_numer_denom()
func1 = lambdify(n, num)
func2 = lambdify(n, den)
h, g, p = check_convergence(num, den, n)
if h < 0:
raise ValueError("Sum diverges like (n!)^%i" % (-h))
# Direct summation if geometric or faster
if h > 0 or (h == 0 and abs(g) > 1):
one = MP_BASE(1) << prec
term = expr.subs(n, 0)
term = (MP_BASE(term.p) << prec) // term.q
s = term
k = 1
while abs(term) > 5:
term *= MP_BASE(func1(k-1))
term //= MP_BASE(func2(k-1))
s += term
k += 1
return from_man_exp(s, -prec)
else:
alt = g < 0
if abs(g) < 1:
raise ValueError("Sum diverges like (%i)^n" % abs(1/g))
if p < 1 or (p == 1 and not alt):
raise ValueError("Sum diverges like n^%i" % (-p))
# We have polynomial convergence: use Richardson extrapolation
# Need to use at least quad precision because a lot of cancellation
# might occur in the extrapolation process
prec2 = 4*prec
one = MP_BASE(1) << prec2
term = expr.subs(n, 0)
term = (MP_BASE(term.p) << prec2) // term.q
def summand(k, _term=[term]):
if k:
k = int(k)
_term[0] *= MP_BASE(func1(k-1))
_term[0] //= MP_BASE(func2(k-1))
return make_mpf(from_man_exp(_term[0], -prec2))
orig = mp.prec
try:
mp.prec = prec
v = nsum(summand, [0, mpmath_inf], method='richardson')
finally:
mp.prec = orig
return v._mpf_
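# A hedged usage sketch (hypsum is normally reached via Sum(...).evalf()):
#
#     from sympy import Symbol, factorial
#     n = Symbol('n')
#     e_mpf = hypsum(1/factorial(n), n, 0, 53)   # ~53-bit mpf value tuple for e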
def evalf_sum(expr, prec, options):
func = expr.function
limits = expr.limits
if len(limits) != 1 or not isinstance(limits[0], tuple) or \
len(limits[0]) != 3:
raise NotImplementedError
prec2 = prec+10
try:
n, a, b = limits[0]
if b != S.Infinity or a != int(a):
raise NotImplementedError
# Use fast hypergeometric summation if possible
v = hypsum(func, n, int(a), prec2)
delta = prec - fastlog(v)
if fastlog(v) < -10:
v = hypsum(func, n, int(a), delta)
return v, None, min(prec, delta), None
except NotImplementedError:
# Euler-Maclaurin summation for general series
eps = C.Real(2.0)**(-prec)
for i in range(1, 5):
m = n = 2**i * prec
s, err = expr.euler_maclaurin(m=m, n=n, eps=eps, \
eval_integral=False)
err = err.evalf()
if err <= eps:
break
err = fastlog(evalf(abs(err), 20, options)[0])
re, im, re_acc, im_acc = evalf(s, prec2, options)
re_acc = max(re_acc, -err)
im_acc = max(im_acc, -err)
return re, im, re_acc, im_acc
#----------------------------------------------------------------------------#
# #
# Symbolic interface #
# #
#----------------------------------------------------------------------------#
def evalf_symbol(x, prec, options):
val = options['subs'][x]
if isinstance(val, mpf):
if not val:
return None, None, None, None
return val._mpf_, None, prec, None
else:
if not '_cache' in options:
options['_cache'] = {}
cache = options['_cache']
cached, cached_prec = cache.get(x.name, (None, MINUS_INF))
if cached_prec >= prec:
return cached
v = evalf(sympify(val), prec, options)
cache[x.name] = (v, prec)
return v
evalf_table = None
def _create_evalf_table():
global evalf_table
evalf_table = {
C.Symbol : evalf_symbol,
C.Dummy : evalf_symbol,
C.Real : lambda x, prec, options: (x._mpf_, None, prec, None),
C.Rational : lambda x, prec, options: (from_rational(x.p, x.q, prec), None, prec, None),
C.Integer : lambda x, prec, options: (from_int(x.p, prec), None, prec, None),
C.Zero : lambda x, prec, options: (None, None, prec, None),
C.One : lambda x, prec, options: (fone, None, prec, None),
C.Half : lambda x, prec, options: (fhalf, None, prec, None),
C.Pi : lambda x, prec, options: (mpf_pi(prec), None, prec, None),
C.Exp1 : lambda x, prec, options: (mpf_e(prec), None, prec, None),
C.ImaginaryUnit : lambda x, prec, options: (None, fone, None, prec),
C.NegativeOne : lambda x, prec, options: (fnone, None, prec, None),
C.exp : lambda x, prec, options: evalf_pow(C.Pow(S.Exp1, x.args[0],
evaluate=False), prec, options),
C.cos : evalf_trig,
C.sin : evalf_trig,
C.Add : evalf_add,
C.Mul : evalf_mul,
C.Pow : evalf_pow,
C.log : evalf_log,
C.atan : evalf_atan,
C.abs : evalf_abs,
C.re : evalf_re,
C.im : evalf_im,
C.floor : evalf_floor,
C.ceiling : evalf_ceiling,
C.Integral : evalf_integral,
C.Sum : evalf_sum,
C.Piecewise : evalf_piecewise,
C.bernoulli : evalf_bernoulli,
}
def evalf(x, prec, options):
try:
rf = evalf_table[x.func]
r = rf(x, prec, options)
except KeyError:
#r = finalize_complex(x._eval_evalf(prec)._mpf_, fzero, prec)
try:
# Fall back to ordinary evalf if possible
if 'subs' in options:
x = x.subs(options['subs'])
r = x._eval_evalf(prec)._mpf_, None, prec, None
except AttributeError:
raise NotImplementedError
if options.get("verbose"):
print "### input", x
print "### output", to_str(r[0] or fzero, 50)
#print "### raw", r[0], r[2]
print
if options.get("chop"):
r = chop_parts(r, prec)
if options.get("strict"):
check_target(x, r, prec)
return r
def Basic_evalf(x, n=15, **options):
"""
Evaluate the given formula to an accuracy of n digits.
Optional keyword arguments:
subs=<dict>
Substitute numerical values for symbols, e.g.
subs={x:3, y:1+pi}.
maxprec=N
Allow a maximum temporary working precision of N digits
(default=100)
chop=<bool>
Replace tiny real or imaginary parts in subresults
by exact zeros (default=False)
strict=<bool>
Raise PrecisionExhausted if any subresult fails to evaluate
to full accuracy, given the available maxprec
(default=False)
quad=<str>
Choose algorithm for numerical quadrature. By default,
tanh-sinh quadrature is used. For oscillatory
integrals on an infinite interval, try quad='osc'.
verbose=<bool>
Print debug information (default=False)
"""
if not evalf_table:
_create_evalf_table()
prec = dps_to_prec(n)
if 'maxprec' in options:
options['maxprec'] = int(options['maxprec']*LG10)
else:
options['maxprec'] = max(prec, DEFAULT_MAXPREC)
try:
result = evalf(x, prec+4, options)
except NotImplementedError:
# Fall back to the ordinary evalf
v = x._eval_evalf(prec)
if v is None:
return x
try:
# If the result is numerical, normalize it
result = evalf(v, prec, options)
except:
# Probably contains symbols or unknown functions
return v
re, im, re_acc, im_acc = result
if re:
p = max(min(prec, re_acc), 1)
#re = fpos(re, p, round_nearest)
re = C.Real._new(re, p)
else:
re = S.Zero
if im:
p = max(min(prec, im_acc), 1)
#im = fpos(im, p, round_nearest)
im = C.Real._new(im, p)
return re + im*S.ImaginaryUnit
else:
return re
Basic.evalf = Basic.n = Basic_evalf
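# A hedged usage sketch of the options described above:
#
#     >>> from sympy import Symbol, pi
#     >>> x = Symbol('x')
#     >>> (pi + x).evalf(20, subs={x: 1})   # -> 4.1415926535897932385 (approx.)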
def N(x, n=15, **options):
"""
Calls x.evalf(n, **options).
Both .evalf() and N() are equivalent, use the one that you like better.
Example:
>>> from sympy import Sum, Symbol, oo
>>> k = Symbol("k")
>>> Sum(1/k**k, (k, 1, oo))
Sum(k**(-k), (k, 1, oo))
>>> N(Sum(1/k**k, (k, 1, oo)), 4)
1.291
"""
return sympify(x).evalf(n, **options)
|
|
#!/usr/bin/env python
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
module = 'axis_ram_switch'
testbench = 'test_%s_1x4_256_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
# srcs.append("../rtl/axis_ram_switch_input.v")
# srcs.append("../rtl/axis_ram_switch_output.v")
srcs.append("../rtl/axis_adapter.v")
srcs.append("../rtl/arbiter.v")
srcs.append("../rtl/priority_encoder.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
FIFO_DEPTH = 512
SPEEDUP = 0
S_COUNT = 1
M_COUNT = 4
S_DATA_WIDTH = 256
S_KEEP_ENABLE = (S_DATA_WIDTH>8)
S_KEEP_WIDTH = (S_DATA_WIDTH//8)
M_DATA_WIDTH = 64
M_KEEP_ENABLE = (M_DATA_WIDTH>8)
M_KEEP_WIDTH = (M_DATA_WIDTH//8)
ID_ENABLE = 1
ID_WIDTH = 8
DEST_WIDTH = (M_COUNT+1-1).bit_length()
USER_ENABLE = 1
USER_WIDTH = 1
USER_BAD_FRAME_VALUE = 1
USER_BAD_FRAME_MASK = 1
DROP_BAD_FRAME = 1
DROP_WHEN_FULL = 0
M_BASE = [0, 1, 2, 3]
M_TOP = [0, 1, 2, 3]
M_CONNECT = [0b1111]*M_COUNT
ARB_TYPE_ROUND_ROBIN = 1
ARB_LSB_HIGH_PRIORITY = 1
RAM_PIPELINE = 2
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_tdata_list = [Signal(intbv(0)[S_DATA_WIDTH:]) for i in range(S_COUNT)]
s_axis_tkeep_list = [Signal(intbv(1)[S_KEEP_WIDTH:]) for i in range(S_COUNT)]
s_axis_tvalid_list = [Signal(bool(0)) for i in range(S_COUNT)]
s_axis_tlast_list = [Signal(bool(0)) for i in range(S_COUNT)]
s_axis_tid_list = [Signal(intbv(0)[ID_WIDTH:]) for i in range(S_COUNT)]
s_axis_tdest_list = [Signal(intbv(0)[DEST_WIDTH:]) for i in range(S_COUNT)]
s_axis_tuser_list = [Signal(intbv(0)[USER_WIDTH:]) for i in range(S_COUNT)]
# s_axis_tdata = ConcatSignal(*reversed(s_axis_tdata_list))
# s_axis_tkeep = ConcatSignal(*reversed(s_axis_tkeep_list))
# s_axis_tvalid = ConcatSignal(*reversed(s_axis_tvalid_list))
# s_axis_tlast = ConcatSignal(*reversed(s_axis_tlast_list))
# s_axis_tid = ConcatSignal(*reversed(s_axis_tid_list))
# s_axis_tdest = ConcatSignal(*reversed(s_axis_tdest_list))
# s_axis_tuser = ConcatSignal(*reversed(s_axis_tuser_list))
if S_COUNT == 1:
s_axis_tdata = s_axis_tdata_list[0]
s_axis_tkeep = s_axis_tkeep_list[0]
s_axis_tvalid = s_axis_tvalid_list[0]
s_axis_tlast = s_axis_tlast_list[0]
s_axis_tid = s_axis_tid_list[0]
s_axis_tdest = s_axis_tdest_list[0]
s_axis_tuser = s_axis_tuser_list[0]
else:
s_axis_tdata = ConcatSignal(*reversed(s_axis_tdata_list))
s_axis_tkeep = ConcatSignal(*reversed(s_axis_tkeep_list))
s_axis_tvalid = ConcatSignal(*reversed(s_axis_tvalid_list))
s_axis_tlast = ConcatSignal(*reversed(s_axis_tlast_list))
s_axis_tid = ConcatSignal(*reversed(s_axis_tid_list))
s_axis_tdest = ConcatSignal(*reversed(s_axis_tdest_list))
s_axis_tuser = ConcatSignal(*reversed(s_axis_tuser_list))
m_axis_tready_list = [Signal(bool(0)) for i in range(M_COUNT)]
# m_axis_tready = ConcatSignal(*reversed(m_axis_tready_list))
if M_COUNT == 1:
m_axis_tready = m_axis_tready_list[0]
else:
m_axis_tready = ConcatSignal(*reversed(m_axis_tready_list))
# Outputs
s_axis_tready = Signal(intbv(0)[S_COUNT:])
s_axis_tready_list = [s_axis_tready(i) for i in range(S_COUNT)]
m_axis_tdata = Signal(intbv(0)[M_COUNT*M_DATA_WIDTH:])
m_axis_tkeep = Signal(intbv(0xf)[M_COUNT*M_KEEP_WIDTH:])
m_axis_tvalid = Signal(intbv(0)[M_COUNT:])
m_axis_tlast = Signal(intbv(0)[M_COUNT:])
m_axis_tid = Signal(intbv(0)[M_COUNT*ID_WIDTH:])
m_axis_tdest = Signal(intbv(0)[M_COUNT*DEST_WIDTH:])
m_axis_tuser = Signal(intbv(0)[M_COUNT*USER_WIDTH:])
m_axis_tdata_list = [m_axis_tdata((i+1)*M_DATA_WIDTH, i*M_DATA_WIDTH) for i in range(M_COUNT)]
m_axis_tkeep_list = [m_axis_tkeep((i+1)*M_KEEP_WIDTH, i*M_KEEP_WIDTH) for i in range(M_COUNT)]
m_axis_tvalid_list = [m_axis_tvalid(i) for i in range(M_COUNT)]
m_axis_tlast_list = [m_axis_tlast(i) for i in range(M_COUNT)]
m_axis_tid_list = [m_axis_tid((i+1)*ID_WIDTH, i*ID_WIDTH) for i in range(M_COUNT)]
m_axis_tdest_list = [m_axis_tdest((i+1)*DEST_WIDTH, i*DEST_WIDTH) for i in range(M_COUNT)]
m_axis_tuser_list = [m_axis_tuser((i+1)*USER_WIDTH, i*USER_WIDTH) for i in range(M_COUNT)]
status_overflow = Signal(intbv(0)[S_COUNT:])
status_bad_frame = Signal(intbv(0)[S_COUNT:])
status_good_frame = Signal(intbv(0)[S_COUNT:])
# sources and sinks
source_pause_list = []
source_list = []
source_logic_list = []
sink_pause_list = []
sink_list = []
sink_logic_list = []
for k in range(S_COUNT):
s = axis_ep.AXIStreamSource()
p = Signal(bool(0))
source_list.append(s)
source_pause_list.append(p)
source_logic_list.append(s.create_logic(
clk,
rst,
tdata=s_axis_tdata_list[k],
tkeep=s_axis_tkeep_list[k],
tvalid=s_axis_tvalid_list[k],
tready=s_axis_tready_list[k],
tlast=s_axis_tlast_list[k],
tid=s_axis_tid_list[k],
tdest=s_axis_tdest_list[k],
tuser=s_axis_tuser_list[k],
pause=p,
name='source_%d' % k
))
for k in range(M_COUNT):
s = axis_ep.AXIStreamSink()
p = Signal(bool(0))
sink_list.append(s)
sink_pause_list.append(p)
sink_logic_list.append(s.create_logic(
clk,
rst,
tdata=m_axis_tdata_list[k],
tkeep=m_axis_tkeep_list[k],
tvalid=m_axis_tvalid_list[k],
tready=m_axis_tready_list[k],
tlast=m_axis_tlast_list[k],
tid=m_axis_tid_list[k],
tdest=m_axis_tdest_list[k],
tuser=m_axis_tuser_list[k],
pause=p,
name='sink_%d' % k
))
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_axis_tdata=s_axis_tdata,
s_axis_tkeep=s_axis_tkeep,
s_axis_tvalid=s_axis_tvalid,
s_axis_tready=s_axis_tready,
s_axis_tlast=s_axis_tlast,
s_axis_tid=s_axis_tid,
s_axis_tdest=s_axis_tdest,
s_axis_tuser=s_axis_tuser,
m_axis_tdata=m_axis_tdata,
m_axis_tkeep=m_axis_tkeep,
m_axis_tvalid=m_axis_tvalid,
m_axis_tready=m_axis_tready,
m_axis_tlast=m_axis_tlast,
m_axis_tid=m_axis_tid,
m_axis_tdest=m_axis_tdest,
m_axis_tuser=m_axis_tuser,
status_overflow=status_overflow,
status_bad_frame=status_bad_frame,
status_good_frame=status_good_frame
)
@always(delay(4))
def clkgen():
clk.next = not clk
status_overflow_latch = Signal(intbv(0)[S_COUNT:])
status_bad_frame_latch = Signal(intbv(0)[S_COUNT:])
status_good_frame_latch = Signal(intbv(0)[S_COUNT:])
@always(clk.posedge)
def monitor():
if status_overflow:
status_overflow_latch.next = status_overflow_latch | status_overflow
if status_bad_frame:
status_bad_frame_latch.next = status_bad_frame_latch | status_bad_frame
if status_good_frame:
status_good_frame_latch.next = status_good_frame_latch | status_good_frame
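# Stimulus-draining strategies used by the test loops below: wait_normal simply
# waits for the source to go idle, while wait_pause_source / wait_pause_sink
# additionally toggle the pause signals to exercise backpressure on the source
# and sink sides respectively.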
def wait_normal():
while s_axis_tvalid:
yield clk.posedge
def wait_pause_source():
while s_axis_tvalid:
yield clk.posedge
yield clk.posedge
for k in range(S_COUNT):
source_pause_list[k].next = False
yield clk.posedge
for k in range(S_COUNT):
source_pause_list[k].next = True
yield clk.posedge
for k in range(S_COUNT):
source_pause_list[k].next = False
def wait_pause_sink():
while s_axis_tvalid:
for k in range(M_COUNT):
sink_pause_list[k].next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
for k in range(M_COUNT):
sink_pause_list[k].next = False
yield clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: 0000 -> 0123")
current_test.next = 1
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x00\x01\xFF'+bytearray(range(256)), id=0, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x00\x02\xFF'+bytearray(range(256)), id=0, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x00\x03\xFF'+bytearray(range(256)), id=0, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[1].wait()
rx_frame1 = sink_list[1].recv()
assert rx_frame1 == test_frame1
yield sink_list[2].wait()
rx_frame2 = sink_list[2].recv()
assert rx_frame2 == test_frame2
yield sink_list[3].wait()
rx_frame3 = sink_list[3].recv()
assert rx_frame3 == test_frame3
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 2: 0000 -> 0000")
current_test.next = 2
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[0].wait()
rx_frame1 = sink_list[0].recv()
assert rx_frame1 == test_frame1
yield sink_list[0].wait()
rx_frame2 = sink_list[0].recv()
assert rx_frame2 == test_frame2
yield sink_list[0].wait()
rx_frame3 = sink_list[0].recv()
assert rx_frame3 == test_frame3
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 3: bad decoding")
current_test.next = 3
test_frame0 = axis_ep.AXIStreamFrame(b'\x03\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x03\x00\x01\xFF'+bytearray(range(256)), id=0, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x03\x00\x04\xFF'+bytearray(range(256)), id=0, dest=4)
test_frame3 = axis_ep.AXIStreamFrame(b'\x03\x00\x05\xFF'+bytearray(range(256)), id=0, dest=5)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[1].wait()
rx_frame1 = sink_list[1].recv()
assert rx_frame1 == test_frame1
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 4: tuser assert")
current_test.next = 4
test_frame0 = axis_ep.AXIStreamFrame(b'\x04\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x04\x00\x01\xFF'+bytearray(range(256)), id=0, dest=1, last_cycle_user=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x04\x00\x02\xFF'+bytearray(range(256)), id=0, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x04\x00\x03\xFF'+bytearray(range(256)), id=0, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[2].wait()
rx_frame2 = sink_list[2].recv()
assert rx_frame2 == test_frame2
yield sink_list[3].wait()
rx_frame3 = sink_list[3].recv()
assert rx_frame3 == test_frame3
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x1
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 5: single packet overflow")
current_test.next = 5
test_frame0 = axis_ep.AXIStreamFrame(b'\x05\x00\x00\xFF'+bytearray(range(256))*3, id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x05\x01\x01\xFF'+bytearray(range(256))*3, id=0, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x05\x02\x02\xFF'+bytearray(range(256))*3, id=0, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x05\x03\x03\xFF'+bytearray(range(256))*3, id=0, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield delay(100)
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x1
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x0
yield delay(100)
yield clk.posedge
print("test 6: initial sink pause")
current_test.next = 6
test_frame0 = axis_ep.AXIStreamFrame(b'\x06\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
for k in range(M_COUNT):
sink_pause_list[k].next = True
yield clk.posedge
yield clk.posedge
while s_axis_tvalid:
yield clk.posedge
for k in range(20):
yield clk.posedge
for k in range(M_COUNT):
sink_pause_list[k].next = False
yield wait_normal()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 7: initial sink pause, reset")
current_test.next = 7
test_frame0 = axis_ep.AXIStreamFrame(b'\x07\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
for k in range(M_COUNT):
sink_pause_list[k].next = True
yield clk.posedge
yield clk.posedge
while s_axis_tvalid:
yield clk.posedge
for k in range(20):
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
for k in range(M_COUNT):
sink_pause_list[k].next = False
yield delay(500)
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 8: backpressure test")
current_test.next = 8
test_frame0 = axis_ep.AXIStreamFrame(b'\x08\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x08\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame2 = axis_ep.AXIStreamFrame(b'\x08\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
test_frame3 = axis_ep.AXIStreamFrame(b'\x08\x00\x00\xFF'+bytearray(range(256)), id=0, dest=0)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
for k in range(M_COUNT):
sink_pause_list[k].next = True
for k in range(100):
yield clk.posedge
for k in range(M_COUNT):
sink_pause_list[k].next = False
yield wait()
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[0].wait()
rx_frame1 = sink_list[0].recv()
assert rx_frame1 == test_frame1
yield sink_list[0].wait()
rx_frame2 = sink_list[0].recv()
assert rx_frame2 == test_frame2
yield sink_list[0].wait()
rx_frame3 = sink_list[0].recv()
assert rx_frame3 == test_frame3
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 9: many small packets, one to one")
current_test.next = 9
test_frame0 = axis_ep.AXIStreamFrame(b'\x09\x00\x00\xFF'+bytearray(range(4)), id=0, dest=0)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
for k in range(64):
source_list[0].send(test_frame0)
yield clk.posedge
yield clk.posedge
yield wait()
for k in range(64):
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
yield clk.posedge
print("test 10: many small packets, one to many")
current_test.next = 10
test_frame0 = axis_ep.AXIStreamFrame(b'\x0A\x00\x00\xFF'+bytearray(range(4)), id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x0A\x00\x01\xFF'+bytearray(range(4)), id=0, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x0A\x00\x02\xFF'+bytearray(range(4)), id=0, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x0A\x00\x03\xFF'+bytearray(range(4)), id=0, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
status_overflow_latch.next = 0
status_bad_frame_latch.next = 0
status_good_frame_latch.next = 0
for k in range(64):
source_list[0].send(test_frame0)
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
source_list[0].send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
for k in range(64):
yield sink_list[0].wait()
rx_frame0 = sink_list[0].recv()
assert rx_frame0 == test_frame0
yield sink_list[1].wait()
rx_frame1 = sink_list[1].recv()
assert rx_frame1 == test_frame1
yield sink_list[2].wait()
rx_frame2 = sink_list[2].recv()
assert rx_frame2 == test_frame2
yield sink_list[3].wait()
rx_frame3 = sink_list[3].recv()
assert rx_frame3 == test_frame3
assert sink_list[0].empty()
assert sink_list[1].empty()
assert sink_list[2].empty()
assert sink_list[3].empty()
assert status_overflow_latch == 0x0
assert status_bad_frame_latch == 0x0
assert status_good_frame_latch == 0x1
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
#
# Tests for conversion of hgvs tags
#
import os
import unittest
import hgvs.variantmapper as variantmapper
import hgvs.parser
import framework.mock_input_source as mock_input_data_source
class TestHgvsCToP(unittest.TestCase):
fn = os.path.join(os.path.dirname(__file__), 'data', 'sanity_cp.tsv')
_datasource = mock_input_data_source.MockInputSource(fn)
_mapper = variantmapper.VariantMapper(_datasource)
_parser = hgvs.parser.Parser()
def test_silent(self):
hgvsc = "NM_999999.1:c.6A>G"
hgvsp_expected = "MOCK:p.(=)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_substitution(self):
hgvsc = "NM_999999.1:c.6A>T"
hgvsp_expected = "MOCK:p.(Lys2Asn)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_substitution_introduces_stop_codon(self):
hgvsc = "NM_999996.1:c.8C>A"
hgvsp_expected = "MOCK:p.(Ser3Ter)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_substitution_removes_stop_codon(self):
hgvsc = "NM_999998.1:c.30G>T"
hgvsp_expected = "MOCK:p.(Ter10TyrextTer3)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_substitution_removes_start_codon(self):
hgvsc = "NM_999999.1:c.1A>G"
hgvsp_expected = "MOCK:p.(Met1?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_insertion_no_frameshift(self):
hgvsc = "NM_999999.1:c.6_7insGGG"
hgvsp_expected = "MOCK:p.(Lys2_Ala3insGly)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_insertion_frameshift(self):
hgvsc = "NM_999999.1:c.22_23insT"
hgvsp_expected = "MOCK:p.(Ala8ValfsTer?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_insertion_adds_stop(self):
hgvsc = "NM_999999.1:c.8_9insTT"
hgvsp_expected = "MOCK:p.(Lys4Ter)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_no_frameshift(self):
hgvsc = "NM_999999.1:c.10_12del"
hgvsp_expected = "MOCK:p.(Lys4del)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion2_no_frameshift(self):
hgvsc = "NM_999999.1:c.4_15del"
hgvsp_expected = "MOCK:p.(Lys2_Ala5del)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion3_no_frameshift_c_term(self):
hgvsc = "NM_999995.1:c.4_6del"
hgvsp_expected = "MOCK:p.(Lys3del)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion4_no_frameshift_c_term(self):
hgvsc = "NM_999994.1:c.4_9del"
hgvsp_expected = "MOCK:p.(Lys3_Lys4del)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion5_no_frameshift(self):
hgvsc = "NM_999994.1:c.20_25del"
hgvsp_expected = "MOCK:p.(Ala7_Arg9delinsGly)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion6_no_frameshift(self):
hgvsc = "NM_999999.1:c.5_7del"
hgvsp_expected = "MOCK:p.(Lys2_Ala3delinsThr)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion7_no_frameshift(self):
hgvsc = "NM_999993.1:c.13_24del"
hgvsp_expected = "MOCK:p.(Arg5_Ala8del)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_frameshift_nostop(self):
hgvsc = "NM_999999.1:c.11_12del"
hgvsp_expected = "MOCK:p.(Lys4SerfsTer?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_frameshift_adds_stop(self):
hgvsc = "NM_999997.1:c.7del"
hgvsp_expected = "MOCK:p.(Ala3ArgfsTer6)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_no_frameshift_removes_stop_plus_previous(self):
hgvsc = "NM_999999.1:c.25_30del"
hgvsp_expected = "MOCK:p.(Lys9_Ter10delinsGly)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_indel_no_frameshift(self):
hgvsc = "NM_999999.1:c.11_12delinsTCCCA"
hgvsp_expected = "MOCK:p.(Lys4delinsIlePro)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_indel2_no_frameshift(self):
hgvsc = "NM_999999.1:c.11_18delinsTCCCA"
hgvsp_expected = "MOCK:p.(Lys4_Phe6delinsIlePro)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_indel_frameshift_nostop(self):
hgvsc = "NM_999999.1:c.8delinsGG"
hgvsp_expected = "MOCK:p.(Ala3GlyfsTer?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_dup_1AA_no_frameshift_2(self):
hgvsc = "NM_999999.1:c.10_12dup"
hgvsp_expected = "MOCK:p.(Lys4dup)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_dup_1AA_no_frameshift(self):
hgvsc = "NM_999999.1:c.16_18dup"
hgvsp_expected = "MOCK:p.(Phe6dup)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_dup_2AA_no_frameshift(self):
hgvsc = "NM_999999.1:c.16_21dup"
hgvsp_expected = "MOCK:p.(Phe6_Arg7dup)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_dup_2AA2_no_frameshift(self):
hgvsc = "NM_999995.1:c.4_6dup"
hgvsp_expected = "MOCK:p.(Lys3dup)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_dup_3AA_no_frameshift(self):
hgvsc = "NM_999999.1:c.16_24dup"
hgvsp_expected = "MOCK:p.(Phe6_Ala8dup)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_dup_frameshift(self):
hgvsc = "NM_999999.1:c.12_13dup"
hgvsp_expected = "MOCK:p.(Ala5GlufsTer?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_intron(self):
hgvsc = "NM_999999.1:c.12+1G>A"
hgvsp_expected = "MOCK:p.?"
self._run_conversion(hgvsc, hgvsp_expected)
def test_five_prime_utr(self):
hgvsc = "NM_999999.1:c.-2A>G"
hgvsp_expected = "MOCK:p.?"
self._run_conversion(hgvsc, hgvsp_expected)
def test_three_prime_utr(self):
hgvsc = "NM_999999.1:c.*3G>A"
hgvsp_expected = "MOCK:p.?"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_into_three_prime_utr_frameshift(self):
hgvsc = "NM_999999.1:c.27_*3del"
hgvsp_expected = "MOCK:p.(Lys9XaafsTer?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_into_three_prime_utr_no_frameshift(self):
hgvsc = "NM_999995.1:c.28_*3del"
hgvsp_expected = "MOCK:p.(Lys10_Ter11delinsArgGlnPheArg)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_delins_into_three_prime_utr_no_frameshift(self):
hgvsc = "NM_999995.1:c.28_*3delinsGGG"
hgvsp_expected = "MOCK:p.(Lys10_Ter11delinsGlyArgGlnPheArg)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_from_five_prime_utr_frameshift(self):
hgvsc = "NM_999999.1:c.-3_1del"
hgvsp_expected = "MOCK:p.(Met1?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_deletion_from_five_prime_utr_no_frameshift(self):
hgvsc = "NM_999999.1:c.-3_3del"
hgvsp_expected = "MOCK:p.(Met1?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_delins_from_five_prime_utr_no_frameshift(self):
hgvsc = "NM_999999.1:c.-3_3delinsAAA"
hgvsp_expected = "MOCK:p.(Met1?)"
self._run_conversion(hgvsc, hgvsp_expected)
def test_delete_entire_gene(self):
hgvsc = "NM_999999.1:c.-3_*1del"
hgvsp_expected = "MOCK:p.0?"
self._run_conversion(hgvsc, hgvsp_expected)
def test_multiple_stop_codons(self):
hgvsc = "NM_999992.1:c.4G>A"
hgvsp_expected = "MOCK:p.?"
self._run_conversion(hgvsc, hgvsp_expected)
# The following are unsupported
#
# def test_repeats(self):
# hgvsc = "NM_999999.1:c.12_13[3]"
# hgvsp_expected = ""
# self._run_conversion(hgvsc, hgvsp_expected)
#
# def test_variable_repeats(self):
# pass
#
# def test_indeterminate_entire_exon_del(self):
# pass
#
# def test_indeterminate_entire_exon_dup(self):
# pass
#
# def test_mosaic(self):
# pass
#
# def test_chimera(self):
# pass
#
# def test_two_changes_same_allele(self):
# pass
#
# def test_two_changes_diff_allele(self):
# pass
#
# def test_two_changes_unknown_allele(self):
# pass
def _run_conversion(self, hgvsc, hgvsp_expected):
"""Helper method to actually run the test
:param hgvsc tag
"""
var_c = TestHgvsCToP._parser.parse_hgvs_variant(hgvsc)
ac_p = "MOCK"
hgvsp_actual = str(TestHgvsCToP._mapper.c_to_p(var_c, ac_p))
msg = "hgvsc: {} hgvsp expected: {} actual: {}".format(hgvsc, hgvsp_expected, hgvsp_actual)
self.assertEqual(hgvsp_expected, hgvsp_actual, msg)
# TODO - review other classes of hgvs tags (e.g. utr, intronic) - more use cases?
# 5'utr
# intronic
# after stop codon
# uncertainties in dups/dels (i.e. hgvs tags with ?)
if __name__ == '__main__':
unittest.main()
## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs)
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## </LICENSE>
|
|
"""
Establishes a "when reviewable" flag for items, like Moodle
Also adds records for supporting AssessmentPart child management
"""
from dlkit.abstract_osid.assessment import record_templates as abc_assessment_records
from dlkit.json_.osid import record_templates as osid_records
from dlkit.json_.osid import objects as osid_objects
from dlkit.json_.osid.metadata import Metadata
from dlkit.primordium.id.primitives import Id
from dlkit.abstract_osid.osid.errors import IllegalState, InvalidArgument,\
NoAccess, NotFound
from ...osid.base_records import ObjectInitRecord
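# Illustrative usage sketch (the `form` instance below is hypothetical; it
# assumes an AssessmentOfferedForm that includes this 'review-options' record):
#
#     form.set_review_whether_correct(during_attempt=True, after_attempt=True)
#     form.set_review_solution(after_attempt=True)
#     form.set_max_attempts(3)
#
# The getters on the offered/taken records below then report these choices.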
class ReviewOptionsAssessmentOfferedRecord(ObjectInitRecord):
"""when reviewable option on assessment offereds"""
_implemented_record_type_identifiers = [
'review-options'
]
def can_review_whether_correct_during_attempt(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['whetherCorrect']['duringAttempt'])
def can_review_whether_correct_after_attempt(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['whetherCorrect']['afterAttempt'])
def can_review_whether_correct_before_deadline(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['whetherCorrect']['beforeDeadline'])
def can_review_whether_correct_after_deadline(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['whetherCorrect']['afterDeadline'])
def can_review_solution_during_attempt(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['solution']['duringAttempt'])
def can_review_solution_after_attempt(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['solution']['afterAttempt'])
def can_review_solution_before_deadline(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['solution']['beforeDeadline'])
def can_review_solution_after_deadline(self):
"""stub"""
return bool(self.my_osid_object._my_map['reviewOptions']['solution']['afterDeadline'])
def has_max_attempts(self):
"""stub"""
if 'maxAttempts' not in self.my_osid_object._my_map or \
self.my_osid_object._my_map['maxAttempts'] is None:
return False
return True
def get_max_attempts(self):
"""stub"""
if self.has_max_attempts():
return self.my_osid_object._my_map['maxAttempts']
raise IllegalState()
max_attempts = property(fget=get_max_attempts)
def get_object_map(self):
obj_map = dict(self.my_osid_object._my_map)
if obj_map['startTime'] is not None:
start_time = obj_map['startTime']
obj_map['startTime'] = dict()
obj_map['startTime']['year'] = start_time.year
obj_map['startTime']['month'] = start_time.month
obj_map['startTime']['day'] = start_time.day
obj_map['startTime']['hour'] = start_time.hour
obj_map['startTime']['minute'] = start_time.minute
obj_map['startTime']['second'] = start_time.second
obj_map['startTime']['microsecond'] = start_time.microsecond
if obj_map['deadline'] is not None:
deadline = obj_map['deadline']
obj_map['deadline'] = dict()
obj_map['deadline']['year'] = deadline.year
obj_map['deadline']['month'] = deadline.month
obj_map['deadline']['day'] = deadline.day
obj_map['deadline']['hour'] = deadline.hour
obj_map['deadline']['minute'] = deadline.minute
obj_map['deadline']['second'] = deadline.second
obj_map['deadline']['microsecond'] = deadline.microsecond
obj_map = osid_objects.OsidObject.get_object_map(self.my_osid_object, obj_map)
return obj_map
object_map = property(fget=get_object_map)
class ReviewOptionsAssessmentOfferedFormRecord(abc_assessment_records.AssessmentOfferedFormRecord,
osid_records.OsidRecord):
"""form to set / update the reviewable option"""
_implemented_record_type_identifiers = [
'review-options'
]
def __init__(self, osid_object_form):
if osid_object_form is not None:
self.my_osid_object_form = osid_object_form
self._init_metadata()
if not osid_object_form.is_for_update():
self._init_map()
super(ReviewOptionsAssessmentOfferedFormRecord, self).__init__()
def _init_map(self):
"""stub"""
self.my_osid_object_form._my_map['reviewOptions'] = \
dict(self._review_options_metadata['default_object_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['whetherCorrect'] = \
dict(self._whether_correct_metadata['default_object_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['whetherCorrect']['duringAttempt'] = \
bool(self._during_attempt_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['whetherCorrect']['afterAttempt'] = \
bool(self._after_attempt_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['whetherCorrect']['beforeDeadline'] = \
bool(self._before_deadline_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['whetherCorrect']['afterDeadline'] = \
bool(self._after_deadline_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['solution'] = \
dict(self._solutions_metadata['default_object_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['solution']['duringAttempt'] = False
self.my_osid_object_form._my_map['reviewOptions']['solution']['afterAttempt'] = \
bool(self._after_attempt_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['solution']['beforeDeadline'] = \
bool(self._before_deadline_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['reviewOptions']['solution']['afterDeadline'] = \
bool(self._after_deadline_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['maxAttempts'] = \
list(self._max_attempts_metadata['default_integer_values'])[0]
def _init_metadata(self):
"""stub"""
self._review_options_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'review_options'),
'element_label': 'Review Options',
'instructions': 'Choose various Review Options',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
}
self._whether_correct_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'report_correct'),
'element_label': 'Report Correct',
'instructions': 'Choose when to report correct answer to Taker',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
}
self._solutions_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'review_solutions'),
'element_label': 'Review Solutions / Explanations',
'instructions': 'Choose when to report a solution or explanation text blob, when available',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
}
self._during_attempt_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'during-attempt'),
'element_label': 'During Attempt',
'instructions': 'accepts a boolean (True/False) value',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_boolean_values': [True],
'syntax': 'BOOLEAN',
}
self._after_attempt_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'after-attempt'),
'element_label': 'After Attempt',
'instructions': 'accepts a boolean (True/False) value',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_boolean_values': [True],
'syntax': 'BOOLEAN',
}
self._before_deadline_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'before-deadline'),
'element_label': 'Before Deadline',
'instructions': 'accepts a boolean (True/False) value',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_boolean_values': [True],
'syntax': 'BOOLEAN',
}
self._after_deadline_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'after-deadline'),
'element_label': 'After Deadline',
'instructions': 'accepts a boolean (True/False) value',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_boolean_values': [True],
'syntax': 'BOOLEAN',
}
self._min_max_attempts_value = None
self._max_max_attempts_value = None
self._max_attempts_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'max_attempts'),
'element_label': 'Maximum Attempts',
'instructions': 'enter an integer value for maximum attempts',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_integer_values': [None],
'syntax': 'INTEGER',
'minimum_integer': self._min_max_attempts_value,
'maximum_integer': self._max_max_attempts_value,
'integer_set': []
}
def get_review_options_metadata(self):
"""stub"""
return Metadata(**self._review_options_metadata)
def get_whether_correct_metadata(self):
"""stub"""
return Metadata(**self._whether_correct_metadata)
def get_during_attempt_metadata(self):
"""stub"""
return Metadata(**self._during_attempt_metadata)
def get_after_attempt_metadata(self):
"""stub"""
return Metadata(**self._after_attempt_metadata)
def get_before_deadline_metadata(self):
"""stub"""
return Metadata(**self._before_deadline_metadata)
def get_after_deadline_metadata(self):
"""stub"""
return Metadata(**self._after_deadline_metadata)
def set_review_whether_correct(self,
during_attempt=None,
after_attempt=None,
before_deadline=None,
after_deadline=None):
"""stub"""
whether_correct = self.my_osid_object_form._my_map['reviewOptions']['whetherCorrect']
if during_attempt is not None:
whether_correct['duringAttempt'] = bool(during_attempt)
if after_attempt is not None:
whether_correct['afterAttempt'] = bool(after_attempt)
if before_deadline is not None:
whether_correct['beforeDeadline'] = bool(before_deadline)
if after_deadline is not None:
whether_correct['afterDeadline'] = bool(after_deadline)
def set_review_solution(self,
during_attempt=None,
after_attempt=None,
before_deadline=None,
after_deadline=None):
"""stub"""
solution = self.my_osid_object_form._my_map['reviewOptions']['solution']
if during_attempt is not None:
solution['duringAttempt'] = bool(during_attempt)
if after_attempt is not None:
solution['afterAttempt'] = bool(after_attempt)
if before_deadline is not None:
solution['beforeDeadline'] = bool(before_deadline)
if after_deadline is not None:
solution['afterDeadline'] = bool(after_deadline)
def get_max_attempts_metadata(self):
"""stub"""
return Metadata(**self._max_attempts_metadata)
def set_max_attempts(self, value):
"""stub"""
if value is None:
raise InvalidArgument('value must be an integer')
if not isinstance(value, int):
raise InvalidArgument('value is not an integer')
if not self.my_osid_object_form._is_valid_integer(value,
self.get_max_attempts_metadata()):
raise InvalidArgument('value must be an integer')
self.my_osid_object_form._my_map['maxAttempts'] = value
def clear_max_attempts(self):
"""stub"""
if (self.get_max_attempts_metadata().is_read_only() or
self.get_max_attempts_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['maxAttempts'] = \
list(self._max_attempts_metadata['default_integer_values'])[0]
class ReviewOptionsAssessmentTakenRecord(ObjectInitRecord):
"""show review options on takens too"""
_implemented_record_type_identifiers = [
'review-options'
]
def _get_section_for_question(self, question_id):
sections = self.my_osid_object._get_assessment_sections()
for section in sections:
try:
section.get_question(question_id)
return section
except NotFound:
pass
raise NotFound()
def can_review_solution(self, question_id):
ao = self.my_osid_object.get_assessment_offered()
try:
section = self._get_section_for_question(question_id)
section.get_response(question_id)
attempt_complete = True
except (IllegalState, NotFound):
attempt_complete = False
if ao.can_review_solution_during_attempt() and not attempt_complete:
return True
if ao.can_review_solution_after_attempt() and attempt_complete:
return True
return False
def can_review_whether_correct(self):
"""stub"""
ao = self.my_osid_object.get_assessment_offered()
attempt_complete = self.my_osid_object.has_ended()
if ao.can_review_whether_correct_during_attempt() and not attempt_complete:
return True
if ao.can_review_whether_correct_after_attempt() and attempt_complete:
return True
return False
def get_solution_for_question(self, question_id, section=None):
try:
if section is None:
section = self._get_section_for_question(question_id)
if self.can_review_solution(question_id):
item = section._get_item(question_id)
item_map = item.object_map
answers = [a.object_map for a in item.get_answers()]
try:
answers = answers + [a.object_map for a in item.get_wrong_answers()]
except AttributeError:
# no wrong answers
pass
try:
if 'solution' in item_map:
return {
'answers': answers,
'explanation': item_map['solution'] # fbw items
}
else:
return {
'answers': answers,
'explanation': item_map['texts']['solution'] # edX items
}
except KeyError:
pass
except KeyError:
pass
raise IllegalState()
def _update_object_map(self, obj_map):
"""stub"""
obj_map['reviewWhetherCorrect'] = self.can_review_whether_correct()
class ReviewOptionsAssessmentTakenFormRecord(abc_assessment_records.AssessmentTakenFormRecord,
osid_records.OsidRecord):
"""form to create / update the taken options"""
_implemented_record_type_identifiers = [
'review-options'
]
def __init__(self, osid_object_form):
super(ReviewOptionsAssessmentTakenFormRecord, self).__init__()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import textwrap
import pretend
import pytest
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.primitives.serialization import (
Encoding,
PrivateFormat,
PublicFormat,
NoEncryption,
load_pem_private_key,
load_pem_public_key,
)
from warehouse.utils.sns import MessageVerifier, InvalidMessage
VALID_SIGNATURE = object()
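# Sentinel marker: parametrized cases that need a genuine signature use this
# object so the test body knows to sign the canonical payload itself.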
@pytest.fixture(scope="module")
def sns_privatekey():
key = rsa.generate_private_key(
public_exponent=65537,
key_size=1024, # 1024 shouldn't be used, but for tests it's fine.
backend=default_backend(),
)
return key.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, NoEncryption())
@pytest.fixture(scope="module")
def sns_publickey(sns_privatekey):
private_key = load_pem_private_key(
sns_privatekey, password=None, backend=default_backend()
)
public_key = private_key.public_key()
return public_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo)
@pytest.fixture(scope="module")
def sns_certificate(sns_privatekey, sns_publickey):
one_day = datetime.timedelta(1, 0, 0)
private_key = load_pem_private_key(
sns_privatekey, password=None, backend=default_backend()
)
public_key = load_pem_public_key(sns_publickey, backend=default_backend())
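# Build a short-lived self-signed certificate binding the test key pair; the
# stubbed HTTP session in the tests returns these PEM bytes as the SNS
# signing certificate.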
builder = x509.CertificateBuilder()
builder = builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "sns.amazonaws.com")])
)
builder = builder.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "sns.amazonaws.com")])
)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(x509.random_serial_number())
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.SubjectAlternativeName([x509.DNSName("sns.amazonaws.com")]), critical=False
)
builder = builder.add_extension(
x509.BasicConstraints(ca=False, path_length=None), critical=True
)
cert = builder.sign(
private_key=private_key, algorithm=hashes.SHA256(), backend=default_backend()
)
return cert.public_bytes(Encoding.PEM)
class TestMessageVerifier:
@pytest.mark.parametrize(
("topics", "data", "error"),
[
([], {}, "Unknown SignatureVersion"),
([], {"SignatureVersion": "2"}, "Unknown SignatureVersion"),
(
[],
{
"SignatureVersion": "1",
"SigningCertURL": "http://sns.us-west-2.amazonaws.com/cert.pem",
},
"Invalid scheme for SigningCertURL",
),
(
[],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.attacker.com/cert.pem",
},
"Invalid location for SigningCertURL",
),
(
[],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": "SNYwQnC0BxjSo2E4aZFRiA==",
"Type": "Who Knows?",
},
"Invalid Type",
),
(
[],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": "SNYwQnC0BxjSo2E4aZFRiA==",
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "This is My Topic",
},
"Invalid Signature",
),
(
[],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": VALID_SIGNATURE,
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": "Wat?",
"TopicArn": "This is My Topic",
},
"Unknown Timestamp format",
),
(
[],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": VALID_SIGNATURE,
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
(
datetime.datetime.utcnow() - datetime.timedelta(days=1)
).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "This is My Topic",
},
"Message has expired",
),
(
["The topic I expected"],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": VALID_SIGNATURE,
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "This topic I got but didn't expect",
},
"Invalid TopicArn",
),
],
)
def test_invalid(self, sns_certificate, sns_privatekey, topics, data, error):
response = pretend.stub(raise_for_status=lambda: None, content=sns_certificate)
session = pretend.stub(get=lambda url: response)
verifier = MessageVerifier(topics=topics, session=session)
if data.get("Signature") is VALID_SIGNATURE:
private_key = load_pem_private_key(
sns_privatekey, password=None, backend=default_backend()
)
signature_bytes = private_key.sign(
verifier._get_data_to_sign(data), PKCS1v15(), hashes.SHA1()
)
data["Signature"] = base64.b64encode(signature_bytes)
with pytest.raises(InvalidMessage, match=error):
verifier.verify(data)
@pytest.mark.parametrize(
("topics", "data"),
[
(
["valid topic"],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "valid topic",
},
),
(
["valid topic", "another valid topic"],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "another valid topic",
},
),
(
["valid topic"],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "Notification",
"Subject": "This is a subject",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "valid topic",
},
),
(
["valid topic"],
{
"SignatureVersion": "1",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "SubscriptionConfirmation",
"Message": "This is My Message",
"MessageId": "1",
"SubscribeURL": "https://example.com/subscribe",
"Token": "1234",
"Timestamp": (
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "valid topic",
},
),
],
)
def test_valid(self, sns_certificate, sns_privatekey, topics, data):
response = pretend.stub(raise_for_status=lambda: None, content=sns_certificate)
session = pretend.stub(get=lambda url: response)
verifier = MessageVerifier(topics=topics, session=session)
private_key = load_pem_private_key(
sns_privatekey, password=None, backend=default_backend()
)
signature_bytes = private_key.sign(
verifier._get_data_to_sign(data), PKCS1v15(), hashes.SHA1()
)
data["Signature"] = base64.b64encode(signature_bytes)
verifier.verify(data)
@pytest.mark.parametrize(
("data", "expected"),
[
(
{
"Type": "SubscriptionConfirmation",
"Message": "This is My Message",
"MessageId": "1",
"SubscribeURL": "https://example.com/subscribe",
"Token": "1234",
"Timestamp": "2018-04-08T17:01:40.114582Z",
"TopicArn": "valid topic",
},
"""
Message
This is My Message
MessageId
1
SubscribeURL
https://example.com/subscribe
Timestamp
2018-04-08T17:01:40.114582Z
Token
1234
TopicArn
valid topic
Type
SubscriptionConfirmation
""",
),
(
{
"Type": "Notification",
"Subject": "This is a subject",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": "2018-04-08T17:01:40.114582Z",
"TopicArn": "valid topic",
},
"""
Message
This is My Message
MessageId
1
Subject
This is a subject
Timestamp
2018-04-08T17:01:40.114582Z
TopicArn
valid topic
Type
Notification
""",
),
(
{
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": "2018-04-08T17:01:40.114582Z",
"TopicArn": "valid topic",
},
"""
Message
This is My Message
MessageId
1
Timestamp
2018-04-08T17:01:40.114582Z
TopicArn
valid topic
Type
Notification
""",
),
],
)
def test_signature_data(self, data, expected):
# _get_data_to_sign gets its own test because the tests above cannot
# detect a broken implementation: they use the same method to build both
# the signed input and the expected output.
verifier = MessageVerifier(topics=[], session=pretend.stub())
expected = textwrap.dedent(expected).lstrip().encode("utf8")
assert verifier._get_data_to_sign(data) == expected
|
|
""" Unit tests for visibility operations
"""
import unittest
import numpy
from numpy.testing import assert_allclose
from astropy.coordinates import SkyCoord
import astropy.units as u
from data_models.memory_data_models import Skycomponent
from data_models.polarisation import PolarisationFrame
from processing_components.simulation.configurations import create_named_configuration
from processing_components.imaging.base import predict_skycomponent_visibility
from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility
from processing_components.visibility.operations import append_visibility, qa_visibility, \
sum_visibility, subtract_visibility, divide_visibility
from processing_components.visibility.base import copy_visibility, create_visibility, create_blockvisibility, \
create_visibility_from_rows, phaserotate_visibility
class TestVisibilityOperations(unittest.TestCase):
def setUp(self):
self.lowcore = create_named_configuration('LOWBD2-CORE')
self.times = (numpy.pi / 43200.0) * numpy.arange(0.0, 300.0, 30.0)
self.frequency = numpy.linspace(1.0e8, 1.1e8, 3)
self.channel_bandwidth = numpy.array([1e7, 1e7, 1e7])
# Define the component and give it some spectral behaviour
f = numpy.array([100.0, 20.0, -10.0, 1.0])
self.flux = numpy.array([f, 0.8 * f, 0.6 * f])
# The phase centre is absolute and the component is specified relative (for now).
# This means that the component should end up at the position phasecentre+compredirection
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
self.compabsdirection = SkyCoord(ra=+181.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
pcof = self.phasecentre.skyoffset_frame()
self.compreldirection = self.compabsdirection.transform_to(pcof)
self.comp = Skycomponent(direction=self.compreldirection, frequency=self.frequency, flux=self.flux)
def test_sum_visibility(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
polarisation_frame=PolarisationFrame("linear"),
weight=1.0)
self.vis = predict_skycomponent_visibility(self.vis, self.comp)
flux, weight = sum_visibility(self.vis, self.comp.direction)
assert numpy.max(numpy.abs(flux - self.flux)) < 1e-7
def test_create_visibility1(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0)
assert self.vis.nvis == len(self.vis.time)
assert self.vis.nvis == len(self.vis.frequency)
def test_create_visibility_polarisation(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("linear"))
assert self.vis.nvis == len(self.vis.time)
assert self.vis.nvis == len(self.vis.frequency)
def test_create_visibility_from_rows1(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0)
rows = self.vis.time > 150.0
for makecopy in [True, False]:
selected_vis = create_visibility_from_rows(self.vis, rows, makecopy=makecopy)
assert selected_vis.nvis == numpy.sum(numpy.array(rows))
def test_create_visibility_time(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency, phasecentre=self.phasecentre,
weight=1.0, channel_bandwidth=self.channel_bandwidth)
assert self.vis.nvis == len(self.vis.time)
def test_convert_blockvisibility(self):
self.vis = create_blockvisibility(self.lowcore, self.times, self.frequency, phasecentre=self.phasecentre,
weight=1.0, channel_bandwidth=self.channel_bandwidth)
vis = convert_blockvisibility_to_visibility(self.vis)
assert vis.nvis == len(vis.time)
assert numpy.unique(vis.time).size == self.vis.time.size # pylint: disable=no-member
def test_create_visibility_from_rows_makecopy(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency, phasecentre=self.phasecentre,
weight=1.0, channel_bandwidth=self.channel_bandwidth)
rows = self.vis.time > 150.0
for makecopy in [True, False]:
selected_vis = create_visibility_from_rows(self.vis, rows, makecopy=makecopy)
assert selected_vis.nvis == numpy.sum(numpy.array(rows))
def test_append_visibility(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0)
othertimes = (numpy.pi / 43200.0) * numpy.arange(300.0, 600.0, 30.0)
self.othervis = create_visibility(self.lowcore, othertimes, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0)
self.vis = append_visibility(self.vis, self.othervis)
assert self.vis.nvis == len(self.vis.time)
assert self.vis.nvis == len(self.vis.frequency)
def test_divide_visibility(self):
self.vis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=PolarisationFrame("stokesIQUV"))
self.vis.data['vis'][...,:] = [2.0+0.0j, 0.0j, 0.0j, 2.0+0.0j]
self.othervis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=PolarisationFrame("stokesIQUV"))
self.othervis.data['vis'][...,:] = [1.0+0.0j, 0.0j, 0.0j, 1.0+0.0j]
self.ratiovis = divide_visibility(self.vis, self.othervis)
assert self.ratiovis.nvis == self.vis.nvis
assert numpy.max(numpy.abs(self.ratiovis.vis)) == 2.0, numpy.max(numpy.abs(self.ratiovis.vis))
def test_copy_visibility(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
vis = copy_visibility(self.vis)
self.vis.data['vis'] = 0.0
vis.data['vis'] = 1.0
assert (vis.data['vis'][0, 0].real == 1.0)
assert (self.vis.data['vis'][0, 0].real == 0.0)
def test_phase_rotation_identity(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
self.vismodel = predict_skycomponent_visibility(self.vis, self.comp)
newphasecenters = [SkyCoord(182, -35, unit=u.deg), SkyCoord(182, -30, unit=u.deg),
SkyCoord(177, -30, unit=u.deg), SkyCoord(176, -35, unit=u.deg),
SkyCoord(216, -35, unit=u.deg), SkyCoord(180, -70, unit=u.deg)]
for newphasecentre in newphasecenters:
# Phase rotating back should not make a difference
original_vis = self.vismodel.vis
original_uvw = self.vismodel.uvw
rotatedvis = phaserotate_visibility(phaserotate_visibility(self.vismodel, newphasecentre, tangent=False),
self.phasecentre, tangent=False)
assert_allclose(rotatedvis.uvw, original_uvw, rtol=1e-7)
assert_allclose(rotatedvis.vis, original_vis, rtol=1e-7)
def test_phase_rotation(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
self.vismodel = predict_skycomponent_visibility(self.vis, self.comp)
# Predict visibilities with new phase centre independently
ha_diff = -(self.compabsdirection.ra - self.phasecentre.ra).to(u.rad).value
vispred = create_visibility(self.lowcore, self.times + ha_diff, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.compabsdirection, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
vismodel2 = predict_skycomponent_visibility(vispred, self.comp)
# Should yield the same results as rotation
rotatedvis = phaserotate_visibility(self.vismodel, newphasecentre=self.compabsdirection, tangent=False)
assert_allclose(rotatedvis.vis, vismodel2.vis, rtol=3e-6)
assert_allclose(rotatedvis.uvw, vismodel2.uvw, rtol=3e-6)
def test_phase_rotation_block(self):
self.vis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
self.vismodel = predict_skycomponent_visibility(self.vis, self.comp)
# Predict visibilities with new phase centre independently
ha_diff = -(self.compabsdirection.ra - self.phasecentre.ra).to(u.rad).value
vispred = create_blockvisibility(self.lowcore, self.times + ha_diff, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.compabsdirection, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
vismodel2 = predict_skycomponent_visibility(vispred, self.comp)
# Should yield the same results as rotation
rotatedvis = phaserotate_visibility(self.vismodel, newphasecentre=self.compabsdirection, tangent=False)
assert_allclose(rotatedvis.vis, vismodel2.vis, rtol=3e-6)
assert_allclose(rotatedvis.uvw, vismodel2.uvw, rtol=3e-6)
def test_phase_rotation_inverse(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
self.vismodel = predict_skycomponent_visibility(self.vis, self.comp)
there = SkyCoord(ra=+250.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
# Phase rotating back should not make a difference
original_vis = self.vismodel.vis
original_uvw = self.vismodel.uvw
rotatedvis = phaserotate_visibility(phaserotate_visibility(self.vismodel, there, tangent=False,
inverse=True),
self.phasecentre, tangent=False, inverse=True)
assert_allclose(rotatedvis.uvw, original_uvw, rtol=1e-7)
assert_allclose(rotatedvis.vis, original_vis, rtol=1e-7)
def test_phase_rotation_inverse_block(self):
self.vis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
self.vismodel = predict_skycomponent_visibility(self.vis, self.comp)
there = SkyCoord(ra=+250.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
# Phase rotating back should not make a difference
original_vis = self.vismodel.vis
original_uvw = self.vismodel.uvw
rotatedvis = phaserotate_visibility(phaserotate_visibility(self.vismodel, there, tangent=False,
inverse=True),
self.phasecentre, tangent=False, inverse=True)
assert_allclose(rotatedvis.uvw, original_uvw, rtol=1e-7)
assert_allclose(rotatedvis.vis, original_vis, rtol=1e-7)
def test_subtract(self):
vis1 = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
vis1.data['vis'][...] = 1.0
vis2 = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
vis2.data['vis'][...] = 1.0
zerovis = subtract_visibility(vis1, vis2)
qa = qa_visibility(zerovis, context='test_qa')
self.assertAlmostEqual(qa.data['maxabs'], 0.0, 7)
def test_qa(self):
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"))
self.vismodel = predict_skycomponent_visibility(self.vis, self.comp)
qa = qa_visibility(self.vis, context='test_qa')
self.assertAlmostEqual(qa.data['maxabs'], 100.0, 7)
self.assertAlmostEqual(qa.data['medianabs'], 11.0, 7)
assert qa.context == 'test_qa'
def test_elevation(self):
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=+15.0 * u.deg, frame='icrs', equinox='J2000')
self.times = (numpy.pi / 43200.0) * numpy.arange(-43200, +43200, 3600.0)
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"),
elevation_limit=numpy.pi * 15.0/180.0)
n_elevation_limit = len(numpy.unique(self.vis.time))
self.vis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"),
elevation_limit=None)
assert len(numpy.unique(self.vis.time)) >= n_elevation_limit
def test_elevation_block(self):
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=+15.0 * u.deg, frame='icrs', equinox='J2000')
self.times = (numpy.pi / 43200.0) * numpy.arange(-43200, +43200, 3600.0)
self.vis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"),
elevation_limit=numpy.pi * 15.0/180.0)
n_elevation_limit = len(numpy.unique(self.vis.time))
self.vis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame("stokesIQUV"),
elevation_limit=None)
assert len(numpy.unique(self.vis.time)) >= n_elevation_limit
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance test for a peer review system.
WARNING! Use this script only to load test U-MOOC. This is a very dangerous
feature: while it is enabled, anyone can impersonate a super user of your
Course Builder instance, so enable it only when you have to perform specific
load testing.
Keep in mind:
- when repeatedly running tests and creating new test namespaces,
flush memcache
Here is how to run:
- update /controllers/sites.py and enable CAN_IMPERSONATE
- navigate to the root directory of the app
- run a command line by typing:
python tests/integration/load_test.py \
--thread_count=5 \
--start_uid=1 \
http://mycourse.appspot.com
"""
__author__ = 'Pavel Simakov ([email protected])'
import argparse
import cookielib
import json
import logging
import random
import re
import sys
import threading
import time
import urllib
import urllib2
# The unit id for the peer review assignment in the default course.
LEGACY_REVIEW_UNIT_ID = 'ReviewAssessmentExample'
# command line arguments parser
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'base_url', help=('Base URL of the course you want to test'), type=str)
PARSER.add_argument(
'--start_uid',
help='Initial value for unique thread identifier.', default=1, type=int)
PARSER.add_argument(
'--thread_count',
help='Number of concurrent threads for executing the test.',
default=1, type=int)
PARSER.add_argument(
'--iteration_count',
    help='Number of iterations for executing the test. Each thread of each '
         'iteration acts as a unique user with the uid equal to: '
         'start_uid + iteration_index * thread_count + thread_index.',
default=1, type=int)
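# Illustrative note (not part of the original script): run_all() below derives
# each bot's uid as
#     start_uid + iteration_index * thread_count + thread_index
# so, for example, --start_uid=1 --thread_count=5 yields uids 1..5 on the
# first iteration and 6..10 on the second.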
def assert_contains(needle, haystack):
    if needle not in haystack:
        raise Exception('Expected to find term: %s\n%s' % (needle, haystack))
def assert_does_not_contain(needle, haystack):
    if needle in haystack:
        raise Exception('Did not expect to find term: %s\n%s' % (needle, haystack))
def assert_equals(expected, actual):
    if expected != actual:
        raise Exception('Expected equality of %s and %s.' % (expected, actual))
class WebSession(object):
"""A class that allows navigation of web pages keeping cookie session."""
PROGRESS_LOCK = threading.Lock()
MAX_RETRIES = 3
RETRY_SLEEP_SEC = 3
GET_COUNT = 0
POST_COUNT = 0
RETRY_COUNT = 0
PROGRESS_BATCH = 10
RESPONSE_TIME_HISTOGRAM = [0, 0, 0, 0, 0, 0]
def __init__(self, uid, common_headers=None):
if common_headers is None:
common_headers = {}
self.uid = uid
self.common_headers = common_headers
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
@classmethod
def increment_duration_bucket(cls, index):
cls.RESPONSE_TIME_HISTOGRAM[index] += 1
@classmethod
def update_duration(cls, duration):
if duration > 30:
cls.increment_duration_bucket(0)
elif duration > 15:
cls.increment_duration_bucket(1)
elif duration > 7:
cls.increment_duration_bucket(2)
elif duration > 3:
cls.increment_duration_bucket(3)
elif duration > 1:
cls.increment_duration_bucket(4)
else:
cls.increment_duration_bucket(5)
@classmethod
def log_progress(cls, force=False):
update = ((cls.GET_COUNT + cls.POST_COUNT) % (
cls.PROGRESS_BATCH) == 0)
if update or force:
logging.info(
'GET/POST:[%s, %s], RETRIES:[%s], SLA:%s',
cls.GET_COUNT, cls.POST_COUNT, cls.RETRY_COUNT,
cls.RESPONSE_TIME_HISTOGRAM)
def get_cookie_value(self, name):
for cookie in self.cj:
if cookie.name == name:
return cookie.value
return None
def is_soft_error(self, http_error):
"""Checks if HTTPError is due to starvation of frontend instances."""
body = http_error.fp.read()
# this is the text specific to the front end instance starvation, which
# is a retriable error for both GET and POST; normal HTTP error 500 has
# this specific text '<h1>500 Internal Server Error</h1>'
if http_error.code == 500 and '<h1>Error: Server Error</h1>' in body:
return True
logging.error(
'Non-retriable HTTP %s error:\n%s', http_error.code, body)
return False
def open(self, request, hint):
"""Executes any HTTP request."""
start_time = time.time()
try:
try_count = 0
while True:
try:
return self.opener.open(request)
except urllib2.HTTPError as he:
if (
try_count < WebSession.MAX_RETRIES and
self.is_soft_error(he)):
try_count += 1
with WebSession.PROGRESS_LOCK:
WebSession.RETRY_COUNT += 1
time.sleep(WebSession.RETRY_SLEEP_SEC)
continue
raise he
except Exception as e:
logging.info(
'Error in session %s executing: %s', self.uid, hint)
raise e
finally:
with WebSession.PROGRESS_LOCK:
self.update_duration(time.time() - start_time)
def get(self, url, expected_code=200):
"""HTTP GET."""
with WebSession.PROGRESS_LOCK:
WebSession.GET_COUNT += 1
self.log_progress()
request = urllib2.Request(url)
for key, value in self.common_headers.items():
request.add_header(key, value)
response = self.open(request, 'GET %s' % url)
assert_equals(expected_code, response.code)
return response.read()
def post(self, url, args_dict, expected_code=200):
"""HTTP POST."""
with WebSession.PROGRESS_LOCK:
WebSession.POST_COUNT += 1
self.log_progress()
data = None
if args_dict:
data = urllib.urlencode(args_dict)
request = urllib2.Request(url, data)
for key, value in self.common_headers.items():
request.add_header(key, value)
response = self.open(request, 'POST %s' % url)
assert_equals(expected_code, response.code)
return response.read()
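# Illustrative sketch (not part of the original script): exercises the SLA
# histogram kept on WebSession above. Bucket order matches the legend logged
# by run_all(), i.e. [>30s, >15s, >7s, >3s, >1s, <=1s], so the three sample
# durations below land in buckets 0, 3 and 5 respectively.
def _demo_sla_buckets():
    for duration in (45.0, 5.0, 0.2):
        WebSession.update_duration(duration)
    logging.info('Histogram after demo: %s', WebSession.RESPONSE_TIME_HISTOGRAM)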
class TaskThread(threading.Thread):
"""Runs a task in a separate thread."""
def __init__(self, func, name=None):
super(TaskThread, self).__init__()
self.func = func
self.exception = None
self.name = name
@classmethod
def start_all_tasks(cls, tasks):
"""Starts all tasks."""
for task in tasks:
task.start()
@classmethod
def check_all_tasks(cls, tasks):
"""Checks results of all tasks; fails on the first exception found."""
failed_count = 0
for task in tasks:
while True:
# Timeouts should happen after 30 seconds.
task.join(30)
if task.isAlive():
logging.info('Still waiting for: %s.', task.name)
continue
else:
break
if task.exception:
failed_count += 1
if failed_count:
            raise Exception('Tasks failed: %s' % failed_count)
@classmethod
def execute_task_list(cls, tasks):
"""Starts all tasks and checks the results."""
cls.start_all_tasks(tasks)
cls.check_all_tasks(tasks)
def run(self):
try:
self.func()
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.error('Error in %s: %s', self.name, e)
            # remember the failure so check_all_tasks() can detect it
            self.exception = e
            self.exc_info = sys.exc_info()
            raise self.exc_info[1], None, self.exc_info[2]
class PeerReviewLoadTest(object):
"""A peer review load test."""
def __init__(self, base_url, uid):
self.uid = uid
self.host = base_url
# this is an impersonation identity for the actor thread
self.email = 'load_test_bot_%[email protected]' % self.uid
self.name = 'Load Test Bot #%s' % self.uid
# begin web session
impersonate_header = {
'email': self.email, 'user_id': u'impersonation-%s' % self.uid}
self.session = WebSession(
uid=uid,
common_headers={'Gcb-Impersonate': json.dumps(impersonate_header)})
def run(self):
self.register_if_has_to()
self.submit_peer_review_assessment_if_possible()
while self.count_completed_reviews() < 2:
self.request_and_do_a_review()
def get_hidden_field(self, name, body):
# The "\s*" denotes arbitrary whitespace; sometimes, this tag is split
# across multiple lines in the HTML.
# pylint: disable-msg=anomalous-backslash-in-string
reg = re.compile(
'<input type="hidden" name="%s"\s* value="([^"]*)">' % name)
# pylint: enable-msg=anomalous-backslash-in-string
return reg.search(body).group(1)
def get_js_var(self, name, body):
reg = re.compile('%s = \'([^\']*)\';\n' % name)
return reg.search(body).group(1)
def get_draft_review_url(self, body):
"""Returns the URL of a draft review on the review dashboard."""
# The "\s*" denotes arbitrary whitespace; sometimes, this tag is split
# across multiple lines in the HTML.
# pylint: disable-msg=anomalous-backslash-in-string
reg = re.compile(
'<a href="([^"]*)">Assignment [0-9]+</a>\s*\(Draft\)')
# pylint: enable-msg=anomalous-backslash-in-string
result = reg.search(body)
if result is None:
return None
return result.group(1)
def register_if_has_to(self):
"""Performs student registration action."""
body = self.session.get('%s/' % self.host)
assert_contains('Logout', body)
        if 'href="register"' not in body:
body = self.session.get('%s/student/home' % self.host)
assert_contains(self.email, body)
assert_contains(self.name, body)
return False
body = self.session.get('%s/register' % self.host)
xsrf_token = self.get_hidden_field('xsrf_token', body)
data = {'xsrf_token': xsrf_token, 'form01': self.name}
body = self.session.post('%s/register' % self.host, data)
body = self.session.get('%s/' % self.host)
assert_contains('Logout', body)
assert_does_not_contain('href="register"', body)
return True
def submit_peer_review_assessment_if_possible(self):
"""Submits the peer review assessment."""
body = self.session.get(
'%s/assessment?name=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))
assert_contains('You may only submit this assignment once', body)
if 'Submitted assignment' in body:
# The assignment was already submitted.
return True
assessment_xsrf_token = self.get_js_var('assessmentXsrfToken', body)
answers = [
{'index': 0, 'type': 'regex',
'value': 'Answer 0 by %s' % self.email},
{'index': 1, 'type': 'choices', 'value': self.uid},
{'index': 2, 'type': 'regex',
'value': 'Answer 2 by %s' % self.email},
]
data = {
'answers': json.dumps(answers),
'assessment_type': LEGACY_REVIEW_UNIT_ID,
'score': 0,
'xsrf_token': assessment_xsrf_token,
}
body = self.session.post('%s/answer' % self.host, data)
assert_contains('Review peer assignments', body)
return True
def request_and_do_a_review(self):
"""Request a new review, wait for it to be granted, then submit it."""
review_dashboard_url = (
'%s/reviewdashboard?unit=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))
completed = False
while not completed:
# Get peer review dashboard and inspect it.
body = self.session.get(review_dashboard_url)
assert_contains('Assignments for your review', body)
assert_contains('Review a new assignment', body)
# Pick first pending review if any or ask for a new review.
draft_review_url = self.get_draft_review_url(body)
if draft_review_url: # There is a pending review. Choose it.
body = self.session.get(
'%s/%s' % (self.host, draft_review_url))
else: # Request a new assignment to review.
assert_contains('xsrf_token', body)
xsrf_token = self.get_hidden_field('xsrf_token', body)
data = {
'unit_id': LEGACY_REVIEW_UNIT_ID,
'xsrf_token': xsrf_token,
}
body = self.session.post(review_dashboard_url, data)
# It is possible that we fail to get a new review because the
# old one is now visible, but was not yet visible when we asked
# for the dashboard page.
if (
'You must complete all assigned reviews before you '
'can request a new one.' in body):
continue
# It is possible that no submissions available for review yet.
# Wait for a while until they become available on the dashboard
# page.
            if 'Back to the review dashboard' not in body:
assert_contains('Assignments for your review', body)
# Sleep for a random number of seconds between 1 and 4.
time.sleep(1.0 + random.random() * 3.0)
continue
# Submit the review.
review_xsrf_token = self.get_js_var('assessmentXsrfToken', body)
answers = [
{'index': 0, 'type': 'choices', 'value': 0},
{'index': 1, 'type': 'regex',
'value': 'Review 0 by %s' % self.email},
]
data = {
'answers': json.dumps(answers),
'assessment_type': None,
'is_draft': 'false',
'key': self.get_js_var('assessmentGlobals.key', body),
'score': 0,
'unit_id': LEGACY_REVIEW_UNIT_ID,
'xsrf_token': review_xsrf_token,
}
body = self.session.post('%s/review' % self.host, data)
assert_contains('Your review has been submitted', body)
return True
def count_completed_reviews(self):
"""Counts the number of reviews that the actor has completed."""
review_dashboard_url = (
'%s/reviewdashboard?unit=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))
body = self.session.get(review_dashboard_url)
num_completed = body.count('(Completed)')
return num_completed
def run_all(args):
"""Runs test scenario in multiple threads."""
if args.thread_count < 1 or args.thread_count > 256:
raise Exception('Please use between 1 and 256 threads.')
start_time = time.time()
logging.info('Started testing: %s', args.base_url)
logging.info('base_url: %s', args.base_url)
logging.info('start_uid: %s', args.start_uid)
logging.info('thread_count: %s', args.thread_count)
logging.info('iteration_count: %s', args.iteration_count)
logging.info('SLAs are [>30s, >15s, >7s, >3s, >1s, <1s]')
try:
for iteration_index in range(0, args.iteration_count):
logging.info('Started iteration: %s', iteration_index)
tasks = []
WebSession.PROGRESS_BATCH = args.thread_count
for index in range(0, args.thread_count):
test = PeerReviewLoadTest(
args.base_url,
(
args.start_uid +
iteration_index * args.thread_count +
index))
task = TaskThread(
test.run, name='PeerReviewLoadTest-%s' % index)
tasks.append(task)
try:
TaskThread.execute_task_list(tasks)
except Exception as e:
logging.info('Failed iteration: %s', iteration_index)
raise e
finally:
WebSession.log_progress(force=True)
logging.info('Done! Duration (s): %s', time.time() - start_time)
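# Illustrative sketch (not part of the original script): drives a single
# PeerReviewLoadTest actor without the thread machinery, e.g. to debug one
# bot against a development server; the default base URL below is a
# placeholder assumption.
def run_single_actor(base_url='http://localhost:8080', uid=1):
    """Registers one bot, submits its assessment and completes two reviews."""
    bot = PeerReviewLoadTest(base_url, uid)
    bot.run()
    logging.info(
        'Bot %s completed %s reviews.', uid, bot.count_completed_reviews())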
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
run_all(PARSER.parse_args())
|
|
'''
Video
=====
Core class for reading video files and managing the
:class:`kivy.graphics.texture.Texture` video.
.. versionchanged:: 1.8.0
    There are now two distinct GStreamer implementations: one using Gi/Gst,
    working for both Python 2 and 3 with GStreamer 1.0, and one using PyGST,
    working only for Python 2 + GStreamer 0.10.
    If you have issues with GStreamer, have a look at
    :ref:`gstreamer-compatibility`.
.. note::
    Recording is not supported.
'''
__all__ = ('VideoBase', 'Video')
from kivy.clock import Clock
from kivy.core import core_select_lib
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.compat import PY2
class VideoBase(EventDispatcher):
'''VideoBase, a class used to implement a video reader.
:Parameters:
        `filename` : str
            Filename of the video. Can be a file or a URI.
        `eos` : str, defaults to 'stop'
            Action to take when EOS is hit. Can be one of 'pause', 'stop' or
            'loop'.
            .. versionchanged::
                added 'pause'
        `async` : bool, defaults to True
            Load the video asynchronously (may not be supported by all
            providers).
`autoplay` : bool, defaults to False
Auto play the video on init.
:Events:
`on_eos`
Fired when EOS is hit.
`on_load`
Fired when the video is loaded and the texture is available.
`on_frame`
Fired when a new frame is written to the texture.
'''
__slots__ = ('_wantplay', '_buffer', '_filename', '_texture',
'_volume', 'eos', '_state', '_async', '_autoplay')
__events__ = ('on_eos', 'on_load', 'on_frame')
def __init__(self, **kwargs):
kwargs.setdefault('filename', None)
kwargs.setdefault('eos', 'stop')
kwargs.setdefault('async', True)
kwargs.setdefault('autoplay', False)
super(VideoBase, self).__init__()
self._wantplay = False
self._buffer = None
self._filename = None
self._texture = None
self._volume = 1.
self._state = ''
self._autoplay = kwargs.get('autoplay')
self._async = kwargs.get('async')
self.eos = kwargs.get('eos')
if self.eos == 'pause':
Logger.warning("'pause' is deprecated. Use 'stop' instead.")
self.eos = 'stop'
self.filename = kwargs.get('filename')
Clock.schedule_interval(self._update, 1 / 30.)
if self._autoplay:
self.play()
def __del__(self):
self.unload()
def on_eos(self):
pass
def on_load(self):
pass
def on_frame(self):
pass
def _get_filename(self):
return self._filename
def _set_filename(self, filename):
if filename == self._filename:
return
self.unload()
self._filename = filename
if self._filename is None:
return
self.load()
filename = property(lambda self: self._get_filename(),
lambda self, x: self._set_filename(x),
doc='Get/set the filename/uri of the current video')
def _get_position(self):
return 0
def _set_position(self, pos):
self.seek(pos)
position = property(lambda self: self._get_position(),
lambda self, x: self._set_position(x),
doc='Get/set the position in the video (in seconds)')
def _get_volume(self):
return self._volume
def _set_volume(self, volume):
self._volume = volume
volume = property(lambda self: self._get_volume(),
lambda self, x: self._set_volume(x),
doc='Get/set the volume in the video (1.0 = 100%)')
def _get_duration(self):
return 0
duration = property(lambda self: self._get_duration(),
doc='Get the video duration (in seconds)')
def _get_texture(self):
return self._texture
texture = property(lambda self: self._get_texture(),
doc='Get the video texture')
def _get_state(self):
return self._state
state = property(lambda self: self._get_state(),
doc='Get the video playing status')
def _do_eos(self, *args):
'''.. versionchanged:: 1.4.0
Now dispatches the `on_eos` event.
'''
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.play()
self.dispatch('on_eos')
def _update(self, dt):
'''Update the video content to texture.
'''
pass
def seek(self, percent):
'''Move on percent position'''
pass
def stop(self):
'''Stop the video playing'''
self._state = ''
def pause(self):
'''Pause the video
.. versionadded:: 1.4.0
'''
self._state = 'paused'
def play(self):
'''Play the video'''
self._state = 'playing'
def load(self):
'''Load the video from the current filename'''
pass
def unload(self):
'''Unload the actual video'''
self._state = ''
# Load the appropriate provider
video_providers = []
try:
from kivy.lib.gstplayer import GstPlayer
video_providers += [('gstplayer', 'video_gstplayer', 'VideoGstplayer')]
except ImportError:
#video_providers += [('gi', 'video_gi', 'VideoGi')]
if PY2:
        # if people do not have gi, fall back on pygst (Python 2 only)
video_providers += [
('pygst', 'video_pygst', 'VideoPyGst')]
video_providers += [
('ffmpeg', 'video_ffmpeg', 'VideoFFMpeg'),
('pyglet', 'video_pyglet', 'VideoPyglet'),
('null', 'video_null', 'VideoNull')]
Video = core_select_lib('video', video_providers)
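# Illustrative sketch (not part of the original module): minimal use of the
# selected Video provider, assuming a provider was actually found above and
# that 'example.avi' is a placeholder filename. The event name comes from
# VideoBase.__events__.
def _demo_video_playback(filename='example.avi'):
    video = Video(filename=filename, eos='loop')
    def _on_load(*largs):
        Logger.info('demo: video loaded, duration=%s' % video.duration)
    video.bind(on_load=_on_load)
    video.play()
    return video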
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a local network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local
network gateway operation.
:type parameters:
~azure.mgmt.network.v2016_12_01.models.LocalNetworkGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns LocalNetworkGateway or
ClientRawResponse<LocalNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_12_01.models.LocalNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_12_01.models.LocalNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}
def get(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LocalNetworkGateway or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2016_12_01.models.LocalNetworkGateway or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}
def _delete_initial(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LocalNetworkGateway
:rtype:
~azure.mgmt.network.v2016_12_01.models.LocalNetworkGatewayPaged[~azure.mgmt.network.v2016_12_01.models.LocalNetworkGateway]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'}
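# Illustrative sketch (not part of the generated code): typical call pattern
# for this operations class, assuming `network_client` is a management client
# exposing it as `local_network_gateways` and `parameters` is a
# models.LocalNetworkGateway instance; 'example-lngw' is a placeholder name.
def _example_local_network_gateway_usage(network_client, resource_group_name, parameters):
    poller = network_client.local_network_gateways.create_or_update(
        resource_group_name, 'example-lngw', parameters)
    gateway = poller.result()  # block until the long-running operation finishes
    fetched = network_client.local_network_gateways.get(
        resource_group_name, 'example-lngw')
    names = [gw.name for gw in
             network_client.local_network_gateways.list(resource_group_name)]
    return gateway, fetched, names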
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CatalogPage'
db.create_table(u'ga_resources_catalogpage', (
(u'page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
('public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
))
db.send_create_signal(u'ga_resources', ['CatalogPage'])
# Adding model 'DataResource'
db.create_table(u'ga_resources_dataresource', (
(u'page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
('content', self.gf('mezzanine.core.fields.RichTextField')()),
('resource_file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
('resource_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('resource_config', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('last_change', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('last_refresh', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('next_refresh', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('refresh_every', self.gf('timedelta.fields.TimedeltaField')(null=True, blank=True)),
('md5sum', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('metadata_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('metadata_xml', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('native_bounding_box', self.gf('django.contrib.gis.db.models.fields.PolygonField')(null=True, blank=True)),
('bounding_box', self.gf('django.contrib.gis.db.models.fields.PolygonField')(null=True, blank=True)),
('three_d', self.gf('django.db.models.fields.BooleanField')(default=False)),
('native_srs', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('driver', self.gf('django.db.models.fields.CharField')(default='ga_resources.drivers.spatialite', max_length=255)),
('big', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'ga_resources', ['DataResource'])
# Adding model 'OrderedResource'
db.create_table(u'ga_resources_orderedresource', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('resource_group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ga_resources.ResourceGroup'])),
('data_resource', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ga_resources.DataResource'])),
('ordering', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal(u'ga_resources', ['OrderedResource'])
# Adding model 'ResourceGroup'
db.create_table(u'ga_resources_resourcegroup', (
(u'page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
('is_timeseries', self.gf('django.db.models.fields.BooleanField')(default=False)),
('min_time', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('max_time', self.gf('django.db.models.fields.DateTimeField')(null=True)),
))
db.send_create_signal(u'ga_resources', ['ResourceGroup'])
# Adding model 'RelatedResource'
db.create_table(u'ga_resources_relatedresource', (
(u'page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
('content', self.gf('mezzanine.core.fields.RichTextField')()),
('resource_file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('foreign_resource', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ga_resources.DataResource'])),
('foreign_key', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('local_key', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('left_index', self.gf('django.db.models.fields.BooleanField')(default=False)),
('right_index', self.gf('django.db.models.fields.BooleanField')(default=False)),
('how', self.gf('django.db.models.fields.CharField')(default='left', max_length=8)),
('driver', self.gf('django.db.models.fields.CharField')(default='ga_resources.drivers.related.excel', max_length=255)),
('key_transform', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'ga_resources', ['RelatedResource'])
# Adding model 'Style'
db.create_table(u'ga_resources_style', (
(u'page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
('legend', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('legend_width', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('legend_height', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('stylesheet', self.gf('django.db.models.fields.TextField')()),
('public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
))
db.send_create_signal(u'ga_resources', ['Style'])
# Adding model 'RenderedLayer'
db.create_table(u'ga_resources_renderedlayer', (
(u'page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
('content', self.gf('mezzanine.core.fields.RichTextField')()),
('data_resource', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ga_resources.DataResource'])),
('default_style', self.gf('django.db.models.fields.related.ForeignKey')(related_name='default_for_layer', to=orm['ga_resources.Style'])),
('default_class', self.gf('django.db.models.fields.CharField')(default='default', max_length=255)),
('public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
))
db.send_create_signal(u'ga_resources', ['RenderedLayer'])
# Adding M2M table for field styles on 'RenderedLayer'
m2m_table_name = db.shorten_name(u'ga_resources_renderedlayer_styles')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('renderedlayer', models.ForeignKey(orm[u'ga_resources.renderedlayer'], null=False)),
('style', models.ForeignKey(orm[u'ga_resources.style'], null=False))
))
db.create_unique(m2m_table_name, ['renderedlayer_id', 'style_id'])
def backwards(self, orm):
# Deleting model 'CatalogPage'
db.delete_table(u'ga_resources_catalogpage')
# Deleting model 'DataResource'
db.delete_table(u'ga_resources_dataresource')
# Deleting model 'OrderedResource'
db.delete_table(u'ga_resources_orderedresource')
# Deleting model 'ResourceGroup'
db.delete_table(u'ga_resources_resourcegroup')
# Deleting model 'RelatedResource'
db.delete_table(u'ga_resources_relatedresource')
# Deleting model 'Style'
db.delete_table(u'ga_resources_style')
# Deleting model 'RenderedLayer'
db.delete_table(u'ga_resources_renderedlayer')
# Removing M2M table for field styles on 'RenderedLayer'
db.delete_table(db.shorten_name(u'ga_resources_renderedlayer_styles'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ga_resources.catalogpage': {
'Meta': {'ordering': "['title']", 'object_name': 'CatalogPage', '_ormbases': [u'pages.Page']},
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'ga_resources.dataresource': {
'Meta': {'ordering': "['title']", 'object_name': 'DataResource', '_ormbases': [u'pages.Page']},
'big': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bounding_box': ('django.contrib.gis.db.models.fields.PolygonField', [], {'null': 'True', 'blank': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'driver': ('django.db.models.fields.CharField', [], {'default': "'ga_resources.drivers.spatialite'", 'max_length': '255'}),
'last_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_refresh': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'metadata_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'metadata_xml': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'native_bounding_box': ('django.contrib.gis.db.models.fields.PolygonField', [], {'null': 'True', 'blank': 'True'}),
'native_srs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'next_refresh': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'refresh_every': ('timedelta.fields.TimedeltaField', [], {'null': 'True', 'blank': 'True'}),
'resource_config': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resource_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'resource_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'three_d': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ga_resources.orderedresource': {
'Meta': {'object_name': 'OrderedResource'},
'data_resource': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ga_resources.DataResource']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resource_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ga_resources.ResourceGroup']"})
},
u'ga_resources.relatedresource': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'RelatedResource', '_ormbases': [u'pages.Page']},
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'driver': ('django.db.models.fields.CharField', [], {'default': "'ga_resources.drivers.related.excel'", 'max_length': '255'}),
'foreign_key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'foreign_resource': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ga_resources.DataResource']"}),
'how': ('django.db.models.fields.CharField', [], {'default': "'left'", 'max_length': '8'}),
'key_transform': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'left_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'local_key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'resource_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'right_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'ga_resources.renderedlayer': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'RenderedLayer', '_ormbases': [u'pages.Page']},
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'data_resource': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ga_resources.DataResource']"}),
'default_class': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'default_style': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'default_for_layer'", 'to': u"orm['ga_resources.Style']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'styles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ga_resources.Style']", 'symmetrical': 'False'})
},
u'ga_resources.resourcegroup': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'ResourceGroup', '_ormbases': [u'pages.Page']},
'is_timeseries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'min_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ga_resources.DataResource']", 'symmetrical': 'False', 'through': u"orm['ga_resources.OrderedResource']", 'blank': 'True'})
},
u'ga_resources.style': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Style', '_ormbases': [u'pages.Page']},
'legend': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'legend_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'legend_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'stylesheet': ('django.db.models.fields.TextField', [], {})
},
u'pages.page': {
'Meta': {'ordering': "(u'titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['ga_resources']
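# Illustrative sketch (not part of the generated migration): applies this app's
# migrations programmatically with South, assuming a configured Django settings
# module; the shell equivalent is `python manage.py migrate ga_resources`.
def _apply_ga_resources_migrations():
    from django.core.management import call_command
    call_command('migrate', 'ga_resources')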
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
print, rrr, profile = ut.inject2(__name__, '[_plh]')
VERB_PIPELINE = ut.get_argflag(('--verb-pipeline', '--verb-pipe')) or ut.VERYVERBOSE
VERB_TESTDATA = ut.get_argflag('--verb-testdata') or ut.VERYVERBOSE
def testrun_pipeline_upto(qreq_, stop_node=None, verbose=True):
r"""
Main tester function. Runs the pipeline by mirroring
`request_ibeis_query_L0`, but stops at a requested breakpoint and returns
the local variables.
    convenience: runs the pipeline for tests
this should mirror request_ibeis_query_L0
Ignore:
>>> # TODO: autogenerate
>>> # The following is a stub that starts the autogeneration process
>>> import utool as ut
>>> from ibeis.algo.hots import pipeline
>>> source = ut.get_func_sourcecode(pipeline.request_ibeis_query_L0,
>>> strip_docstr=True, stripdef=True,
>>> strip_comments=True)
>>> import re
>>> source = re.sub(r'^\s*$\n', '', source, flags=re.MULTILINE)
>>> print(source)
>>> ut.replace_between_tags(source, '', sentinal)
"""
from ibeis.algo.hots.pipeline import (
nearest_neighbors, baseline_neighbor_filter, weight_neighbors,
build_chipmatches, spatial_verification,
vsone_reranking, build_impossible_daids_list)
print('RUN PIPELINE UPTO: %s' % (stop_node,))
print(qreq_)
qreq_.lazy_load(verbose=verbose)
#---
if stop_node == 'build_impossible_daids_list':
return locals()
impossible_daids_list, Kpad_list = build_impossible_daids_list(qreq_)
#---
if stop_node == 'nearest_neighbors':
return locals()
nns_list = nearest_neighbors(qreq_, Kpad_list, verbose=verbose)
#---
if stop_node == 'baseline_neighbor_filter':
return locals()
nnvalid0_list = baseline_neighbor_filter(qreq_, nns_list, impossible_daids_list, verbose=verbose)
#---
if stop_node == 'weight_neighbors':
return locals()
weight_ret = weight_neighbors(qreq_, nns_list, nnvalid0_list, verbose=verbose)
filtkey_list, filtweights_list, filtvalids_list, filtnormks_list = weight_ret
#---
if stop_node == 'filter_neighbors':
raise AssertionError('no longer exists')
#---
if stop_node == 'build_chipmatches':
return locals()
cm_list_FILT = build_chipmatches(qreq_, nns_list, nnvalid0_list,
filtkey_list, filtweights_list,
filtvalids_list, filtnormks_list,
verbose=verbose)
#---
if stop_node == 'spatial_verification':
return locals()
cm_list_SVER = spatial_verification(qreq_, cm_list_FILT, verbose=verbose)
#---
if stop_node == 'vsone_reranking':
return locals()
if qreq_.qparams.rrvsone_on:
# VSONE RERANKING
cm_list_VSONERR = vsone_reranking(qreq_, cm_list_SVER, verbose=verbose)
cm_list = cm_list_VSONERR
else:
cm_list = cm_list_SVER
assert False, 'unknown stop_node=%r' % (stop_node,)
#qaid2_svtups = qreq_.metadata['qaid2_svtups']
return locals()
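# Illustrative sketch (not part of the original module): stops the pipeline at
# 'weight_neighbors' and pulls out the raw nearest-neighbor structures,
# mirroring what the testdata_* helpers below do internally; 'testdb1' is the
# same default database used throughout this file.
def _demo_stop_at_weight_neighbors(defaultdb='testdb1'):
    import ibeis
    qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb)
    locals_ = testrun_pipeline_upto(qreq_, 'weight_neighbors')
    return ut.dict_take(locals_, ['nns_list', 'nnvalid0_list'])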
def testdata_pre(stopnode, defaultdb='testdb1', p=['default'],
a=['default:qindex=0:1,dindex=0:5'], **kwargs):
""" New (1-1-2016) generic pipeline node testdata getter
Args:
stopnode (str):
defaultdb (str): (default = u'testdb1')
        p (list): (default = [u'default'])
        a (list): (default = [u'default:qindex=0:1,dindex=0:5'])
    Returns:
        tuple: (qreq_, args)
CommandLine:
python -m ibeis.algo.hots._pipeline_helpers --exec-testdata_pre --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots._pipeline_helpers import * # NOQA
>>> stopnode = 'build_chipmatches'
>>> defaultdb = 'testdb1'
>>> p = ['default:']
>>> a = ['default:qindex=0:1,dindex=0:5']
>>> qreq_, args = testdata_pre(stopnode, defaultdb, p, a)
"""
import ibeis
from ibeis.algo.hots import pipeline
qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb, p=p, a=a, **kwargs)
locals_ = testrun_pipeline_upto(qreq_, stopnode)
func = getattr(pipeline, stopnode)
argnames = ut.get_argnames(func)
# Hack to ignore qreq_, and verbose
for ignore in ['qreq_', 'ibs', 'verbose']:
try:
argnames.remove(ignore)
except ValueError:
pass
tupname = '_Ret_' + stopnode.upper()
args = ut.dict_take_asnametup(locals_, argnames, name=tupname)
return qreq_, args
#+--- OTHER TESTDATA FUNCS ---
def testdata_pre_weight_neighbors(defaultdb='testdb1', qaid_list=[1, 2], daid_list=None, codename='vsmany', cfgdict=None):
"""
TODO: replace testdata_pre_weight_neighbors with
>>> qreq_, args = plh.testdata_pre('weight_neighbors', defaultdb='testdb1',
>>> a=['default:qindex=0:1,dindex=0:5,hackerrors=False'],
>>> p=['default:codename=vsmany,bar_l2_on=True,fg_on=False'], verbose=True)
"""
if cfgdict is None:
cfgdict = dict(codename=codename)
import ibeis
p = 'default' + ut.get_cfg_lbl(cfgdict)
qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb, default_qaids=qaid_list, default_daids=daid_list, p=p)
ibs = qreq_.ibs
locals_ = testrun_pipeline_upto(qreq_, 'weight_neighbors')
nns_list, nnvalid0_list = ut.dict_take(locals_, ['nns_list', 'nnvalid0_list'])
# qreq_, args = testdata_pre('weight_neighbors', defaultdb=defaultdb, p=['default:bar_l2_on=True,fg_on=False'])
return ibs, qreq_, nns_list, nnvalid0_list
def testdata_sparse_matchinfo_nonagg(defaultdb='testdb1', p=['default']):
qreq_, args = testdata_pre('build_chipmatches', defaultdb=defaultdb, p=p)
internal_index = 1 if qreq_.qparams.vsone else 0
# qaid = qreq_.qaids[0]
# daid = qreq_.daids[1]
qaid = qreq_.qaids[0]
daid = qreq_.daids[1]
nns = args.nns_list[internal_index]
neighb_idx, neighb_dist = args.nns_list[internal_index]
neighb_valid0 = args.nnvalid0_list[internal_index]
neighb_score_list = args.filtweights_list[internal_index]
neighb_valid_list = args.filtvalids_list[internal_index]
neighb_normk = args.filtnormks_list[internal_index]
Knorm = qreq_.qparams.Knorm
args = (nns, neighb_idx, neighb_valid0, neighb_score_list, neighb_valid_list, neighb_normk, Knorm)
return qreq_, qaid, daid, args
def testdata_pre_baselinefilter(defaultdb='testdb1', qaid_list=None, daid_list=None, codename='vsmany'):
cfgdict = dict(codename=codename)
import ibeis
p = 'default' + ut.get_cfg_lbl(cfgdict)
qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb, default_qaids=qaid_list, default_daids=daid_list, p=p)
locals_ = testrun_pipeline_upto(qreq_, 'baseline_neighbor_filter')
nns_list, impossible_daids_list = ut.dict_take(locals_, ['nns_list', 'impossible_daids_list'])
return qreq_, nns_list, impossible_daids_list
def testdata_pre_sver(defaultdb='PZ_MTEST', qaid_list=None, daid_list=None):
"""
>>> from ibeis.algo.hots._pipeline_helpers import * # NOQA
"""
#from ibeis.algo import Config
cfgdict = dict()
import ibeis
p = 'default' + ut.get_cfg_lbl(cfgdict)
qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb, default_qaids=qaid_list, default_daids=daid_list, p=p)
ibs = qreq_.ibs
locals_ = testrun_pipeline_upto(qreq_, 'spatial_verification')
cm_list = locals_['cm_list_FILT']
#nnfilts_list = locals_['nnfilts_list']
return ibs, qreq_, cm_list
def testdata_post_sver(defaultdb='PZ_MTEST', qaid_list=None, daid_list=None, codename='vsmany', cfgdict=None):
"""
>>> from ibeis.algo.hots._pipeline_helpers import * # NOQA
"""
#from ibeis.algo import Config
if cfgdict is None:
cfgdict = dict(codename=codename)
import ibeis
p = 'default' + ut.get_cfg_lbl(cfgdict)
qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb, default_qaids=qaid_list, default_daids=daid_list, p=p)
ibs = qreq_.ibs
locals_ = testrun_pipeline_upto(qreq_, 'vsone_reranking')
cm_list = locals_['cm_list_SVER']
#nnfilts_list = locals_['nnfilts_list']
return ibs, qreq_, cm_list
def testdata_pre_vsonerr(defaultdb='PZ_MTEST', qaid_list=[1], daid_list='all'):
"""
>>> from ibeis.algo.hots._pipeline_helpers import * # NOQA
"""
cfgdict = dict(sver_output_weighting=True, codename='vsmany', rrvsone_on=True)
import ibeis
p = 'default' + ut.get_cfg_lbl(cfgdict)
qreq_ = ibeis.testdata_qreq_(defaultdb=defaultdb, default_qaids=qaid_list, default_daids=daid_list, p=p)
ibs = qreq_.ibs
qaid_list = qreq_.qaids.tolist()
qaid = qaid_list[0]
#daid_list = qreq_.daids.tolist()
if len(ibs.get_annot_groundtruth(qaid)) == 0:
print('WARNING: qaid=%r has no groundtruth' % (qaid,))
locals_ = testrun_pipeline_upto(qreq_, 'vsone_reranking')
cm_list = locals_['cm_list_SVER']
return ibs, qreq_, cm_list, qaid_list
def testdata_scoring(defaultdb='PZ_MTEST', qaid_list=[1], daid_list='all'):
from ibeis.algo.hots import vsone_pipeline
ibs, qreq_, prior_cm = testdata_matching(defaultdb=defaultdb, qaid_list=qaid_list, daid_list=daid_list)
config = qreq_.qparams
cm = vsone_pipeline.refine_matches(qreq_, prior_cm, config)
cm.evaluate_dnids(qreq_)
return qreq_, cm
def testdata_matching(*args, **kwargs):
"""
>>> from ibeis.algo.hots._pipeline_helpers import * # NOQA
"""
from ibeis.algo.hots import vsone_pipeline
from ibeis.algo.hots import scoring
from ibeis.algo.hots import pipeline # NOQA
ibs, qreq_, cm_list, qaid_list = testdata_pre_vsonerr(*args, **kwargs)
vsone_pipeline.prepare_vsmany_chipmatch(qreq_, cm_list)
nNameShortlist = qreq_.qparams.nNameShortlistVsone
nAnnotPerName = qreq_.qparams.nAnnotPerNameVsone
scoring.score_chipmatch_list(qreq_, cm_list, 'nsum')
vsone_pipeline.prepare_vsmany_chipmatch(qreq_, cm_list)
cm_shortlist = scoring.make_chipmatch_shortlists(qreq_, cm_list, nNameShortlist, nAnnotPerName)
prior_cm = cm_shortlist[0]
return ibs, qreq_, prior_cm
#L_______
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.algo.hots._pipeline_helpers
python -m ibeis.algo.hots._pipeline_helpers --allexamples
python -m ibeis.algo.hots._pipeline_helpers --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
import string
from importlib import import_module
import warnings
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import models, DEFAULT_DB_ALIAS
from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import SimpleLazyObject
from django.utils.translation import gettext_lazy as _
from shop import deferred
from shop.models.fields import JSONField
from shop.signals import customer_recognized
from shop.models.fields import ChoiceEnum, ChoiceEnumField
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore()
class CustomerState(ChoiceEnum):
UNRECOGNIZED = 0, _("Unrecognized")
GUEST = 1, _("Guest")
    REGISTERED = 2, _("Registered")
class CustomerQuerySet(models.QuerySet):
def _filter_or_exclude(self, negate, *args, **kwargs):
"""
Emulate filter queries on a Customer using attributes from the User object.
Example: Customer.objects.filter(last_name__icontains='simpson') will return
a queryset with customers whose last name contains "simpson".
"""
opts = self.model._meta
lookup_kwargs = {}
for key, lookup in kwargs.items():
try:
field_name = key[:key.index('__')]
except ValueError:
field_name = key
if field_name == 'pk':
field_name = opts.pk.name
try:
opts.get_field(field_name)
if isinstance(lookup, get_user_model()):
lookup.pk # force lazy object to resolve
lookup_kwargs[key] = lookup
except FieldDoesNotExist as fdne:
try:
get_user_model()._meta.get_field(field_name)
lookup_kwargs['user__' + key] = lookup
except FieldDoesNotExist:
raise fdne
except Exception as othex:
raise othex
result = super()._filter_or_exclude(negate, *args, **lookup_kwargs)
return result
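# --- Illustrative sketch (not part of the original module) ---
# The queryset above lets lookups on User fields be used directly on the
# Customer model, as in the docstring example. CustomerModel is the
# materialized model defined near the bottom of this module; this helper is
# only a sketch and is never called on import.
def _example_customer_filter():
    # 'last_name' lives on User, so the queryset rewrites it to 'user__last_name'.
    return CustomerModel.objects.filter(last_name__icontains='simpson')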
class CustomerManager(models.Manager):
"""
    Manager for the Customer database model. This manager can also cope with customers which have
    an entity in the database but otherwise are considered anonymous. The username of these
    so-called unrecognized customers is a compact version of the session key.
"""
BASE64_ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase + '.@'
REVERSE_ALPHABET = dict((c, i) for i, c in enumerate(BASE64_ALPHABET))
BASE36_ALPHABET = string.digits + string.ascii_lowercase
_queryset_class = CustomerQuerySet
@classmethod
def encode_session_key(cls, session_key):
"""
Session keys have base 36 and length 32. Since the field ``username`` accepts only up
to 30 characters, the session key is converted to a base 64 representation, resulting
in a length of approximately 28.
"""
return cls._encode(int(session_key[:32], 36), cls.BASE64_ALPHABET)
@classmethod
def decode_session_key(cls, compact_session_key):
"""
Decode a compact session key back to its original length and base.
"""
base_length = len(cls.BASE64_ALPHABET)
n = 0
for c in compact_session_key:
n = n * base_length + cls.REVERSE_ALPHABET[c]
return cls._encode(n, cls.BASE36_ALPHABET).zfill(32)
@classmethod
def _encode(cls, n, base_alphabet):
base_length = len(base_alphabet)
s = []
while True:
n, r = divmod(n, base_length)
s.append(base_alphabet[r])
if n == 0:
break
return ''.join(reversed(s))
def get_queryset(self):
"""
        Whenever we fetch from the Customer table, inner join with the User table to reduce the
        number of subsequent queries to the database.
"""
qs = self._queryset_class(self.model, using=self._db).select_related('user')
return qs
def create(self, *args, **kwargs):
if 'user' in kwargs and kwargs['user'].is_authenticated:
kwargs.setdefault('recognized', CustomerState.REGISTERED)
customer = super().create(*args, **kwargs)
return customer
def _get_visiting_user(self, session_key):
"""
        Since the Customer has a 1:1 relation with the User object, look up the User entity
        whose ``username`` (which must be unique) equals the encoded session key.
"""
username = self.encode_session_key(session_key)
try:
user = get_user_model().objects.get(username=username)
except get_user_model().DoesNotExist:
user = AnonymousUser()
return user
def get_from_request(self, request):
"""
        Return a Customer object for the current User object.
"""
if request.user.is_anonymous and request.session.session_key:
# the visitor is determined through the session key
user = self._get_visiting_user(request.session.session_key)
else:
user = request.user
try:
if user.customer:
return user.customer
except AttributeError:
pass
if request.user.is_authenticated:
customer, created = self.get_or_create(user=user)
if created: # `user` has been created by another app than shop
customer.recognize_as_registered(request)
else:
customer = VisitingCustomer()
return customer
def get_or_create_from_request(self, request):
if request.user.is_authenticated:
user = request.user
recognized = CustomerState.REGISTERED
else:
if not request.session.session_key:
request.session.cycle_key()
assert request.session.session_key
username = self.encode_session_key(request.session.session_key)
            # Create or get a previously created, inactive intermediate user,
            # who can later declare themselves as a guest or register as a valid Django user.
try:
user = get_user_model().objects.get(username=username)
except get_user_model().DoesNotExist:
user = get_user_model().objects.create_user(username)
user.is_active = False
user.save()
recognized = CustomerState.UNRECOGNIZED
customer, created = self.get_or_create(user=user, recognized=recognized)
return customer
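# --- Illustrative sketch (not part of the original module) ---
# Round trip of the session-key compaction documented in
# CustomerManager.encode_session_key/decode_session_key. The sample key is an
# arbitrary 32-character base-36 string; nothing here runs on import.
def _session_key_roundtrip_example():
    session_key = '7ax9' * 8  # 32 characters, digits and lowercase only
    compact = CustomerManager.encode_session_key(session_key)
    assert len(compact) <= 30  # fits into the 30-character username field
    assert CustomerManager.decode_session_key(compact) == session_key
    return compact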
class BaseCustomer(models.Model, metaclass=deferred.ForeignKeyBuilder):
"""
Base class for shop customers.
    Customer is a profile model that extends
    the Django User model if a customer is authenticated. On checkout, a User
    object is created for anonymous customers as well (with an unusable password).
"""
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
primary_key=True,
related_name='customer',
)
recognized = ChoiceEnumField(
_("Recognized as"),
enum_type=CustomerState,
help_text=_("Designates the state the customer is recognized as."),
)
last_access = models.DateTimeField(
_("Last accessed"),
default=timezone.now,
)
extra = JSONField(
editable=False,
verbose_name=_("Extra information about this customer"),
)
objects = CustomerManager()
class Meta:
abstract = True
def __str__(self):
return self.get_username()
def get_username(self):
return self.user.get_username()
def get_full_name(self):
return self.user.get_full_name()
@property
def first_name(self):
# pending deprecation: warnings.warn("Property first_name is deprecated and will be removed")
return self.user.first_name
@first_name.setter
def first_name(self, value):
# pending deprecation: warnings.warn("Property first_name is deprecated and will be removed")
self.user.first_name = value
@property
def last_name(self):
# pending deprecation: warnings.warn("Property last_name is deprecated and will be removed")
return self.user.last_name
@last_name.setter
def last_name(self, value):
# pending deprecation: warnings.warn("Property last_name is deprecated and will be removed")
self.user.last_name = value
@property
def email(self):
return self.user.email
@email.setter
def email(self, value):
self.user.email = value
@property
def date_joined(self):
return self.user.date_joined
@property
def last_login(self):
return self.user.last_login
@property
def groups(self):
return self.user.groups
@property
def is_anonymous(self):
return self.recognized in (CustomerState.UNRECOGNIZED, CustomerState.GUEST)
@property
def is_authenticated(self):
return self.recognized is CustomerState.REGISTERED
@property
def is_recognized(self):
"""
Return True if the customer is associated with a User account.
        Unrecognized customers have accessed the shop, but have neither registered
        an account nor declared themselves as guests.
"""
return self.recognized is not CustomerState.UNRECOGNIZED
@property
def is_guest(self):
"""
        Return True if the customer isn't associated with a valid User account, but declared
        themselves as a guest by leaving an email address.
"""
return self.recognized is CustomerState.GUEST
def recognize_as_guest(self, request=None, commit=True):
"""
Recognize the current customer as guest customer.
"""
if self.recognized != CustomerState.GUEST:
self.recognized = CustomerState.GUEST
if commit:
self.save(update_fields=['recognized'])
customer_recognized.send(sender=self.__class__, customer=self, request=request)
@property
def is_registered(self):
"""
        Return True if the customer has registered an account.
"""
return self.recognized is CustomerState.REGISTERED
def recognize_as_registered(self, request=None, commit=True):
"""
Recognize the current customer as registered customer.
"""
if self.recognized != CustomerState.REGISTERED:
self.recognized = CustomerState.REGISTERED
if commit:
self.save(update_fields=['recognized'])
customer_recognized.send(sender=self.__class__, customer=self, request=request)
@property
def is_visitor(self):
"""
Always False for instantiated Customer objects.
"""
return False
@property
def is_expired(self):
"""
Return True if the session of an unrecognized customer expired or is not decodable.
Registered customers never expire.
        Guest customers only expire if they failed to complete the purchase.
"""
is_expired = False
if self.recognized is CustomerState.UNRECOGNIZED:
try:
session_key = CustomerManager.decode_session_key(self.user.username)
is_expired = not SessionStore.exists(session_key)
except KeyError:
msg = "Unable to decode username '{}' as session key"
warnings.warn(msg.format(self.user.username))
is_expired = True
return is_expired
def get_or_assign_number(self):
"""
        Hook to get or to assign the customer's number. It is invoked every time an Order object
        is created. Using a customer number which is different from the primary key is useful for
        merchants wishing to assign sequential numbers only to customers who actually bought
        something. Otherwise the customer number (primary key) is increased whenever a site visitor
        puts something into the cart. If they never proceed to checkout, that entity expires and may
        be deleted at any time in the future.
"""
return self.get_number()
def get_number(self):
"""
        Hook to get the customer's number. Customers who haven't purchased anything may return None.
"""
return str(self.user_id)
def save(self, **kwargs):
if 'update_fields' not in kwargs:
self.user.save(using=kwargs.get('using', DEFAULT_DB_ALIAS))
super().save(**kwargs)
def delete(self, *args, **kwargs):
if self.user.is_active and self.recognized is CustomerState.UNRECOGNIZED:
# invalid state of customer, keep the referred User
super().delete(*args, **kwargs)
else:
# also delete self through cascading
self.user.delete(*args, **kwargs)
CustomerModel = deferred.MaterializedModel(BaseCustomer)
class VisitingCustomer:
"""
    This dummy object is used for customers who just visit the site. Whenever a VisitingCustomer
    adds something to the cart, this object is replaced with a real Customer object.
"""
user = AnonymousUser()
def __str__(self):
return 'Visitor'
@property
def email(self):
return ''
@email.setter
def email(self, value):
pass
@property
def is_anonymous(self):
return True
@property
def is_authenticated(self):
return False
@property
def is_recognized(self):
return False
@property
def is_guest(self):
return False
@property
def is_registered(self):
return False
@property
def is_visitor(self):
return True
def save(self, **kwargs):
pass
@receiver(user_logged_in)
def handle_customer_login(sender, **kwargs):
"""
Update request.customer to an authenticated Customer
"""
try:
kwargs['request'].customer = kwargs['user'].customer
except (AttributeError, ObjectDoesNotExist):
kwargs['request'].customer = SimpleLazyObject(lambda: CustomerModel.objects.get_from_request(kwargs['request']))
@receiver(user_logged_out)
def handle_customer_logout(sender, **kwargs):
"""
Update request.customer to a visiting Customer
"""
# defer assignment to anonymous customer, since the session_key is not yet rotated
kwargs['request'].customer = SimpleLazyObject(lambda: CustomerModel.objects.get_from_request(kwargs['request']))
|
|
"""
Applies a (previously trained) Caffe classification model to new image data.
This code takes a brute-force approach to generating per-pixel predictions;
it simply extracts a tile around every pixel (that is sufficiently far away
from the image border) and runs each tile through Caffe.
This entails many repeated calculations and is not computationally efficient.
More sophisticated approaches for dense predictions exist and could
be considered in the future (e.g. see second reference below).
Alternatively, you can change this code to evaluate only a subset of the
pixels and then inpaint the remainder.
REFERENCES:
o http://caffe.berkeleyvision.org/
  o Long et al. "Fully convolutional networks for semantic segmentation"
http://arxiv.org/pdf/1411.4038.pdf
"""
__author__ = "mjp"
__copyright__ = "Copyright 2015, JHU/APL"
__license__ = "Apache 2.0"
import os
import argparse
import time
import numpy as np
import lmdb
import pdb
import skimage, skimage.io, skimage.transform
import caffe # make sure PyCaffe is in your PYTHONPATH
def get_args():
"""Defines command line arguments for this script.
"""
parser = argparse.ArgumentParser(description="Deploy a classifier.")
parser.add_argument('--input', type=str,
dest="inputFile", required=True,
help="Input file to process (a .png)")
parser.add_argument('--network', type=str,
dest="netFile", required=True,
help="Caffe CNN network file")
parser.add_argument('--weights', type=str,
dest="weightFile", required=True,
help="Caffe parameters associated with network")
parser.add_argument('--gpu', type=int,
dest="gpuId", default=-1,
help="Which GPU to use (or -1 for CPU)")
parser.add_argument('--output-layer', type=str,
dest="outputLayer", default="pred",
help="Name of the CNN output layer to use for predictions")
return parser.parse_args()
def init_caffe_network(netFile, weightFile, gpuId):
"""Creates a caffe network object.
"""
# set CPU or GPU mode
if gpuId >= 0:
caffe.set_mode_gpu()
caffe.set_device(gpuId)
else:
caffe.set_mode_cpu()
# instantiate the Caffe model
#
# Note: PyCaffe also offers a different interface via caffe.Classifier;
    # however, it does things behind the scenes to the data
# (and also seems slower). So for this application, we'll stick
# with the somewhat lower-level interface provided by caffe.Net.
net = caffe.Net(netFile, weightFile, 1)
return net
def _interior_pixel_generator(X, tileDim, batchSize, mask=None):
"""An iterator over pixel indices that lie in the interior of an image.
Warning: this is fairly memory intensive (pre-computes the entire
list of indices).
Note: could potentially speed up the process of extracting subtiles by
creating a more efficient implementation; however, the bulk of the
runtime is usually associated with Caffe forward passes.
Parameters:
X := a (width x height) image
tileDim := the width/height of each square tile to extract
batchSize := number of tiles to extract each iteration
(dictated by Caffe model)
mask := a boolean matrix the same size as X; elements that are
true are eligible to be used as tile centers.
If None, there is no restriction on which pixels
to evaluate.
"""
m,n = X.shape
assert(np.mod(tileDim,2) == 1) # tile size must be odd
tileRadius = int(np.floor(tileDim/2.0))
nChannels = 1
Xmb = np.zeros((batchSize, nChannels, tileDim, tileDim), dtype=np.uint8)
# Used to restrict the set of pixels under consideration.
bitMask = np.ones(X.shape, dtype=bool)
# by default, we do not evaluate pixels that are too close to
# the border of the image.
bitMask[0:tileRadius, :] = 0
bitMask[(m-tileRadius):m, :] = 0
bitMask[:, 0:tileRadius] = 0
bitMask[:, (n-tileRadius):n] = 0
if mask is not None:
bitMask = bitMask & mask
# return indices and tiles in subsets of size batchSize
Idx = np.column_stack(np.nonzero(bitMask))
for ii in range(0, Idx.shape[0], batchSize):
nRet = min(batchSize, Idx.shape[0] - ii)
# creates the next "mini-batch"
for jj in range(nRet):
a = Idx[ii+jj,0] - tileRadius
b = Idx[ii+jj,0] + tileRadius + 1
c = Idx[ii+jj,1] - tileRadius
d = Idx[ii+jj,1] + tileRadius + 1
Xmb[jj, 0, :, :] = X[a:b, c:d]
yield Xmb, Idx[ii:(ii+nRet),:]
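# --- Illustrative sketch (not part of the original script) ---
# How _interior_pixel_generator tiles an image: for a 7x7 image and 3x3
# tiles, only the 5x5 interior pixels are eligible and they are yielded in
# mini-batches of the requested size. The numbers here are made up.
def _example_tile_generator():
    X = np.arange(49, dtype=np.uint8).reshape(7, 7)
    nTiles = 0
    for Xmb, idx in _interior_pixel_generator(X, 3, 4):
        nTiles += idx.shape[0]            # valid tiles in this mini-batch
        assert Xmb.shape == (4, 1, 3, 3)  # the batch buffer is always full-size
    assert nTiles == 25                   # 5x5 interior pixels
    return nTiles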
def predict_tiles(X, net, outputLayer):
"""Given an example generator whose features correspond to images,
produces a new stream of examples where X is now caffe features.
"""
assert(X.ndim == 2) # currently assumes a single channel image
Prob = np.zeros(X.shape, dtype=np.float32)
# make sure the desired feature layers exists in the network
if outputLayer not in net.blobs:
raise RuntimeError('could not find layer %s in model' % outputLayer)
# assumes the data layer is called "data"
if "data" not in net.blobs:
raise RuntimeError('could not find data layer in model - did you call it something other than "data"?')
nMiniBatch, nChannels, rows, cols = net.blobs['data'].data.shape
assert(rows == cols)
tileDim = rows
assert(nChannels == 1) # we assume this for now
#----------------------------------------
# process each mini-batch and save results
#----------------------------------------
lastChatter = -1
tic = time.time()
for Xmb, idx in _interior_pixel_generator(X, tileDim, nMiniBatch):
net.set_input_arrays(Xmb.astype(np.float32),
np.zeros((Xmb.shape[0],), dtype=np.float32))
net.forward()
# Extract predictions (for class 1) from network.
# For a multi-class problem, you would want to extract predictions
# for all classes.
output = net.blobs[outputLayer].data
pred = np.squeeze(output[0:idx.shape[0], 1])
for kk in range(idx.shape[0]):
Prob[idx[kk,0], idx[kk,1]] = pred[kk]
# report progress periodically
runtimeMin = np.floor((time.time() - tic)/60.)
if runtimeMin > lastChatter:
print('[info]: last pixel processed was (%d,%d); runtime=%d min' % (idx[-1,0], idx[-1,1], runtimeMin))
print('[info]: last batch min/max/mean prediction: %0.2f / %0.2f / %0.2f' % (np.min(pred), np.max(pred), np.mean(pred)))
lastChatter = runtimeMin
return Prob
if __name__ == "__main__":
args = get_args()
    print(args)
# create caffe model
net = init_caffe_network(args.netFile,
args.weightFile,
args.gpuId)
X = skimage.io.imread(args.inputFile)
    print('[info]: Processing image with dimensions %s and type %s' % (str(X.shape), X.dtype))
print('[info]: X min / max / mean: %0.2f / %0.2f / %0.2f' % (np.min(X), np.max(X), np.mean(X)))
assert(X.dtype == np.uint8)
# run caffe
# Note: these probabilities may not be properly calibrated (if we
# trained on a balanced data set and are deploying on a
# data set with class imbalance). If calibrated probabilities
# are needed, you will want to do some additional postprocessing.
Prob1 = predict_tiles(X, net, args.outputLayer)
# Save results
outFile = args.inputFile + ".Yhat"
np.save(outFile+".npy", Prob1)
skimage.io.imsave(outFile+".tiff", Prob1)
print('[info]: Wrote results to "%s.XXX"' % outFile)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# (c) 2015 The Johns Hopkins University / Applied Physics Laboratory
# All Rights Reserved. Contact the JHU/APL Office of Technology Transfer for any
# additional rights. www.jhuapl.edu/ott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credentials themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so::
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
import errno
import json
import logging
import os
import threading
from oauth2client.client import Credentials
from oauth2client.client import Storage as BaseStorage
from oauth2client import util
from oauth2client.locked_file import LockedFile
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()
class Error(Exception):
"""Base error for this module."""
class NewerCredentialStoreError(Error):
"""The credential store is a newer version than supported."""
@util.positional(4)
def get_credential_storage(filename, client_id, user_agent, scope,
warn_on_readonly=True):
"""Get a Storage instance for a credential.
Args:
filename: The JSON file storing a set of credentials
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: string or iterable of strings, Scope(s) being requested
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
# Recreate the legacy key with these specific parameters
key = {'clientId': client_id, 'userAgent': user_agent,
'scope': util.scopes_to_string(scope)}
return get_credential_storage_custom_key(
filename, key, warn_on_readonly=warn_on_readonly)
@util.positional(2)
def get_credential_storage_custom_string_key(
filename, key_string, warn_on_readonly=True):
"""Get a Storage instance for a credential using a single string as a key.
Allows you to provide a string as a custom key that will be used for
credential storage and retrieval.
Args:
filename: The JSON file storing a set of credentials
key_string: A string to use as the key for storing this credential.
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
# Create a key dictionary that can be used
key_dict = {'key': key_string}
return get_credential_storage_custom_key(
filename, key_dict, warn_on_readonly=warn_on_readonly)
@util.positional(2)
def get_credential_storage_custom_key(
filename, key_dict, warn_on_readonly=True):
"""Get a Storage instance for a credential using a dictionary as a key.
Allows you to provide a dictionary as a custom key that will be used for
credential storage and retrieval.
Args:
filename: The JSON file storing a set of credentials
key_dict: A dictionary to use as the key for storing this credential. There
is no ordering of the keys in the dictionary. Logically equivalent
dictionaries will produce equivalent storage keys.
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
multistore = _get_multistore(filename, warn_on_readonly=warn_on_readonly)
key = util.dict_to_tuple_key(key_dict)
return multistore._get_storage(key)
@util.positional(1)
def get_all_credential_keys(filename, warn_on_readonly=True):
"""Gets all the registered credential keys in the given Multistore.
Args:
filename: The JSON file storing a set of credentials
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
A list of the credential keys present in the file. They are returned as
dictionaries that can be passed into get_credential_storage_custom_key to
get the actual credentials.
"""
multistore = _get_multistore(filename, warn_on_readonly=warn_on_readonly)
multistore._lock()
try:
return multistore._get_all_credential_keys()
finally:
multistore._unlock()
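# --- Illustrative sketch (not part of the original module) ---
# How the string-key helper above might be used. The file name and key are
# invented; ``credentials`` is any oauth2client Credentials instance. Nothing
# here runs on import.
def _example_multistore_usage(credentials):
    storage = get_credential_storage_custom_string_key(
        os.path.expanduser('~/.my_app_credentials'), 'my-service-account')
    storage.put(credentials)  # persist the credential under that key
    return storage.get()      # later: read it back, or None if absent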
@util.positional(1)
def _get_multistore(filename, warn_on_readonly=True):
"""A helper method to initialize the multistore with proper locking.
Args:
filename: The JSON file storing a set of credentials
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
A multistore object
"""
filename = os.path.expanduser(filename)
_multistores_lock.acquire()
try:
multistore = _multistores.setdefault(
filename, _MultiStore(filename, warn_on_readonly=warn_on_readonly))
finally:
_multistores_lock.release()
return multistore
class _MultiStore(object):
"""A file backed store for multiple credentials."""
@util.positional(2)
def __init__(self, filename, warn_on_readonly=True):
"""Initialize the class.
This will create the file if necessary.
"""
self._file = LockedFile(filename, 'r+', 'r')
self._thread_lock = threading.Lock()
self._read_only = False
self._warn_on_readonly = warn_on_readonly
self._create_file_if_needed()
# Cache of deserialized store. This is only valid after the
# _MultiStore is locked or _refresh_data_cache is called. This is
# of the form of:
#
# ((key, value), (key, value)...) -> OAuth2Credential
#
# If this is None, then the store hasn't been read yet.
self._data = None
class _Storage(BaseStorage):
"""A Storage object that knows how to read/write a single credential."""
def __init__(self, multistore, key):
self._multistore = multistore
self._key = key
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
self._multistore._lock()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._multistore._unlock()
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
credential = self._multistore._get_credential(self._key)
if credential:
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._update_credential(self._key, credentials)
def locked_delete(self):
"""Delete a credential.
The Storage lock must be held when this is called.
"""
self._multistore._delete_credential(self._key)
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._file.filename()):
old_umask = os.umask(0o177)
try:
open(self._file.filename(), 'a+b').close()
finally:
os.umask(old_umask)
def _lock(self):
"""Lock the entire multistore."""
self._thread_lock.acquire()
try:
self._file.open_and_lock()
except IOError as e:
if e.errno == errno.ENOSYS:
logger.warn('File system does not support locking the credentials '
'file.')
elif e.errno == errno.ENOLCK:
logger.warn('File system is out of resources for writing the '
'credentials file (is your disk full?).')
else:
raise
if not self._file.is_locked():
self._read_only = True
if self._warn_on_readonly:
logger.warn('The credentials file (%s) is not writable. Opening in '
'read-only mode. Any refreshed credentials will only be '
'valid for this run.', self._file.filename())
if os.path.getsize(self._file.filename()) == 0:
logger.debug('Initializing empty multistore file')
# The multistore is empty so write out an empty file.
self._data = {}
self._write()
elif not self._read_only or self._data is None:
# Only refresh the data if we are read/write or we haven't
            # cached the data yet. If we are readonly, we assume it isn't
# changing out from under us and that we only have to read it
# once. This prevents us from whacking any new access keys that
# we have cached in memory but were unable to write out.
self._refresh_data_cache()
def _unlock(self):
"""Release the lock on the multistore."""
self._file.unlock_and_close()
self._thread_lock.release()
def _locked_json_read(self):
"""Get the raw content of the multistore file.
The multistore must be locked when this is called.
Returns:
The contents of the multistore decoded as JSON.
"""
assert self._thread_lock.locked()
self._file.file_handle().seek(0)
return json.load(self._file.file_handle())
def _locked_json_write(self, data):
"""Write a JSON serializable data structure to the multistore.
The multistore must be locked when this is called.
Args:
data: The data to be serialized and written.
"""
assert self._thread_lock.locked()
if self._read_only:
return
self._file.file_handle().seek(0)
json.dump(data, self._file.file_handle(), sort_keys=True, indent=2, separators=(',', ': '))
self._file.file_handle().truncate()
def _refresh_data_cache(self):
"""Refresh the contents of the multistore.
The multistore must be locked when this is called.
Raises:
NewerCredentialStoreError: Raised when a newer client has written the
store.
"""
self._data = {}
try:
raw_data = self._locked_json_read()
except Exception:
logger.warn('Credential data store could not be loaded. '
'Will ignore and overwrite.')
return
version = 0
try:
version = raw_data['file_version']
except Exception:
logger.warn('Missing version for credential data store. It may be '
'corrupt or an old version. Overwriting.')
if version > 1:
raise NewerCredentialStoreError(
'Credential file has file_version of %d. '
'Only file_version of 1 is supported.' % version)
credentials = []
try:
credentials = raw_data['data']
except (TypeError, KeyError):
pass
for cred_entry in credentials:
try:
(key, credential) = self._decode_credential_from_json(cred_entry)
self._data[key] = credential
except:
# If something goes wrong loading a credential, just ignore it
logger.info('Error decoding credential, skipping', exc_info=True)
def _decode_credential_from_json(self, cred_entry):
"""Load a credential from our JSON serialization.
Args:
cred_entry: A dict entry from the data member of our format
Returns:
(key, cred) where the key is the key tuple and the cred is the
OAuth2Credential object.
"""
raw_key = cred_entry['key']
key = util.dict_to_tuple_key(raw_key)
credential = None
credential = Credentials.new_from_json(json.dumps(cred_entry['credential']))
return (key, credential)
def _write(self):
"""Write the cached data back out.
The multistore must be locked.
"""
raw_data = {'file_version': 1}
raw_creds = []
raw_data['data'] = raw_creds
for (cred_key, cred) in self._data.items():
raw_key = dict(cred_key)
raw_cred = json.loads(cred.to_json())
raw_creds.append({'key': raw_key, 'credential': raw_cred})
self._locked_json_write(raw_data)
def _get_all_credential_keys(self):
"""Gets all the registered credential keys in the multistore.
Returns:
A list of dictionaries corresponding to all the keys currently registered
"""
return [dict(key) for key in self._data.keys()]
def _get_credential(self, key):
"""Get a credential from the multistore.
The multistore must be locked.
Args:
key: The key used to retrieve the credential
Returns:
The credential specified or None if not present
"""
return self._data.get(key, None)
def _update_credential(self, key, cred):
"""Update a credential and write the multistore.
This must be called when the multistore is locked.
Args:
key: The key used to retrieve the credential
cred: The OAuth2Credential to update/set
"""
self._data[key] = cred
self._write()
def _delete_credential(self, key):
"""Delete a credential and write the multistore.
This must be called when the multistore is locked.
Args:
key: The key used to retrieve the credential
"""
try:
del self._data[key]
except KeyError:
pass
self._write()
def _get_storage(self, key):
"""Get a Storage object to get/set a credential.
This Storage is a 'view' into the multistore.
Args:
key: The key used to retrieve the credential
Returns:
A Storage object that can be used to get/set this cred
"""
return self._Storage(self, key)
|
|
import mimetypes
import urlparse
import os
from logging import getLogger
from pylons import config
from ckan.common import json
from ckan import plugins as p
import ckan.lib.helpers as h
try:
from ckan.lib.datapreview import on_same_domain
except ImportError:
from ckan.lib.datapreview import _on_same_domain as on_same_domain
ignore_empty = p.toolkit.get_validator('ignore_empty')
boolean_validator = p.toolkit.get_validator('boolean_validator')
log = getLogger(__name__)
GEOVIEW_FORMATS = ['kml', 'geojson', 'gml', 'wms', 'wfs', 'esrigeojson',
'gft', 'arcgis_rest']
def get_proxified_service_url(data_dict):
'''
:param data_dict: contains a resource and package dict
:type data_dict: dictionary
'''
controller = \
'ckanext.geoview.controllers.service_proxy:ServiceProxyController'
url = h.url_for(
action='proxy_service',
controller=controller,
id=data_dict['package']['name'],
resource_id=data_dict['resource']['id'])
log.debug('Proxified url is {0}'.format(url))
return url
def get_common_map_config():
'''
Returns a dict with all configuration options related to the common
base map (ie those starting with 'ckanext.spatial.common_map.')
'''
namespace = 'ckanext.spatial.common_map.'
return dict([(k.replace(namespace, ''), v) for k, v in config.iteritems()
if k.startswith(namespace)])
def get_openlayers_viewer_config():
'''
Returns a dict with all configuration options related to the
OpenLayers viewer (ie those starting with 'ckanext.geoview.ol_viewer.')
'''
namespace = 'ckanext.geoview.ol_viewer.'
return dict([(k.replace(namespace, ''), v) for k, v in config.iteritems()
if k.startswith(namespace)])
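# --- Illustrative sketch (not part of the original plugin) ---
# The namespace-stripping pattern used by the two helpers above, applied to a
# made-up config dict instead of the live pylons config.
def _example_namespace_filter():
    fake_config = {
        'ckanext.spatial.common_map.type': 'custom',
        'ckanext.spatial.common_map.custom.url': 'http://example.com/tiles',
        'ckan.site_title': 'Demo',
    }
    namespace = 'ckanext.spatial.common_map.'
    # returns {'type': 'custom', 'custom.url': 'http://example.com/tiles'}
    return dict([(k.replace(namespace, ''), v)
                 for k, v in fake_config.iteritems()
                 if k.startswith(namespace)])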
class GeoViewBase(p.SingletonPlugin):
'''This base class is for view extensions. '''
if p.toolkit.check_ckan_version(min_version='2.3'):
p.implements(p.IResourceView, inherit=True)
else:
p.implements(p.IResourcePreview, inherit=True)
p.implements(p.IConfigurer, inherit=True)
p.implements(p.IConfigurable, inherit=True)
proxy_enabled = False
same_domain = False
def update_config(self, config):
p.toolkit.add_public_directory(config, 'public')
p.toolkit.add_template_directory(config, 'templates')
p.toolkit.add_resource('public', 'ckanext-geoview')
self.proxy_enabled = 'resource_proxy' in config.get('ckan.plugins', '')
class OLGeoView(GeoViewBase):
p.implements(p.IRoutes, inherit=True)
p.implements(p.ITemplateHelpers, inherit=True)
# IRoutes
def before_map(self, m):
controller = \
'ckanext.geoview.controllers.service_proxy:ServiceProxyController'
m.connect('/dataset/{id}/resource/{resource_id}/service_proxy',
controller=controller,
action='proxy_service')
return m
# ITemplateHelpers
def get_helpers(self):
return {
'get_common_map_config_geoviews': get_common_map_config,
'get_openlayers_viewer_config': get_openlayers_viewer_config,
}
# IResourceView (CKAN >=2.3)
def info(self):
return {'name': 'geo_view',
'title': 'Map viewer (OpenLayers)',
'icon': 'globe',
'iframed': True,
'default_title': p.toolkit._('Map viewer'),
'schema': {
'feature_hoveron': [ignore_empty, boolean_validator],
'feature_style': [ignore_empty]
},
}
def can_view(self, data_dict):
format_lower = data_dict['resource'].get('format', '').lower()
same_domain = on_same_domain(data_dict)
# Guess from file extension
if not format_lower and data_dict['resource'].get('url'):
format_lower = self._guess_format_from_extension(
data_dict['resource']['url'])
if not format_lower:
return False
view_formats = config.get('ckanext.geoview.ol_viewer.formats', '')
if view_formats:
view_formats = view_formats.split(' ')
else:
view_formats = GEOVIEW_FORMATS
correct_format = format_lower in view_formats
can_preview_from_domain = self.proxy_enabled or same_domain
return correct_format and can_preview_from_domain
def view_template(self, context, data_dict):
return 'dataviewer/openlayers2.html'
def form_template(self, context, data_dict):
return 'dataviewer/openlayers_form.html'
# IResourcePreview (CKAN < 2.3)
def can_preview(self, data_dict):
return self.can_view(data_dict)
def preview_template(self, context, data_dict):
return 'dataviewer/openlayers2.html'
# Common for IResourceView and IResourcePreview
def _guess_format_from_extension(self, url):
try:
parsed_url = urlparse.urlparse(url)
format_lower = (os.path.splitext(parsed_url.path)[1][1:]
.encode('ascii', 'ignore').lower())
except ValueError, e:
log.error('Invalid URL: {0}, {1}'.format(url, e))
format_lower = ''
return format_lower
def setup_template_variables(self, context, data_dict):
import ckanext.resourceproxy.plugin as proxy
same_domain = on_same_domain(data_dict)
if not data_dict['resource'].get('format'):
data_dict['resource']['format'] = \
self._guess_format_from_extension(data_dict['resource']['url'])
if self.proxy_enabled and not same_domain:
proxy_url = proxy.get_proxified_resource_url(data_dict)
proxy_service_url = get_proxified_service_url(data_dict)
else:
proxy_url = data_dict['resource']['url']
proxy_service_url = data_dict['resource']['url']
gapi_key = config.get('ckanext.geoview.gapi_key')
if not p.toolkit.check_ckan_version(min_version='2.3'):
p.toolkit.c.resource['proxy_url'] = proxy_url
p.toolkit.c.resource['proxy_service_url'] = proxy_service_url
p.toolkit.c.resource['gapi_key'] = gapi_key
return {'resource_view_json': 'resource_view' in data_dict and json.dumps(data_dict['resource_view']),
'proxy_service_url': proxy_service_url,
'proxy_url': proxy_url,
'gapi_key': gapi_key}
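# --- Illustrative sketch (not part of the original plugin) ---
# The extension-based format guess performed by _guess_format_from_extension
# above, shown on a made-up resource URL.
def _example_guess_format():
    sample_url = 'http://example.com/data/rivers.GeoJSON?version=2'
    parsed = urlparse.urlparse(sample_url)
    # -> 'geojson', which is one of the GEOVIEW_FORMATS
    return os.path.splitext(parsed.path)[1][1:].lower()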
class GeoJSONView(GeoViewBase):
p.implements(p.ITemplateHelpers, inherit=True)
GeoJSON = ['gjson', 'geojson']
def update_config(self, config):
super(GeoJSONView, self).update_config(config)
mimetypes.add_type('application/json', '.geojson')
# IResourceView (CKAN >=2.3)
def info(self):
return {'name': 'geojson_view',
'title': 'GeoJSON',
'icon': 'map-marker',
'iframed': True,
'default_title': p.toolkit._('GeoJSON'),
}
def can_view(self, data_dict):
resource = data_dict['resource']
format_lower = resource.get('format', '').lower()
if format_lower in self.GeoJSON:
return self.same_domain or self.proxy_enabled
return False
def view_template(self, context, data_dict):
return 'dataviewer/geojson.html'
# IResourcePreview (CKAN < 2.3)
def can_preview(self, data_dict):
format_lower = data_dict['resource']['format'].lower()
correct_format = format_lower in self.GeoJSON
can_preview_from_domain = (self.proxy_enabled or
data_dict['resource'].get('on_same_domain'))
quality = 2
if p.toolkit.check_ckan_version('2.1'):
if correct_format:
if can_preview_from_domain:
return {'can_preview': True, 'quality': quality}
else:
return {'can_preview': False,
'fixable': 'Enable resource_proxy',
'quality': quality}
else:
return {'can_preview': False, 'quality': quality}
return correct_format and can_preview_from_domain
def preview_template(self, context, data_dict):
return 'dataviewer/geojson.html'
def setup_template_variables(self, context, data_dict):
import ckanext.resourceproxy.plugin as proxy
self.same_domain = data_dict['resource'].get('on_same_domain')
if self.proxy_enabled and not self.same_domain:
data_dict['resource']['original_url'] = \
data_dict['resource'].get('url')
data_dict['resource']['url'] = \
proxy.get_proxified_resource_url(data_dict)
# ITemplateHelpers
def get_helpers(self):
return {
'get_common_map_config_geojson': get_common_map_config,
}
class GeoJSONPreview(GeoJSONView):
pass
class WMTSView(GeoViewBase):
p.implements(p.ITemplateHelpers, inherit=True)
WMTS = ['wmts']
# IResourceView (CKAN >=2.3)
def info(self):
return {'name': 'wmts_view',
'title': 'wmts',
'icon': 'map-marker',
'iframed': True,
'default_title': p.toolkit._('WMTS'),
}
def can_view(self, data_dict):
resource = data_dict['resource']
format_lower = resource.get('format', '').lower()
if format_lower in self.WMTS:
return self.same_domain or self.proxy_enabled
return False
def view_template(self, context, data_dict):
return 'dataviewer/wmts.html'
# IResourcePreview (CKAN < 2.3)
def can_preview(self, data_dict):
format_lower = data_dict['resource']['format'].lower()
correct_format = format_lower in self.WMTS
can_preview_from_domain = (self.proxy_enabled or
data_dict['resource'].get('on_same_domain'))
quality = 2
if p.toolkit.check_ckan_version('2.1'):
if correct_format:
if can_preview_from_domain:
return {'can_preview': True, 'quality': quality}
else:
return {'can_preview': False,
'fixable': 'Enable resource_proxy',
'quality': quality}
else:
return {'can_preview': False, 'quality': quality}
return correct_format and can_preview_from_domain
def preview_template(self, context, data_dict):
return 'dataviewer/wmts.html'
def setup_template_variables(self, context, data_dict):
import ckanext.resourceproxy.plugin as proxy
self.same_domain = data_dict['resource'].get('on_same_domain')
if self.proxy_enabled and not self.same_domain:
data_dict['resource']['original_url'] = \
data_dict['resource'].get('url')
data_dict['resource']['url'] = \
proxy.get_proxified_resource_url(data_dict)
    # ITemplateHelpers
def get_helpers(self):
return {
'get_common_map_config_wmts' : get_common_map_config,
}
class WMTSPreview(WMTSView):
pass
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator to move data from Hive to S3 bucket."""
import bz2
import gzip
import os
import tempfile
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Dict, Optional, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.utils.compression import uncompress_file
from airflow.utils.decorators import apply_defaults
class S3ToHiveOperator(BaseOperator): # pylint: disable=too-many-instance-attributes
"""
Moves data from S3 to Hive. The operator downloads a file from S3,
stores the file locally before loading it into a Hive table.
    If the ``create`` or ``recreate`` arguments are set to ``True``,
    ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
    Hive column types are taken from the ``field_dict`` argument.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
    large amount of data is loaded and/or if the table gets
    queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param s3_key: The key to be retrieved from S3. (templated)
:type s3_key: str
    :param field_dict: A dictionary of the field names in the file
        as keys and their Hive types as values
:type field_dict: dict
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:type hive_table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values. (templated)
:type partition: dict
:param headers: whether the file contains column names on the first
line
:type headers: bool
:param check_headers: whether the column names on the first line should be
checked against the keys of field_dict
:type check_headers: bool
:param wildcard_match: whether the s3_key should be interpreted as a Unix
wildcard pattern
:type wildcard_match: bool
:param aws_conn_id: source s3 connection
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type verify: bool or str
:param hive_cli_conn_id: destination hive connection
:type hive_cli_conn_id: str
:param input_compressed: Boolean to determine if file decompression is
required to process headers
:type input_compressed: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
:param select_expression: S3 Select expression
:type select_expression: str
"""
template_fields = ('s3_key', 'partition', 'hive_table')
template_ext = ()
ui_color = '#a0e08c'
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
*,
s3_key: str,
field_dict: Dict,
hive_table: str,
delimiter: str = ',',
create: bool = True,
recreate: bool = False,
partition: Optional[Dict] = None,
headers: bool = False,
check_headers: bool = False,
wildcard_match: bool = False,
aws_conn_id: str = 'aws_default',
verify: Optional[Union[bool, str]] = None,
hive_cli_conn_id: str = 'hive_cli_default',
input_compressed: bool = False,
tblproperties: Optional[Dict] = None,
select_expression: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_key = s3_key
self.field_dict = field_dict
self.hive_table = hive_table
self.delimiter = delimiter
self.create = create
self.recreate = recreate
self.partition = partition
self.headers = headers
self.check_headers = check_headers
self.wildcard_match = wildcard_match
self.hive_cli_conn_id = hive_cli_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.input_compressed = input_compressed
self.tblproperties = tblproperties
self.select_expression = select_expression
if self.check_headers and not (self.field_dict is not None and self.headers):
raise AirflowException("To check_headers provide " + "field_dict and headers")
def execute(self, context):
# Downloading file from S3
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
hive_hook = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
self.log.info("Downloading S3 file")
if self.wildcard_match:
if not s3_hook.check_for_wildcard_key(self.s3_key):
raise AirflowException(f"No key matches {self.s3_key}")
s3_key_object = s3_hook.get_wildcard_key(self.s3_key)
else:
if not s3_hook.check_for_key(self.s3_key):
raise AirflowException(f"The key {self.s3_key} does not exists")
s3_key_object = s3_hook.get_key(self.s3_key)
_, file_ext = os.path.splitext(s3_key_object.key)
if self.select_expression and self.input_compressed and file_ext.lower() != '.gz':
raise AirflowException("GZIP is the only compression " + "format Amazon S3 Select supports")
with TemporaryDirectory(prefix='tmps32hive_') as tmp_dir, NamedTemporaryFile(
mode="wb", dir=tmp_dir, suffix=file_ext
) as f:
self.log.info("Dumping S3 key %s contents to local file %s", s3_key_object.key, f.name)
if self.select_expression:
option = {}
if self.headers:
option['FileHeaderInfo'] = 'USE'
if self.delimiter:
option['FieldDelimiter'] = self.delimiter
input_serialization = {'CSV': option}
if self.input_compressed:
input_serialization['CompressionType'] = 'GZIP'
content = s3_hook.select_key(
bucket_name=s3_key_object.bucket_name,
key=s3_key_object.key,
expression=self.select_expression,
input_serialization=input_serialization,
)
f.write(content.encode("utf-8"))
else:
s3_key_object.download_fileobj(f)
f.flush()
if self.select_expression or not self.headers:
self.log.info("Loading file %s into Hive", f.name)
hive_hook.load_file(
f.name,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
else:
# Decompressing file
if self.input_compressed:
self.log.info("Uncompressing file %s", f.name)
fn_uncompressed = uncompress_file(f.name, file_ext, tmp_dir)
self.log.info("Uncompressed to %s", fn_uncompressed)
# uncompressed file available now so deleting
# compressed file to save disk space
f.close()
else:
fn_uncompressed = f.name
# Testing if header matches field_dict
if self.check_headers:
self.log.info("Matching file header against field_dict")
header_list = self._get_top_row_as_list(fn_uncompressed)
if not self._match_headers(header_list):
raise AirflowException("Header check failed")
# Deleting top header row
self.log.info("Removing header from file %s", fn_uncompressed)
headless_file = self._delete_top_row_and_compress(fn_uncompressed, file_ext, tmp_dir)
self.log.info("Headless file %s", headless_file)
self.log.info("Loading file %s into Hive", headless_file)
hive_hook.load_file(
headless_file,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
def _get_top_row_as_list(self, file_name):
with open(file_name) as file:
header_line = file.readline().strip()
header_list = header_line.split(self.delimiter)
return header_list
def _match_headers(self, header_list):
if not header_list:
raise AirflowException("Unable to retrieve header row from file")
field_names = self.field_dict.keys()
if len(field_names) != len(header_list):
self.log.warning(
"Headers count mismatch File headers:\n %s\nField names: \n %s\n", header_list, field_names
)
return False
test_field_match = [h1.lower() == h2.lower() for h1, h2 in zip(header_list, field_names)]
if not all(test_field_match):
self.log.warning(
"Headers do not match field names File headers:\n %s\nField names: \n %s\n",
header_list,
field_names,
)
return False
else:
return True
@staticmethod
def _delete_top_row_and_compress(input_file_name, output_file_ext, dest_dir):
# When output_file_ext is not defined, file is not compressed
open_fn = open
if output_file_ext.lower() == '.gz':
open_fn = gzip.GzipFile
elif output_file_ext.lower() == '.bz2':
open_fn = bz2.BZ2File
_, fn_output = tempfile.mkstemp(suffix=output_file_ext, dir=dest_dir)
with open(input_file_name, 'rb') as f_in, open_fn(fn_output, 'wb') as f_out:
f_in.seek(0)
next(f_in)
for line in f_in:
f_out.write(line)
return fn_output
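# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical way to wire S3ToHiveOperator into a DAG. The bucket
# key, table name, field types and dag_id are invented; the connection ids
# fall back to the operator defaults ('aws_default', 'hive_cli_default').
def _example_dag():
    from datetime import datetime
    from airflow import DAG

    with DAG(dag_id='example_s3_to_hive', start_date=datetime(2021, 1, 1),
             schedule_interval=None) as dag:
        S3ToHiveOperator(
            task_id='load_events',
            s3_key='s3://my-bucket/raw/events.csv',
            field_dict={'event_id': 'BIGINT', 'payload': 'STRING'},
            hive_table='staging.events',
            delimiter=',',
            headers=True,
            check_headers=True,
            recreate=True,
        )
    return dag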
|
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Simple Stubs."""
# TODO(https://github.com/grpc/grpc/issues/21965): Run under setuptools.
import os
_MAXIMUM_CHANNELS = 10
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"] = "1"
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"] = str(_MAXIMUM_CHANNELS)
import contextlib
import datetime
import inspect
import logging
import unittest
import sys
import time
from typing import Callable, Optional
from tests.unit import test_common
import grpc
import grpc.experimental
_REQUEST = b"0000"
_CACHE_EPOCHS = 8
_CACHE_TRIALS = 6
_SERVER_RESPONSE_COUNT = 10
_CLIENT_REQUEST_COUNT = _SERVER_RESPONSE_COUNT
_STRESS_EPOCHS = _MAXIMUM_CHANNELS * 10
_UNARY_UNARY = "/test/UnaryUnary"
_UNARY_STREAM = "/test/UnaryStream"
_STREAM_UNARY = "/test/StreamUnary"
_STREAM_STREAM = "/test/StreamStream"
def _unary_unary_handler(request, context):
return request
def _unary_stream_handler(request, context):
for _ in range(_SERVER_RESPONSE_COUNT):
yield request
def _stream_unary_handler(request_iterator, context):
request = None
for single_request in request_iterator:
request = single_request
return request
def _stream_stream_handler(request_iterator, context):
for request in request_iterator:
yield request
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return grpc.unary_unary_rpc_method_handler(_unary_unary_handler)
elif handler_call_details.method == _UNARY_STREAM:
return grpc.unary_stream_rpc_method_handler(_unary_stream_handler)
elif handler_call_details.method == _STREAM_UNARY:
return grpc.stream_unary_rpc_method_handler(_stream_unary_handler)
elif handler_call_details.method == _STREAM_STREAM:
return grpc.stream_stream_rpc_method_handler(_stream_stream_handler)
else:
raise NotImplementedError()
def _time_invocation(to_time: Callable[[], None]) -> datetime.timedelta:
start = datetime.datetime.now()
to_time()
return datetime.datetime.now() - start
@contextlib.contextmanager
def _server(credentials: Optional[grpc.ServerCredentials]):
try:
server = test_common.test_server()
target = '[::]:0'
if credentials is None:
port = server.add_insecure_port(target)
else:
port = server.add_secure_port(target, credentials)
server.add_generic_rpc_handlers((_GenericHandler(),))
server.start()
yield port
finally:
server.stop(None)
class SimpleStubsTest(unittest.TestCase):
def assert_cached(self, to_check: Callable[[str], None]) -> None:
"""Asserts that a function caches intermediate data/state.
To be specific, given a function whose caching behavior is
deterministic in the value of a supplied string, this function asserts
that, on average, subsequent invocations of the function for a specific
string are faster than first invocations with that same string.
Args:
to_check: A function returning nothing, that caches values based on
an arbitrary supplied string.
"""
initial_runs = []
cached_runs = []
for epoch in range(_CACHE_EPOCHS):
runs = []
text = str(epoch)
for trial in range(_CACHE_TRIALS):
runs.append(_time_invocation(lambda: to_check(text)))
initial_runs.append(runs[0])
cached_runs.extend(runs[1:])
average_cold = sum((run for run in initial_runs),
datetime.timedelta()) / len(initial_runs)
average_warm = sum((run for run in cached_runs),
datetime.timedelta()) / len(cached_runs)
self.assertLess(average_warm, average_cold)
def assert_eventually(self,
predicate: Callable[[], bool],
*,
timeout: Optional[datetime.timedelta] = None,
message: Optional[Callable[[], str]] = None) -> None:
message = message or (lambda: "Proposition did not evaluate to true")
timeout = timeout or datetime.timedelta(seconds=10)
end = datetime.datetime.now() + timeout
while datetime.datetime.now() < end:
if predicate():
break
time.sleep(0.5)
else:
self.fail(message() + " after " + str(timeout))
def test_unary_unary_insecure(self):
with _server(None) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.experimental.
insecure_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_unary_unary_secure(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_channels_cached(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
test_name = inspect.stack()[0][3]
args = (_REQUEST, target, _UNARY_UNARY)
kwargs = {"channel_credentials": grpc.local_channel_credentials()}
def _invoke(seed: str):
run_kwargs = dict(kwargs)
run_kwargs["options"] = ((test_name + seed, ""),)
grpc.experimental.unary_unary(*args, **run_kwargs)
self.assert_cached(_invoke)
def test_channels_evicted(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() == 0,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} remain"
)
def test_total_channels_enforced(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for i in range(_STRESS_EPOCHS):
# Ensure we get a new channel each time.
options = (("foo", str(i)),)
# Send messages at full blast.
grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
options=options,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() <= _MAXIMUM_CHANNELS + 1,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} channels remain"
)
def test_unary_stream(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.unary_stream(
_REQUEST,
target,
_UNARY_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
def test_stream_unary(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.stream_unary(
request_iter(),
target,
_STREAM_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_stream_stream(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.stream_stream(
request_iter(),
target,
_STREAM_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main(verbosity=2)
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import six
import struct
import inspect
from nose.tools import ok_, eq_, nottest
from ryu.ofproto import ether
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import lldp
from ryu.lib import addrconv
LOG = logging.getLogger(__name__)
class TestLLDPMandatoryTLV(unittest.TestCase):
def setUp(self):
# sample data is based on:
# http://wiki.wireshark.org/LinkLayerDiscoveryProtocol
#
# mandatory TLV only
self.data = b'\x01\x80\xc2\x00\x00\x0e\x00\x04' \
+ b'\x96\x1f\xa7\x26\x88\xcc\x02\x07' \
+ b'\x04\x00\x04\x96\x1f\xa7\x26\x04' \
+ b'\x04\x05\x31\x2f\x33\x06\x02\x00' \
+ b'\x78\x00\x00'
def tearDown(self):
pass
def test_get_tlv_type(self):
buf = b'\x02\x07\x04\x00\x04\x96\x1f\xa7\x26'
eq_(lldp.LLDPBasicTLV.get_type(buf), lldp.LLDP_TLV_CHASSIS_ID)
def test_parse_without_ethernet(self):
buf = self.data[ethernet.ethernet._MIN_LEN:]
(lldp_pkt, cls, rest_buf) = lldp.lldp.parser(buf)
eq_(len(rest_buf), 0)
tlvs = lldp_pkt.tlvs
eq_(tlvs[0].tlv_type, lldp.LLDP_TLV_CHASSIS_ID)
eq_(tlvs[0].len, 7)
eq_(tlvs[0].subtype, lldp.ChassisID.SUB_MAC_ADDRESS)
eq_(tlvs[0].chassis_id, b'\x00\x04\x96\x1f\xa7\x26')
eq_(tlvs[1].tlv_type, lldp.LLDP_TLV_PORT_ID)
eq_(tlvs[1].len, 4)
eq_(tlvs[1].subtype, lldp.PortID.SUB_INTERFACE_NAME)
eq_(tlvs[1].port_id, b'1/3')
eq_(tlvs[2].tlv_type, lldp.LLDP_TLV_TTL)
eq_(tlvs[2].len, 2)
eq_(tlvs[2].ttl, 120)
eq_(tlvs[3].tlv_type, lldp.LLDP_TLV_END)
def test_parse(self):
buf = self.data
pkt = packet.Packet(buf)
i = iter(pkt)
eq_(type(next(i)), ethernet.ethernet)
eq_(type(next(i)), lldp.lldp)
def test_tlv(self):
tlv = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=b'\x00\x04\x96\x1f\xa7\x26')
eq_(tlv.tlv_type, lldp.LLDP_TLV_CHASSIS_ID)
eq_(tlv.len, 7)
(typelen, ) = struct.unpack('!H', b'\x02\x07')
eq_(tlv.typelen, typelen)
def test_serialize_without_ethernet(self):
tlv_chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=b'\x00\x04\x96\x1f\xa7\x26')
tlv_port_id = lldp.PortID(subtype=lldp.PortID.SUB_INTERFACE_NAME,
port_id=b'1/3')
tlv_ttl = lldp.TTL(ttl=120)
tlv_end = lldp.End()
tlvs = (tlv_chassis_id, tlv_port_id, tlv_ttl, tlv_end)
lldp_pkt = lldp.lldp(tlvs)
eq_(lldp_pkt.serialize(None, None),
self.data[ethernet.ethernet._MIN_LEN:])
def test_serialize(self):
pkt = packet.Packet()
dst = lldp.LLDP_MAC_NEAREST_BRIDGE
src = '00:04:96:1f:a7:26'
ethertype = ether.ETH_TYPE_LLDP
eth_pkt = ethernet.ethernet(dst, src, ethertype)
pkt.add_protocol(eth_pkt)
tlv_chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=addrconv.mac.
text_to_bin(src))
tlv_port_id = lldp.PortID(subtype=lldp.PortID.SUB_INTERFACE_NAME,
port_id=b'1/3')
tlv_ttl = lldp.TTL(ttl=120)
tlv_end = lldp.End()
tlvs = (tlv_chassis_id, tlv_port_id, tlv_ttl, tlv_end)
lldp_pkt = lldp.lldp(tlvs)
pkt.add_protocol(lldp_pkt)
eq_(len(pkt.protocols), 2)
pkt.serialize()
# Note: If the ethernet frame is shorter than 60 bytes,
# ethernet.ethernet() appends padding to the payload.
# So we split the serialized data before comparing.
data_len = len(self.data)
pkt_data_lldp = pkt.data[:data_len]
pkt_data_pad = pkt.data[data_len:]
eq_(b'\x00' * (60 - data_len), pkt_data_pad)
eq_(self.data, pkt_data_lldp)
def test_to_string(self):
chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=b'\x00\x04\x96\x1f\xa7\x26')
port_id = lldp.PortID(subtype=lldp.PortID.SUB_INTERFACE_NAME,
port_id=b'1/3')
ttl = lldp.TTL(ttl=120)
end = lldp.End()
tlvs = (chassis_id, port_id, ttl, end)
lldp_pkt = lldp.lldp(tlvs)
chassis_id_values = {'subtype': lldp.ChassisID.SUB_MAC_ADDRESS,
'chassis_id': b'\x00\x04\x96\x1f\xa7\x26',
'len': chassis_id.len,
'typelen': chassis_id.typelen}
_ch_id_str = ','.join(['%s=%s' % (k, repr(chassis_id_values[k]))
for k, v in inspect.getmembers(chassis_id)
if k in chassis_id_values])
tlv_chassis_id_str = '%s(%s)' % (lldp.ChassisID.__name__, _ch_id_str)
port_id_values = {'subtype': port_id.subtype,
'port_id': port_id.port_id,
'len': port_id.len,
'typelen': port_id.typelen}
_port_id_str = ','.join(['%s=%s' % (k, repr(port_id_values[k]))
for k, v in inspect.getmembers(port_id)
if k in port_id_values])
tlv_port_id_str = '%s(%s)' % (lldp.PortID.__name__, _port_id_str)
ttl_values = {'ttl': ttl.ttl,
'len': ttl.len,
'typelen': ttl.typelen}
_ttl_str = ','.join(['%s=%s' % (k, repr(ttl_values[k]))
for k, v in inspect.getmembers(ttl)
if k in ttl_values])
tlv_ttl_str = '%s(%s)' % (lldp.TTL.__name__, _ttl_str)
end_values = {'len': end.len,
'typelen': end.typelen}
_end_str = ','.join(['%s=%s' % (k, repr(end_values[k]))
for k, v in inspect.getmembers(end)
if k in end_values])
tlv_end_str = '%s(%s)' % (lldp.End.__name__, _end_str)
_tlvs_str = '(%s, %s, %s, %s)'
tlvs_str = _tlvs_str % (tlv_chassis_id_str,
tlv_port_id_str,
tlv_ttl_str,
tlv_end_str)
_lldp_str = '%s(tlvs=%s)'
lldp_str = _lldp_str % (lldp.lldp.__name__,
tlvs_str)
eq_(str(lldp_pkt), lldp_str)
eq_(repr(lldp_pkt), lldp_str)
def test_json(self):
chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=b'\x00\x04\x96\x1f\xa7\x26')
port_id = lldp.PortID(subtype=lldp.PortID.SUB_INTERFACE_NAME,
port_id=b'1/3')
ttl = lldp.TTL(ttl=120)
end = lldp.End()
tlvs = (chassis_id, port_id, ttl, end)
lldp1 = lldp.lldp(tlvs)
jsondict = lldp1.to_jsondict()
lldp2 = lldp.lldp.from_jsondict(jsondict['lldp'])
eq_(str(lldp1), str(lldp2))
class TestLLDPOptionalTLV(unittest.TestCase):
def setUp(self):
# sample data is based on:
# http://wiki.wireshark.org/LinkLayerDiscoveryProtocol
#
# include optional TLV
self.data = b'\x01\x80\xc2\x00\x00\x0e\x00\x01' \
+ b'\x30\xf9\xad\xa0\x88\xcc\x02\x07' \
+ b'\x04\x00\x01\x30\xf9\xad\xa0\x04' \
+ b'\x04\x05\x31\x2f\x31\x06\x02\x00' \
+ b'\x78\x08\x17\x53\x75\x6d\x6d\x69' \
+ b'\x74\x33\x30\x30\x2d\x34\x38\x2d' \
+ b'\x50\x6f\x72\x74\x20\x31\x30\x30' \
+ b'\x31\x00\x0a\x0d\x53\x75\x6d\x6d' \
+ b'\x69\x74\x33\x30\x30\x2d\x34\x38' \
+ b'\x00\x0c\x4c\x53\x75\x6d\x6d\x69' \
+ b'\x74\x33\x30\x30\x2d\x34\x38\x20' \
+ b'\x2d\x20\x56\x65\x72\x73\x69\x6f' \
+ b'\x6e\x20\x37\x2e\x34\x65\x2e\x31' \
+ b'\x20\x28\x42\x75\x69\x6c\x64\x20' \
+ b'\x35\x29\x20\x62\x79\x20\x52\x65' \
+ b'\x6c\x65\x61\x73\x65\x5f\x4d\x61' \
+ b'\x73\x74\x65\x72\x20\x30\x35\x2f' \
+ b'\x32\x37\x2f\x30\x35\x20\x30\x34' \
+ b'\x3a\x35\x33\x3a\x31\x31\x00\x0e' \
+ b'\x05\x01\x00\x14\x00\x14\x10\x0e' \
+ b'\x07' \
+ b'\x06\x00\x01\x30\xf9\xad\xa0\x02' \
+ b'\x00\x00\x03\xe9\x00\xfe\x07\x00' \
+ b'\x12\x0f\x02\x07\x01\x00\xfe\x09' \
+ b'\x00\x12\x0f\x01\x03\x6c\x00\x00' \
+ b'\x10\xfe\x09\x00\x12\x0f\x03\x01' \
+ b'\x00\x00\x00\x00\xfe\x06\x00\x12' \
+ b'\x0f\x04\x05\xf2\xfe\x06\x00\x80' \
+ b'\xc2\x01\x01\xe8\xfe\x07\x00\x80' \
+ b'\xc2\x02\x01\x00\x00\xfe\x17\x00' \
+ b'\x80\xc2\x03\x01\xe8\x10\x76\x32' \
+ b'\x2d\x30\x34\x38\x38\x2d\x30\x33' \
+ b'\x2d\x30\x35\x30\x35\x00\xfe\x05' \
+ b'\x00\x80\xc2\x04\x00\x00\x00'
def tearDown(self):
pass
def test_parse(self):
buf = self.data
pkt = packet.Packet(buf)
i = iter(pkt)
eq_(type(next(i)), ethernet.ethernet)
lldp_pkt = next(i)
eq_(type(lldp_pkt), lldp.lldp)
tlvs = lldp_pkt.tlvs
# Port Description
eq_(tlvs[3].tlv_type, lldp.LLDP_TLV_PORT_DESCRIPTION)
eq_(tlvs[3].port_description, b'Summit300-48-Port 1001\x00')
# System Name
eq_(tlvs[4].tlv_type, lldp.LLDP_TLV_SYSTEM_NAME)
eq_(tlvs[4].system_name, b'Summit300-48\x00')
# System Description
eq_(tlvs[5].tlv_type, lldp.LLDP_TLV_SYSTEM_DESCRIPTION)
eq_(tlvs[5].system_description,
b'Summit300-48 - Version 7.4e.1 (Build 5) '
+ b'by Release_Master 05/27/05 04:53:11\x00')
# SystemCapabilities
eq_(tlvs[6].tlv_type, lldp.LLDP_TLV_SYSTEM_CAPABILITIES)
eq_(tlvs[6].subtype, lldp.ChassisID.SUB_CHASSIS_COMPONENT)
eq_(tlvs[6].system_cap & lldp.SystemCapabilities.CAP_MAC_BRIDGE,
lldp.SystemCapabilities.CAP_MAC_BRIDGE)
eq_(tlvs[6].enabled_cap & lldp.SystemCapabilities.CAP_MAC_BRIDGE,
lldp.SystemCapabilities.CAP_MAC_BRIDGE)
eq_(tlvs[6].system_cap & lldp.SystemCapabilities.CAP_TELEPHONE, 0)
eq_(tlvs[6].enabled_cap & lldp.SystemCapabilities.CAP_TELEPHONE, 0)
# Management Address
eq_(tlvs[7].tlv_type, lldp.LLDP_TLV_MANAGEMENT_ADDRESS)
eq_(tlvs[7].addr_len, 7)
eq_(tlvs[7].addr, b'\x00\x01\x30\xf9\xad\xa0')
eq_(tlvs[7].intf_num, 1001)
# Organizationally Specific
eq_(tlvs[8].tlv_type, lldp.LLDP_TLV_ORGANIZATIONALLY_SPECIFIC)
eq_(tlvs[8].oui, b'\x00\x12\x0f') # IEEE 802.3
eq_(tlvs[8].subtype, 0x02) # Power Via MDI
# End
eq_(tlvs[16].tlv_type, lldp.LLDP_TLV_END)
def test_parse_corrupted(self):
buf = self.data
pkt = packet.Packet(buf[:128])
def test_serialize(self):
pkt = packet.Packet()
dst = lldp.LLDP_MAC_NEAREST_BRIDGE
src = '00:01:30:f9:ad:a0'
ethertype = ether.ETH_TYPE_LLDP
eth_pkt = ethernet.ethernet(dst, src, ethertype)
pkt.add_protocol(eth_pkt)
tlv_chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=addrconv.mac.
text_to_bin(src))
tlv_port_id = lldp.PortID(subtype=lldp.PortID.SUB_INTERFACE_NAME,
port_id=b'1/1')
tlv_ttl = lldp.TTL(ttl=120)
tlv_port_description = lldp.PortDescription(
port_description=b'Summit300-48-Port 1001\x00')
tlv_system_name = lldp.SystemName(system_name=b'Summit300-48\x00')
tlv_system_description = lldp.SystemDescription(
system_description=b'Summit300-48 - Version 7.4e.1 (Build 5) '
+ b'by Release_Master 05/27/05 04:53:11\x00')
tlv_system_capabilities = lldp.SystemCapabilities(
subtype=lldp.ChassisID.SUB_CHASSIS_COMPONENT,
system_cap=0x14,
enabled_cap=0x14)
tlv_management_address = lldp.ManagementAddress(
addr_subtype=0x06, addr=b'\x00\x01\x30\xf9\xad\xa0',
intf_subtype=0x02, intf_num=1001,
oid=b'')
tlv_organizationally_specific = lldp.OrganizationallySpecific(
oui=b'\x00\x12\x0f', subtype=0x02, info=b'\x07\x01\x00')
tlv_end = lldp.End()
tlvs = (tlv_chassis_id, tlv_port_id, tlv_ttl, tlv_port_description,
tlv_system_name, tlv_system_description,
tlv_system_capabilities, tlv_management_address,
tlv_organizationally_specific, tlv_end)
lldp_pkt = lldp.lldp(tlvs)
pkt.add_protocol(lldp_pkt)
eq_(len(pkt.protocols), 2)
pkt.serialize()
# self.data has many organizationally specific TLVs
data = six.binary_type(pkt.data[:-2])
eq_(data, self.data[:len(data)])
def test_to_string(self):
chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=b'\x00\x01\x30\xf9\xad\xa0')
port_id = lldp.PortID(subtype=lldp.PortID.SUB_INTERFACE_NAME,
port_id=b'1/1')
ttl = lldp.TTL(ttl=120)
port_desc = lldp.PortDescription(
port_description=b'Summit300-48-Port 1001\x00')
sys_name = lldp.SystemName(system_name=b'Summit300-48\x00')
sys_desc = lldp.SystemDescription(
system_description=b'Summit300-48 - Version 7.4e.1 (Build 5) '
+ b'by Release_Master 05/27/05 04:53:11\x00')
sys_cap = lldp.SystemCapabilities(
subtype=lldp.ChassisID.SUB_CHASSIS_COMPONENT,
system_cap=0x14,
enabled_cap=0x14)
man_addr = lldp.ManagementAddress(
addr_subtype=0x06, addr=b'\x00\x01\x30\xf9\xad\xa0',
intf_subtype=0x02, intf_num=1001,
oid='')
org_spec = lldp.OrganizationallySpecific(
oui=b'\x00\x12\x0f', subtype=0x02, info=b'\x07\x01\x00')
end = lldp.End()
tlvs = (chassis_id, port_id, ttl, port_desc, sys_name,
sys_desc, sys_cap, man_addr, org_spec, end)
lldp_pkt = lldp.lldp(tlvs)
# ChassisID string
chassis_id_values = {'subtype': lldp.ChassisID.SUB_MAC_ADDRESS,
'chassis_id': b'\x00\x01\x30\xf9\xad\xa0',
'len': chassis_id.len,
'typelen': chassis_id.typelen}
_ch_id_str = ','.join(['%s=%s' % (k, repr(chassis_id_values[k]))
for k, v in inspect.getmembers(chassis_id)
if k in chassis_id_values])
tlv_chassis_id_str = '%s(%s)' % (lldp.ChassisID.__name__, _ch_id_str)
# PortID string
port_id_values = {'subtype': port_id.subtype,
'port_id': port_id.port_id,
'len': port_id.len,
'typelen': port_id.typelen}
_port_id_str = ','.join(['%s=%s' % (k, repr(port_id_values[k]))
for k, v in inspect.getmembers(port_id)
if k in port_id_values])
tlv_port_id_str = '%s(%s)' % (lldp.PortID.__name__, _port_id_str)
# TTL string
ttl_values = {'ttl': ttl.ttl,
'len': ttl.len,
'typelen': ttl.typelen}
_ttl_str = ','.join(['%s=%s' % (k, repr(ttl_values[k]))
for k, v in inspect.getmembers(ttl)
if k in ttl_values])
tlv_ttl_str = '%s(%s)' % (lldp.TTL.__name__, _ttl_str)
# PortDescription string
port_desc_values = {'tlv_info': port_desc.tlv_info,
'len': port_desc.len,
'typelen': port_desc.typelen}
_port_desc_str = ','.join(['%s=%s' % (k, repr(port_desc_values[k]))
for k, v in inspect.getmembers(port_desc)
if k in port_desc_values])
tlv_port_desc_str = '%s(%s)' % (lldp.PortDescription.__name__,
_port_desc_str)
# SystemName string
sys_name_values = {'tlv_info': sys_name.tlv_info,
'len': sys_name.len,
'typelen': sys_name.typelen}
_system_name_str = ','.join(['%s=%s' % (k, repr(sys_name_values[k]))
for k, v in inspect.getmembers(sys_name)
if k in sys_name_values])
tlv_system_name_str = '%s(%s)' % (lldp.SystemName.__name__,
_system_name_str)
# SystemDescription string
sys_desc_values = {'tlv_info': sys_desc.tlv_info,
'len': sys_desc.len,
'typelen': sys_desc.typelen}
_sys_desc_str = ','.join(['%s=%s' % (k, repr(sys_desc_values[k]))
for k, v in inspect.getmembers(sys_desc)
if k in sys_desc_values])
tlv_sys_desc_str = '%s(%s)' % (lldp.SystemDescription.__name__,
_sys_desc_str)
# SystemCapabilities string
sys_cap_values = {'subtype': lldp.ChassisID.SUB_CHASSIS_COMPONENT,
'system_cap': 0x14,
'enabled_cap': 0x14,
'len': sys_cap.len,
'typelen': sys_cap.typelen}
_sys_cap_str = ','.join(['%s=%s' % (k, repr(sys_cap_values[k]))
for k, v in inspect.getmembers(sys_cap)
if k in sys_cap_values])
tlv_sys_cap_str = '%s(%s)' % (lldp.SystemCapabilities.__name__,
_sys_cap_str)
# ManagementAddress string
man_addr_values = {'addr_subtype': 0x06,
'addr': b'\x00\x01\x30\xf9\xad\xa0',
'addr_len': man_addr.addr_len,
'intf_subtype': 0x02,
'intf_num': 1001,
'oid': '',
'oid_len': man_addr.oid_len,
'len': man_addr.len,
'typelen': man_addr.typelen}
_man_addr_str = ','.join(['%s=%s' % (k, repr(man_addr_values[k]))
for k, v in inspect.getmembers(man_addr)
if k in man_addr_values])
tlv_man_addr_str = '%s(%s)' % (lldp.ManagementAddress.__name__,
_man_addr_str)
# OrganizationallySpecific string
org_spec_values = {'oui': b'\x00\x12\x0f',
'subtype': 0x02,
'info': b'\x07\x01\x00',
'len': org_spec.len,
'typelen': org_spec.typelen}
_org_spec_str = ','.join(['%s=%s' % (k, repr(org_spec_values[k]))
for k, v in inspect.getmembers(org_spec)
if k in org_spec_values])
tlv_org_spec_str = '%s(%s)' % (lldp.OrganizationallySpecific.__name__,
_org_spec_str)
# End string
end_values = {'len': end.len,
'typelen': end.typelen}
_end_str = ','.join(['%s=%s' % (k, repr(end_values[k]))
for k, v in inspect.getmembers(end)
if k in end_values])
tlv_end_str = '%s(%s)' % (lldp.End.__name__, _end_str)
# tlvs string
_tlvs_str = '(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
tlvs_str = _tlvs_str % (tlv_chassis_id_str,
tlv_port_id_str,
tlv_ttl_str,
tlv_port_desc_str,
tlv_system_name_str,
tlv_sys_desc_str,
tlv_sys_cap_str,
tlv_man_addr_str,
tlv_org_spec_str,
tlv_end_str)
# lldp string
_lldp_str = '%s(tlvs=%s)'
lldp_str = _lldp_str % (lldp.lldp.__name__,
tlvs_str)
eq_(str(lldp_pkt), lldp_str)
eq_(repr(lldp_pkt), lldp_str)
def test_json(self):
chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_MAC_ADDRESS,
chassis_id=b'\x00\x01\x30\xf9\xad\xa0')
port_id = lldp.PortID(subtype=lldp.PortID.SUB_INTERFACE_NAME,
port_id=b'1/1')
ttl = lldp.TTL(ttl=120)
port_desc = lldp.PortDescription(
port_description=b'Summit300-48-Port 1001\x00')
sys_name = lldp.SystemName(system_name=b'Summit300-48\x00')
sys_desc = lldp.SystemDescription(
system_description=b'Summit300-48 - Version 7.4e.1 (Build 5) '
+ b'by Release_Master 05/27/05 04:53:11\x00')
sys_cap = lldp.SystemCapabilities(
subtype=lldp.ChassisID.SUB_CHASSIS_COMPONENT,
system_cap=0x14,
enabled_cap=0x14)
man_addr = lldp.ManagementAddress(
addr_subtype=0x06, addr=b'\x00\x01\x30\xf9\xad\xa0',
intf_subtype=0x02, intf_num=1001,
oid='')
org_spec = lldp.OrganizationallySpecific(
oui=b'\x00\x12\x0f', subtype=0x02, info=b'\x07\x01\x00')
end = lldp.End()
tlvs = (chassis_id, port_id, ttl, port_desc, sys_name,
sys_desc, sys_cap, man_addr, org_spec, end)
lldp1 = lldp.lldp(tlvs)
jsondict = lldp1.to_jsondict()
lldp2 = lldp.lldp.from_jsondict(jsondict['lldp'])
eq_(str(lldp1), str(lldp2))
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
get_element_by_attribute,
int_or_none,
limit_length,
lowercase_escape,
try_get,
)
class InstagramIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
'md5': '0d2da106a9d2631273e192b372806516',
'info_dict': {
'id': 'aye83DjauH',
'ext': 'mp4',
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1371748545,
'upload_date': '20130620',
'uploader_id': 'naomipq',
'uploader': 'Naomi Leonor Phan-Quang',
'like_count': int,
'comment_count': int,
},
}, {
# missing description
'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
'info_dict': {
'id': 'BA-pQFBG8HZ',
'ext': 'mp4',
'uploader_id': 'britneyspears',
'title': 'Video by britneyspears',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1453760977,
'upload_date': '20160125',
'uploader': 'Britney Spears',
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://instagram.com/p/-Cmh1cukG2/',
'only_matching': True,
}, {
'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
'only_matching': True,
}]
@staticmethod
def _extract_embed_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
webpage)
if mobj:
return mobj.group('url')
blockquote_el = get_element_by_attribute(
'class', 'instagram-media', webpage)
if blockquote_el is None:
return
mobj = re.search(
r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
if mobj:
return mobj.group('link')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = mobj.group('url')
webpage = self._download_webpage(url, video_id)
(video_url, description, thumbnail, timestamp, uploader,
uploader_id, like_count, comment_count) = [None] * 8
shared_data = self._parse_json(
self._search_regex(
r'window\._sharedData\s*=\s*({.+?});',
webpage, 'shared data', default='{}'),
video_id, fatal=False)
if shared_data:
media = try_get(
shared_data, lambda x: x['entry_data']['PostPage'][0]['media'], dict)
if media:
video_url = media.get('video_url')
description = media.get('caption')
thumbnail = media.get('display_src')
timestamp = int_or_none(media.get('date'))
uploader = media.get('owner', {}).get('full_name')
uploader_id = media.get('owner', {}).get('username')
like_count = int_or_none(media.get('likes', {}).get('count'))
comment_count = int_or_none(media.get('comments', {}).get('count'))
if not video_url:
video_url = self._og_search_video_url(webpage, secure=False)
if not uploader_id:
uploader_id = self._search_regex(
r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
webpage, 'uploader id', fatal=False)
if not description:
description = self._search_regex(
r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
if description is not None:
description = lowercase_escape(description)
if not thumbnail:
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': 'Video by %s' % uploader_id,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader_id': uploader_id,
'uploader': uploader,
'like_count': like_count,
'comment_count': comment_count,
}
class InstagramUserIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
'url': 'https://instagram.com/porsche',
'info_dict': {
'id': 'porsche',
'title': 'porsche',
},
'playlist_mincount': 2,
'playlist': [{
'info_dict': {
'id': '614605558512799803_462752227',
'ext': 'mp4',
'title': '#Porsche Intelligent Performance.',
'thumbnail': r're:^https?://.*\.jpg',
'uploader': 'Porsche',
'uploader_id': 'porsche',
'timestamp': 1387486713,
'upload_date': '20131219',
},
}],
'params': {
'extract_flat': True,
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader_id = mobj.group('username')
entries = []
page_count = 0
media_url = 'http://instagram.com/%s/media' % uploader_id
while True:
page = self._download_json(
media_url, uploader_id,
note='Downloading page %d ' % (page_count + 1),
)
page_count += 1
for it in page['items']:
if it.get('type') != 'video':
continue
like_count = int_or_none(it.get('likes', {}).get('count'))
user = it.get('user', {})
formats = [{
'format_id': k,
'height': v.get('height'),
'width': v.get('width'),
'url': v['url'],
} for k, v in it['videos'].items()]
self._sort_formats(formats)
thumbnails_el = it.get('images', {})
thumbnail = thumbnails_el.get('thumbnail', {}).get('url')
# In some cases caption is null, which corresponds to None
# in python. As a result, it.get('caption', {}) gives None
title = (it.get('caption') or {}).get('text', it['id'])
entries.append({
'id': it['id'],
'title': limit_length(title, 80),
'formats': formats,
'thumbnail': thumbnail,
'webpage_url': it.get('link'),
'uploader': user.get('full_name'),
'uploader_id': user.get('username'),
'like_count': like_count,
'timestamp': int_or_none(it.get('created_time')),
})
if not page['items']:
break
max_id = page['items'][-1]['id'].split('_')[0]
media_url = (
'http://instagram.com/%s/media?max_id=%s' % (
uploader_id, max_id))
return {
'_type': 'playlist',
'entries': entries,
'id': uploader_id,
'title': uploader_id,
}
|
|
import sys
import os
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.append(os.path.join(PROJECT_ROOT,"scripts"))
sys.path.append(os.path.join(PROJECT_ROOT,"experimental"))
if len(sys.argv) >= 3 and "--help" not in sys.argv:
print sys.argv
args = sys.argv
mode_arg = args[args.index("-mode")+1]
print mode_arg
####################Prepare a Table for visualization in a GIS###############
if mode_arg.lower() == "prep_vis":
import Visualize_Simple as Viz
#Comma delimited listing of words to add together for Viz output
#Example: -words Washington,Seahawks
try:
words = args[args.index('-words')+1]
except:
print "You did not provide any words to visualize"
sys.exit("Error")
#Pointgrid table name
#Example: globalgrid_5_clip_geog
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Output Viz Table
try:
out_tbl = args[args.index('-out_tbl')+1]
except:
print "Did not provide a valid out table name"
out_tbl = "localspatstat_tmp"
#Statistics Table (Zavg/Gi*)
try:
stat_tbl = args[args.index("-stat_tbl")+1]
except:
print "You did not provide a name for a statistics table to use"
sys.exit("Error")
Viz.calc(words, gtbl, stat_tbl, out_tbl, conn_info)
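#Illustrative invocation only (not in the original script): prep_vis expects the
#flags parsed above; the script name and table/connection values here are
#placeholders for your own setup.
#Example: python run.py -mode prep_vis -words Washington,Seahawks
# -gtbl globalgrid_5_clip_geog -stat_tbl my_gi_stats -out_tbl viz_words
# -conn "dbname=mydb user=me host=localhost"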
##########################Local Spatial Statistics Mode (Gi*, Zavg)#######################
if mode_arg.lower() == "local_stats":
import LocalSpatialStatsV1 as LSS
print "Starting Local Spatial Statistics"
#Trainfile / devfile / test file
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
#Document Type: (wiki, twitter)
try:
traintype = args[args.index("-traintype")+1]
except:
print "You did not provide a training file type (wiki or twitter)"
sys.exit("Error")
#Document table name
try:
dtbl = args[args.index("-dtbl")+1]
except:
print "You did not provide a name for a document table to output"
sys.exit("Error")
#PROCESS ID (short term solution to multiprocess problems)
#try:
# procid = args[args.index("-procid")+1]
#except:
# print "You did not provide process id"
# sys.exit("Error")
#Pointgrid table name
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Which Statistic is going to be calculated
try:
statistic = args[args.index('-statistic')+1]
except:
print "You did not provide a statistic type"
print "e.g. -statistic gi -statistic zavg -statistic gi,zavg"
sys.exit("Error")
#Kernel Bandwidth Distance for local stats
try:
kern_dist = args[args.index('-kern_dist')+1]
except:
print "Did not provide a bandwidth size for the kern_dist kernel"
sys.exit("Error")
#Out file with local stats
try:
outf = args[args.index('-outf')+1]
except:
print "You did not provide an outfile name for where scores will be written"
outf = "tmp.txt"
#sys.exit("Error")
try:
grid_min = args[args.index('-grid_freq_min')+1]
except:
print "Did not provide a grid_freq_min argument... defaulting to 1"
grid_min = 0
try:
out_tbl = args[args.index('-out_tbl')+1]
except:
print "Did not provide a valid out table name"
out_tbl = "localspatstat_tmp"
#The number of cores you want to devote to multiprocessed Calcs
try:
cores = int(args[args.index('-cores')+1])
except:
print "Did not provide a -cores argument, defaulting to 1"
cores = 1
#Do you want to specify a whitelist to use for words (restricted vs any)
try:
listuse = args[args.index('-listuse')+1]
except:
print "Did not provide a valid listuse option, defaultign to any"
listuse = "any"
#Whitelist File path
try:
whitelist_file = args[args.index('-whitelist_file')+1]
except:
print "Did not provide a -whitelist_file option, defaulting to none"
whitelist_file = "none"
listuse = "any"
#Kerntype for kernel function (uniform, linear, epanech)
try:
kerntype = args[args.index('-kerntype')+1]
except:
print "Did not provide a valid kerntype option, defaulting to uniform"
kerntype = "uniform"
#Should probabilities of zero be written to tbl? (yes for similarity scores, no for Top Resolver)
try:
include_zero = args[args.index('-include_zero')+1]
if include_zero.lower() == "false":
include_zero = False
else: include_zero = True
except:
print "Did not provide include zero argument, defaulting to True"
include_zero = True
LSS.calc(f, statistic, dtbl, gtbl, conn_info, outf, out_tbl, kern_dist, kerntype, traintype, listuse, whitelist_file, grid_min, cores, include_zero)
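#Illustrative invocation only (not in the original script): local_stats combines the
#flags parsed above; script name, file names and table/connection values are placeholders.
#Example: python run.py -mode local_stats -tf train.txt -traintype wiki -dtbl docs
# -gtbl globalgrid_5_clip_geog -statistic gi,zavg -kern_dist 250 -kerntype uniform
# -cores 4 -outf gi_scores.txt -out_tbl localspatstat_tmp -conn "dbname=mydb user=me"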
##########################Load a database with | Doc ID | Geometry | table#####################
if mode_arg.lower() == "loaddb":
import LoadDBV1 as loadDB
print "Starting DB Load Process"
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
try:
traintype = args[args.index("-traintype")+1]
except:
print "You did not provide a training file type (wiki or twitter)"
sys.exit("Error")
try:
dtbl = args[args.index("-dtbl")+1]
except:
print "You did not provide a name for a document table to output"
sys.exit("Error")
try:
conn = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
loadDB.Load(f, dtbl, conn, traintype.lower())
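#Illustrative invocation only (not in the original script): loaddb needs a train/dev/test
#file, its type, a document table name and connection info; all values below are placeholders.
#Example: python run.py -mode loaddb -tf tweets.txt -traintype twitter -dtbl docs
# -conn "dbname=mydb user=me"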
################Weighted Jaccard Similarity##################
if mode_arg.lower() == "jsim":
print "Starting Weighted Jaccard Similarity Tests"
import JacSimilarity as JS
#Statistics Table (Zavg/Gi*)
try:
stat_tbl = args[args.index("-stat_tbl")+1]
except:
print "You did not provide a name for a statistics table to use"
sys.exit("Error")
#Statistics table function (Zavg/Gi*)
try:
stat_tbl_func = args[args.index("-stat_tbl_func")+1]
except:
print "You did not provide a name for a statistics table to use"
sys.exit("Error")
#Synset file
try:
synfile = args[args.index("-synfile")+1]
except:
print "You did not provide a file name to retrieve synsets from"
sys.exit("Error")
#Out file with p values
try:
outf = args[args.index('-outf')+1]
except:
print "You did not provide an outfile name for where scores will be written"
sys.exit("Error")
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#How many random word comparisons to make in building similarity score distribution
#Used to derive P-value for given pair
try:
randits = int(args[args.index('-randits')+1])
except:
print "Did not provide -randits argument, defaulting to 100"
randits = 100
#Select a certain percentile of the obs to compare... not yet implemented
try:
pct = args[args.index('-pct')+1]
except:
pct = "Not Implemented"
JS.calc(stat_tbl, synfile, conn_info, pct, randits, outf, stat_tbl_func)
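#Illustrative invocation only (not in the original script): a sketch of a jsim run with
#placeholder names; -randits sets how many random word pairs are sampled to build the
#similarity-score distribution used for the p-values.
#Example: python run.py -mode jsim -stat_tbl my_gi_stats -stat_tbl_func my_gi_stats_func
# -synfile synsets.txt -randits 100 -outf jsim_pvalues.txt -conn "dbname=mydb user=me"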
###################Test Toponym Resolver#####################
#Only used to test annotated datasets of TRCONLL, LGL, CWar
if mode_arg.lower() == "topo_test":
#The domain of texts that will be tested
#
try:
which_test = args[args.index("-test_domain")+1]
if which_test.strip().lower() == "lgl":
import TestResolver_LGL as tstr
if which_test.strip().lower() == "trconll":
import TestResolver_TRCONLL as tstr
if which_test.strip().lower() == "cwar":
import TestResolver_CWar as tstr
except:
print "-mode topo_test requires you to specify an addition -test_domain argument"
print "current options allow for -test_domain <lgl, trconll, cwar>"
sys.exit("Error: exiting. See above")
#print "Starting test of topo resolver on TRConll"
#Statistics Table (Zavg/Gi*) for in domain statistics
try:
in_domain_stat_tbl = args[args.index("-indomain_stat_tbl")+1]
except:
print "You did not provide a name for an in domain statistics table to use"
print "Defaulting to None"
in_domain_stat_tbl = "None"
#sys.exit("Error")
#Statistics Table (Zavg/Gi*) for out of domain statistics
try:
out_domain_stat_tbl = args[args.index("-outdomain_stat_tbl")+1]
except:
print "You did not provide a name for an out of domain statistics table to use"
print "Defaulting to None"
out_domain_stat_tbl = "None"
if in_domain_stat_tbl == "None" and out_domain_stat_tbl == "None":
print "Error:"
print "You provided neither an in domain or out of domain stat table"
sys.exit("Error")
#Lambda weight applied to Gi* vectors from the in domain statistics table
try:
in_domain_lambda = args[args.index("-in_domain_lambda")+1]
except:
print ""
print "You did not provide a value for in domain lambda, defaulting to 0.0"
print ""
in_domain_lambda = 0.0
try:
out_domain_lambda = args[args.index("-out_domain_lambda")+1]
except:
print ""
print "You did not provide a value for out domain lambda, defaulting to 0.0"
print ""
out_domain_lambda = 0.0
if float(in_domain_lambda) == 0.0 and float(out_domain_lambda) == 0.0:
print "Error:"
print "A value of 0.0 was provided for both -in_domain_lambda and -out_domain_lambda"
sys.exit("Error")
#Train file, dev file, or tst file
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Pointgrid table name
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#context window size
try:
window = int(args[args.index('-window')+1])
except:
print "You did not provide a window argument, defaulting to 15"
window = 15
#percentile of selection
try:
percentile = float(args[args.index('-percentile')+1])
except:
print "You did not provide a window argument, defaulting to .5"
percentile = .5
#Weight applied to Gi* Vector of main toponym being evaluated
try:
main_topo_weight = float(args[args.index('-main_topo_weight')+1])
except:
print "You did not provide a main topo weight, defaulting to 10"
main_topo_weight = 10.0
#Weight applied to Gi* Vector of other toponyms in context
try:
other_topo_weight = float(args[args.index('-other_topo_weight')+1])
except:
print "You did not provide an other topo weight, defaulting to 3"
other_topo_weight = 3.0
#Weight applied to Gi* Vector of other words in context
try:
other_word_weight = float(args[args.index('-other_word_weight')+1])
except:
print "You did not provide an other word weight, defaulting to 1"
other_word_weight = 1.0
#Test Table Name
try:
tst_tbl = args[args.index('-tst_tbl')+1]
except:
print "You did not provide a test table argument"
sys.exit("Error")
#Country Table Name
try:
country_tbl = args[args.index('-country_tbl')+1]
except:
print "You did not provide a country table argument"
sys.exit("Error")
#Region Table Name
try:
region_tbl = args[args.index('-region_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#State Table Name
try:
state_tbl = args[args.index('-state_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#Geonames Table Name
try:
geonames_tbl = args[args.index('-geonames_tbl')+1]
except:
print "You did not provide a geonames table argument"
sys.exit("Error")
#US Prominent Table Name
#try:
# us_prom_tbl = args[args.index('-us_prom_tbl')+1]
#except:
# print "You did not provide a prominent US city table argument"
# sys.exit("Error")
#World Prominent Table Name
#try:
# world_prom_tbl = args[args.index('-world_prom_tbl')+1]
#except:
# print "You did not provide a prominent WORLD city table argument"
# sys.exit("Error")
#Results File Name
try:
results_file = args[args.index('-results_file')+1]
except:
print "You did not provide a results file name, defaulting to TestResults.txt"
results_file = "TestResults.txt"
tstr.calc(in_domain_stat_tbl, out_domain_stat_tbl , f, conn_info, gtbl, window, percentile,
float(main_topo_weight), float(other_topo_weight), float(other_word_weight), country_tbl, region_tbl,
state_tbl, geonames_tbl, tst_tbl, float(in_domain_lambda), float(out_domain_lambda), results_file)
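#Illustrative invocation only (not in the original script): a possible topo_test run with
#placeholder file/table names; as enforced above, at least one stat table and a non-zero
#lambda must be supplied.
#Example: python run.py -mode topo_test -test_domain trconll -tstf trconll.xml
# -indomain_stat_tbl gi_in -in_domain_lambda 0.7 -outdomain_stat_tbl gi_out
# -out_domain_lambda 0.3 -gtbl globalgrid_5_clip_geog -tst_tbl tr_test
# -country_tbl countries -region_tbl regions -state_tbl states -geonames_tbl geonames
# -conn "dbname=mydb user=me" -results_file TestResults.txt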
####################################################################################################################################
###################Test Toponym Resolver Using NER##################################################################################
####################################################################################################################################
if mode_arg.lower() == "topo_test_ner":
import TestResolverV4_NER as tstr
print "Starting test of topo resolver on TRConll"
#Statistics Table (Zavg/Gi*)
try:
stat_tbl = args[args.index("-stat_tbl")+1]
except:
print "You did not provide a name for a statistics table to use"
sys.exit("Error")
#Path containing stanford NER jar file
try:
stan_path = args[args.index("-stan_path")+1]
except:
print "You did not a directory for the stanford NER jar"
sys.exit("Error")
#Train file, dev file, or tst file
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Pointgrid table name
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#context window size
try:
window = int(args[args.index('-window')+1])
except:
print "You did not provide a window argument, defaulting to 15"
window = 15
#percentile of selection
try:
percentile = float(args[args.index('-percentile')+1])
except:
print "You did not provide a window argument, defaulting to .5"
percentile = .5
#Place name weight
try:
place_name_weight = float(args[args.index('-place_name_weight')+1])
except:
print "You did not provide a window argument, defaulting to 10"
place_name_weight = 10.0
#Test Table Name
try:
tst_tbl = args[args.index('-tst_tbl')+1]
except:
print "You did not provide a test table argument"
sys.exit("Error")
#Country Table Name
try:
country_tbl = args[args.index('-country_tbl')+1]
except:
print "You did not provide a country table argument"
sys.exit("Error")
#Region Table Name
try:
region_tbl = args[args.index('-region_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#State Table Name
try:
state_tbl = args[args.index('-state_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#Geonames Table Name
try:
geonames_tbl = args[args.index('-geonames_tbl')+1]
except:
print "You did not provide a geonames table argument"
sys.exit("Error")
#US Prominent Table Name
#try:
# us_prom_tbl = args[args.index('-us_prom_tbl')+1]
#except:
# print "You did not provide a prominent US city table argument"
# sys.exit("Error")
#World Prominent Table Name
#try:
# world_prom_tbl = args[args.index('-world_prom_tbl')+1]
#except:
# print "You did not provide a prominent WORLD city table argument"
# sys.exit("Error")
tstr.calc(stat_tbl , f, conn_info, gtbl, window, percentile, place_name_weight, country_tbl, region_tbl, state_tbl, geonames_tbl, tst_tbl, stan_path)
if mode_arg.lower() == "xml_topo_resolve_ner":
import TestResolver_xml_ner as tr
#Path containing stanford NER jar file
try:
stan_path = args[args.index("-stan_path")+1]
except:
print "You did not a directory for the stanford NER jar"
sys.exit("Error")
#Statistics Table (Zavg/Gi*)
try:
in_domain_stat_tbl = args[args.index("-indomain_stat_tbl")+1]
except:
print "You did not provide a name for an in domain statistics table to use"
print "Defaulting to None"
in_domain_stat_tbl = "None"
#sys.exit("Error")
try:
out_domain_stat_tbl = args[args.index("-outdomain_stat_tbl")+1]
except:
print "You did not provide a name for an out of domain statistics table to use"
print "Defaulting to None"
out_domain_stat_tbl = "None"
if in_domain_stat_tbl == "None" and out_domain_stat_tbl == "None":
print "Error:"
print "You provided neither an in domain or out of domain stat table"
sys.exit("Error")
#Lambda weight applied to Gi* vectors from the in domain statistics table
try:
in_domain_lambda = args[args.index("-in_domain_lambda")+1]
except:
print ""
print "You did not provide a value for in domain lambda, defaulting to 0.0"
print ""
in_domain_lambda = 0.0
try:
out_domain_lambda = args[args.index("-out_domain_lambda")+1]
except:
print ""
print "You did not provide a value for out domain lambda, defaulting to 0.0"
print ""
out_domain_lambda = 0.0
if float(in_domain_lambda) == 0.0 and float(out_domain_lambda) == 0.0:
print "Error:"
print "A value of 0.0 was provided for both -in_domain_lambda and -out_domain_lambda"
sys.exit("Error")
#Train file, dev file, or tst file
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Pointgrid table name
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#context window size
try:
window = int(args[args.index('-window')+1])
except:
print "You did not provide a window argument, defaulting to 15"
window = 15
#percentile of selection
try:
percentile = float(args[args.index('-percentile')+1])
except:
print "You did not provide a window argument, defaulting to .5"
percentile = .5
#Weight applied to Gi* Vector of main toponym being evaluated
try:
main_topo_weight = float(args[args.index('-main_topo_weight')+1])
except:
print "You did not provide a main topo weight, defaulting to 10"
main_topo_weight = 10.0
#Weight applied to Gi* Vector of other toponyms in context
try:
other_topo_weight = float(args[args.index('-other_topo_weight')+1])
except:
print "You did not provide an other topo weight, defaulting to 5"
other_topo_weight = 5.0
#Weight applied to Gi* Vector of other words in context
try:
other_word_weight = float(args[args.index('-other_word_weight')+1])
except:
print "You did not provide an other word weight, defaulting to 1"
other_word_weight = 1.0
#Country Table Name
try:
country_tbl = args[args.index('-country_tbl')+1]
except:
print "You did not provide a country table argument"
sys.exit("Error")
#Region Table Name
try:
region_tbl = args[args.index('-region_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#State Table Name
try:
state_tbl = args[args.index('-state_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#Geonames Table Name
try:
geonames_tbl = args[args.index('-geonames_tbl')+1]
except:
print "You did not provide a geonames table argument"
sys.exit("Error")
#Results File Name
try:
results_file = args[args.index('-results_file')+1]
except:
print "You did not provide a results file name, defaulting to TestResults.txt"
results_file = "TestResults.txt"
tr.calc(in_domain_stat_tbl, out_domain_stat_tbl, f, conn_info, gtbl, window, percentile, float(main_topo_weight), float(other_topo_weight), float(other_word_weight), country_tbl, region_tbl, state_tbl, geonames_tbl, float(in_domain_lambda), float(out_domain_lambda), results_file, stan_path)
if mode_arg.lower() == "xml_topo_resolve":
import TestResolver_xml as tr
#Statistics Table (Zavg/Gi*)
try:
in_domain_stat_tbl = args[args.index("-indomain_stat_tbl")+1]
except:
print "You did not provide a name for an in domain statistics table to use"
print "Defaulting to None"
in_domain_stat_tbl = "None"
#sys.exit("Error")
try:
out_domain_stat_tbl = args[args.index("-outdomain_stat_tbl")+1]
except:
print "You did not provide a name for an out of domain statistics table to use"
print "Defaulting to None"
out_domain_stat_tbl = "None"
if in_domain_stat_tbl == "None" and out_domain_stat_tbl == "None":
print "Error:"
print "You provided neither an in domain or out of domain stat table"
sys.exit("Error")
#Lambda weight applied to Gi* vectors from the in domain statistics table
try:
in_domain_lambda = args[args.index("-in_domain_lambda")+1]
except:
print ""
print "You did not provide a value for in domain lambda, defaulting to 0.0"
print ""
in_domain_lambda = 0.0
try:
out_domain_lambda = args[args.index("-out_domain_lambda")+1]
except:
print ""
print "You did not provide a value for out domain lambda, defaulting to 0.0"
print ""
out_domain_lambda = 0.0
if float(in_domain_lambda) == 0.0 and float(out_domain_lambda) == 0.0:
print "Error:"
print "A value of 0.0 was provided for both -in_domain_lambda and -out_domain_lambda"
sys.exit("Error")
#Train file, dev file, or tst file
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Pointgrid table name
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#context window size
try:
window = int(args[args.index('-window')+1])
except:
print "You did not provide a window argument, defaulting to 15"
window = 15
#percentile of selection
try:
percentile = float(args[args.index('-percentile')+1])
except:
print "You did not provide a window argument, defaulting to .5"
percentile = .5
#Weight applied to Gi* Vector of main toponym being evaluated
try:
main_topo_weight = float(args[args.index('-main_topo_weight')+1])
except:
print "You did not provide a main topo weight, defaulting to 10"
main_topo_weight = 10.0
#Weight applied to Gi* Vector of other toponyms in context
try:
other_topo_weight = float(args[args.index('-other_topo_weight')+1])
except:
print "You did not provide an other topo weight, defaulting to 5"
other_topo_weight = 5.0
#Weight applied to Gi* Vector of other words in context
try:
other_word_weight = float(args[args.index('-other_word_weight')+1])
except:
print "You did not provide an other word weight, defaulting to 1"
other_word_weight = 1.0
#Country Table Name
try:
country_tbl = args[args.index('-country_tbl')+1]
except:
print "You did not provide a country table argument"
sys.exit("Error")
#Region Table Name
try:
region_tbl = args[args.index('-region_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#State Table Name
try:
state_tbl = args[args.index('-state_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#Geonames Table Name
try:
geonames_tbl = args[args.index('-geonames_tbl')+1]
except:
print "You did not provide a geonames table argument"
sys.exit("Error")
#US Prominent Table Name
#try:
# us_prom_tbl = args[args.index('-us_prom_tbl')+1]
#except:
# print "You did not provide a prominent US city table argument"
# sys.exit("Error")
#World Prominent Table Name
#try:
# world_prom_tbl = args[args.index('-world_prom_tbl')+1]
#except:
# print "You did not provide a prominent WORLD city table argument"
# sys.exit("Error")
#Results File Name
try:
results_file = args[args.index('-results_file')+1]
except:
print "You did not provide a results file name, defaulting to TestResults.txt"
results_file = "TestResults.txt"
tr.calc(in_domain_stat_tbl, out_domain_stat_tbl, f, conn_info, gtbl, window, percentile, float(main_topo_weight), float(other_topo_weight), float(other_word_weight), country_tbl, region_tbl, state_tbl, geonames_tbl, float(in_domain_lambda), float(out_domain_lambda), results_file)
if mode_arg.lower() == "plain_topo_resolve":
import TestResolver_PlainText_NER as tr
#print "Starting test of topo resolver on TRConll"
#Path containing stanford NER jar file
try:
stan_path = args[args.index("-stan_path")+1]
except:
print "You did not a directory for the stanford NER jar"
sys.exit("Error")
#Statistics Table (Zavg/Gi*)
try:
in_domain_stat_tbl = args[args.index("-indomain_stat_tbl")+1]
except:
print "You did not provide a name for an in domain statistics table to use"
print "Defaulting to None"
in_domain_stat_tbl = "None"
#sys.exit("Error")
try:
out_domain_stat_tbl = args[args.index("-outdomain_stat_tbl")+1]
except:
print "You did not provide a name for an out of domain statistics table to use"
print "Defaulting to None"
out_domain_stat_tbl = "None"
if in_domain_stat_tbl == "None" and out_domain_stat_tbl == "None":
print "Error:"
print "You provided neither an in domain or out of domain stat table"
sys.exit("Error")
#Lambda weight applied to Gi* vectors from the in domain statistics table
try:
in_domain_lambda = args[args.index("-in_domain_lambda")+1]
except:
print ""
print "You did not provide a value for in domain lambda, defaulting to 0.0"
print ""
in_domain_lambda = 0.0
try:
out_domain_lambda = args[args.index("-out_domain_lambda")+1]
except:
print ""
print "You did not provide a value for out domain lambda, defaulting to 0.0"
print ""
out_domain_lambda = 0.0
if float(in_domain_lambda) == 0.0 and float(out_domain_lambda) == 0.0:
print "Error:"
print "A value of 0.0 was provided for both -in_domain_lambda and -out_domain_lambda"
sys.exit("Error")
#Train file, dev file, or tst file
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
#Postgresql connection information
try:
conn_info = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Pointgrid table name
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#context window size
try:
window = int(args[args.index('-window')+1])
except:
print "You did not provide a window argument, defaulting to 15"
window = 15
#percentile of selection
try:
percentile = float(args[args.index('-percentile')+1])
except:
print "You did not provide a window argument, defaulting to .5"
percentile = .5
#Weight applied to Gi* Vector of main toponym being evaluated
try:
main_topo_weight = float(args[args.index('-main_topo_weight')+1])
except:
print "You did not provide a main topo weight, defaulting to 10"
main_topo_weight = 10.0
#Weight applied to Gi* Vector of other toponyms in context
try:
other_topo_weight = float(args[args.index('-other_topo_weight')+1])
except:
print "You did not provide an other topo weight, defaulting to 5"
other_topo_weight = 5.0
#Weight applied to Gi* Vector of other words in context
try:
other_word_weight = float(args[args.index('-other_word_weight')+1])
except:
print "You did not provide an other word weight, defaulting to 1"
other_word_weight = 1.0
#Country Table Name
try:
country_tbl = args[args.index('-country_tbl')+1]
except:
print "You did not provide a country table argument"
sys.exit("Error")
#Region Table Name
try:
region_tbl = args[args.index('-region_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#State Table Name
try:
state_tbl = args[args.index('-state_tbl')+1]
except:
print "You did not provide a region table argument"
sys.exit("Error")
#Geonames Table Name
try:
geonames_tbl = args[args.index('-geonames_tbl')+1]
except:
print "You did not provide a geonames table argument"
sys.exit("Error")
#US Prominent Table Name
#try:
# us_prom_tbl = args[args.index('-us_prom_tbl')+1]
#except:
# print "You did not provide a prominent US city table argument"
# sys.exit("Error")
#World Prominent Table Name
#try:
# world_prom_tbl = args[args.index('-world_prom_tbl')+1]
#except:
# print "You did not provide a prominent WORLD city table argument"
# sys.exit("Error")
#Results File Name
try:
results_file = args[args.index('-results_file')+1]
except:
print "You did not provide a results file name, defaulting to TestResults.txt"
results_file = "TestResults.txt"
tr.calc(in_domain_stat_tbl, out_domain_stat_tbl, f, conn_info, gtbl, window, percentile, float(main_topo_weight), float(other_topo_weight), float(other_word_weight), country_tbl, region_tbl, state_tbl, geonames_tbl, float(in_domain_lambda), float(out_domain_lambda), results_file, stan_path)
##################Perform Moran's Calculations#################
if mode_arg.lower() == "calc_morans":
import MoransV1 as morans
print "Starting Morans Coef. Calculation"
#Train file, dev file, or tst file
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
#Train type should be wiki or twitter
try:
traintype = args[args.index("-traintype")+1]
except:
print "You did not provide a training type for your train file (e.g. wiki or twitter)"
sys.exit("Error")
#Document table
try:
dtbl = args[args.index("-dtbl")+1]
except:
print "You did not provide a name for the input document table"
sys.exit("Error")
#Pointgrid table
try:
gtbl = args[args.index('-gtbl')+1]
except:
print "You did not provide a grid table argument"
sys.exit("Error")
#DB connection Information
#(in the future switch to a .config file)
try:
conn = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
sys.exit("Error")
#Distance at which to aggregate documents
try:
agg_dist = args[args.index('-agg_dist')+1]
except:
print "Did not provide a bandwidth size for the agg_dist (aggregate distance) kernel"
sys.exit("Error")
#Distance that determines adjacency relation for Moran's weights
try:
kern_dist = args[args.index('-kern_dist')+1]
except:
print "Did not provide a bandwidth size for the kern_dist (Morans kernel distance) kernel"
sys.exit("Error")
#Out file with Moran's scores
try:
outf = args[args.index('-outf')+1]
except:
print "You did not provide an outfile for where the moran's scores will be written"
sys.exit("Error")
#Use aggregated LM option
#Defaults to false if not provided
try:
if "-use_agg_lm" in args and args[args.index('-use_agg_lm')+1] != False:
use_agg_lm = True
else: use_agg_lm = False
except:
print "You did not specify whether you wanted to use an aggregated file, defaulting to False"
use_agg_lm = False
#Write aggregated language models option
#Defaults to false if not provided
try:
if "-write_agg_lm" in args and args[args.index('-write_agg_lm')+1] != "False":
write_agg_lm = True
else: write_agg_lm = False
except:
print "You did not provide a write aggregate option, defaulting to false"
write_agg_lm = False
#Write file for aggregated LM
#Defaults to "tmp.txt" if not provided
try:
write_agg_file = args[args.index('-write_agg_file')+1]
except:
print "You did not provide a write aggregate outfile option, defaulting to tmp.txt"
write_agg_file = "tmp.txt"
#Significance testing for Moran's Coef values (true, false)
try:
sig_test = args[args.index('-sig_test')+1]
if str(sig_test).lower() != "false":
sig_test = True
else: sig_test = False
except:
print "You did not provide a significance test option, defaulting to false"
sig_test = False
#neighbor reference file mode, meant to make calculations independent of DB connection. Not yet implemented
try:
neighbor_ref_file = args[args.index('-neighbor_file')+1]
except:
print "You did not provide a neighbor reference file, defaulting to None"
neighbor_ref_file = "None"
#Calculate means and moran's scores using only grid cells where a word is observed (appears, all)
try:
mean_method = args[args.index('-mean_method')+1]
except:
print "Did not provide a mean_method argument, defaulting to 'appears'"
mean_method = "appears"
try:
grid_min = args[args.index('-grid_freq_min')+1]
except:
print "Did not provide a grid_freq_min argument... defaulting to 1"
grid_min = 0
#The number of iterations to perform on Monte Carlo Significance Simulation
try:
iterations = int(args[args.index('-iterations')+1])
except:
print "Did not provide a -iterations argument, defaulting to 0"
iterations = 0
#The number of cores you want to devote to multiprocessed significance testing
try:
cores = int(args[args.index('-cores')+1])
except:
print "Did not provide a -cores argument, defaulting to 1"
cores = 1
morans.calc(f, dtbl, gtbl, conn, outf, agg_dist, kern_dist, traintype.lower(), write_agg_lm, use_agg_lm, write_agg_file, sig_test, neighbor_ref_file, mean_method, int(grid_min), iterations, cores)
else:
print "TopoCluster Run Modes:"
print "-mode loadDB"
print "-mode calc_morans"
print "-mode plain_topo_resolve"
print "-mode topo_test"
print "-mode topo_test_ner"
print "-mode local_stats"
print "========================"
print "Please visit https://github.com/grantdelozier/TopoCluster/blob/master/README.md for more information"
|
|
'''
ModalView
=========
.. versionadded:: 1.4.0
The :class:`ModalView` widget is used to create modal views. By default, the
view will cover the whole "parent" window.
Remember that the default size of a Widget is size_hint=(1, 1). If you don't
want your view to be fullscreen, either use size hints with values lower than
1 (for instance size_hint=(.8, .8)) or deactivate the size_hint and use fixed
size attributes.
Examples
--------
Example of a simple 400x400 Hello world view::
view = ModalView(size_hint=(None, None), size=(400, 400))
view.add_widget(Label(text='Hello world'))
By default, any click outside the view will dismiss it. If you don't
want that, you can set :attr:`ModalView.auto_dismiss` to False::
view = ModalView(auto_dismiss=False)
view.add_widget(Label(text='Hello world'))
view.open()
To manually dismiss/close the view, use the :meth:`ModalView.dismiss` method of
the ModalView instance::
view.dismiss()
Both :meth:`ModalView.open` and :meth:`ModalView.dismiss` are bindable. That
means you can directly bind the function to an action, e.g. to a button's
on_press ::
# create content and add it to the view
content = Button(text='Close me!')
view = ModalView(auto_dismiss=False)
view.add_widget(content)
# bind the on_press event of the button to the dismiss function
content.bind(on_press=view.dismiss)
# open the view
view.open()
ModalView Events
----------------
There are two events available: `on_open` which is raised when the view is
opening, and `on_dismiss` which is raised when the view is closed.
For `on_dismiss`, you can prevent the view from closing by explicitly returning
True from your callback. ::
def my_callback(instance):
print('ModalView', instance, 'is being dismissed, but is prevented!')
return True
view = ModalView()
view.add_widget(Label(text='Hello world'))
view.bind(on_dismiss=my_callback)
view.open()
.. versionchanged:: 1.5.0
The ModalView can be closed by hitting the escape key on the
keyboard if the :attr:`ModalView.auto_dismiss` property is True (the
default).
'''
__all__ = ('ModalView', )
from kivy.logger import Logger
from kivy.animation import Animation
from kivy.uix.anchorlayout import AnchorLayout
from kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \
NumericProperty, ListProperty
class ModalView(AnchorLayout):
'''ModalView class. See module documentation for more information.
:Events:
`on_pre_open`:
Fired before the ModalView is opened. When this event is fired
ModalView is not yet added to window.
`on_open`:
Fired when the ModalView is opened.
`on_pre_dismiss`:
Fired before the ModalView is closed.
`on_dismiss`:
Fired when the ModalView is closed. If the callback returns True,
the dismiss will be canceled.
.. versionchanged:: 1.11.0
Added events `on_pre_open` and `on_pre_dismiss`.
'''
auto_dismiss = BooleanProperty(True)
'''This property determines if the view is automatically
dismissed when the user clicks outside it.
:attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
attach_to = ObjectProperty(None)
'''If a widget is set on attach_to, the view will attach to the nearest
parent window of the widget. If none is found, it will attach to the
main/global Window.
:attr:`attach_to` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
background_color = ListProperty([0, 0, 0, .7])
'''Background color in the format (r, g, b, a).
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0, 0, 0, .7].
'''
background = StringProperty(
'atlas://data/images/defaulttheme/modalview-background')
'''Background image of the view used for the view background.
:attr:`background` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/modalview-background'.
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used for the :attr:`background_normal` and the
:attr:`background_down` properties. Can be used when using custom
backgrounds.
It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instructions for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16).
'''
# Internals properties used for graphical representation.
_anim_alpha = NumericProperty(0)
_anim_duration = NumericProperty(.1)
_window = ObjectProperty(None, allownone=True, rebind=True)
__events__ = ('on_pre_open', 'on_open', 'on_pre_dismiss', 'on_dismiss')
def __init__(self, **kwargs):
self._parent = None
super(ModalView, self).__init__(**kwargs)
def _search_window(self):
# get window to attach to
window = None
if self.attach_to is not None:
window = self.attach_to.get_parent_window()
if not window:
window = self.attach_to.get_root_window()
if not window:
from kivy.core.window import Window
window = Window
return window
def open(self, *largs, **kwargs):
'''Show the view window from the :attr:`attach_to` widget. If set, it
will attach to the nearest window. If the widget is not attached to any
window, the view will attach to the global
:class:`~kivy.core.window.Window`.
When the view is opened, it will be faded in with an animation. If you
don't want the animation, use::
view.open(animation=False)
'''
if self._window is not None:
Logger.warning('ModalView: you can only open once.')
return
# search window
self._window = self._search_window()
if not self._window:
Logger.warning('ModalView: cannot open view, no window found.')
return
self.dispatch('on_pre_open')
self._window.add_widget(self)
self._window.bind(
on_resize=self._align_center,
on_keyboard=self._handle_keyboard)
self.center = self._window.center
self.fbind('center', self._align_center)
self.fbind('size', self._align_center)
if kwargs.get('animation', True):
a = Animation(_anim_alpha=1., d=self._anim_duration)
a.bind(on_complete=lambda *x: self.dispatch('on_open'))
a.start(self)
else:
self._anim_alpha = 1.
self.dispatch('on_open')
def dismiss(self, *largs, **kwargs):
'''Close the view if it is open. If you really want to close the
view, whatever the on_dismiss event returns, you can use the *force*
argument:
::
view = ModalView()
view.dismiss(force=True)
When the view is dismissed, it will be faded out before being
removed from the parent. If you don't want animation, use::
view.dismiss(animation=False)
'''
if self._window is None:
return
self.dispatch('on_pre_dismiss')
if self.dispatch('on_dismiss') is True:
if kwargs.get('force', False) is not True:
return
if kwargs.get('animation', True):
Animation(_anim_alpha=0., d=self._anim_duration).start(self)
else:
self._anim_alpha = 0
self._real_remove_widget()
def _align_center(self, *l):
if self._window:
self.center = self._window.center
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
if self.auto_dismiss:
self.dismiss()
return True
super(ModalView, self).on_touch_down(touch)
return True
def on_touch_move(self, touch):
super(ModalView, self).on_touch_move(touch)
return True
def on_touch_up(self, touch):
super(ModalView, self).on_touch_up(touch)
return True
def on__anim_alpha(self, instance, value):
if value == 0 and self._window is not None:
self._real_remove_widget()
def _real_remove_widget(self):
if self._window is None:
return
self._window.remove_widget(self)
self._window.unbind(
on_resize=self._align_center,
on_keyboard=self._handle_keyboard)
self._window = None
def on_pre_open(self):
pass
def on_open(self):
pass
def on_pre_dismiss(self):
pass
def on_dismiss(self):
pass
def _handle_keyboard(self, window, key, *largs):
if key == 27 and self.auto_dismiss:
self.dismiss()
return True
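# --- Hedged usage sketch (not part of the original module) ---
# The open()/dismiss() docstrings above describe the 'animation' and 'force'
# keyword arguments; a minimal illustration (widget sizes are placeholders):
#
#   view = ModalView(size_hint=(None, None), size=(300, 200))
#   view.bind(on_dismiss=lambda *args: True)  # veto normal dismissal
#   view.open(animation=False)                # skip the fade-in
#   view.dismiss()                            # vetoed by the handler above
#   view.dismiss(force=True)                  # closes despite the veto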
if __name__ == '__main__':
from kivy.base import runTouchApp
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.core.window import Window
# add view
content = GridLayout(cols=1)
content.add_widget(Label(text='This is a hello world'))
view = ModalView(size_hint=(None, None), size=(256, 256),
auto_dismiss=True)
view.add_widget(content)
def open_view(btn):
view.open()
layout = GridLayout(cols=3)
for x in range(9):
btn = Button(text='click me %s' % x)
btn.bind(on_release=view.open)
layout.add_widget(btn)
Window.add_widget(layout)
view.open()
runTouchApp()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from horizon import exceptions
from openstack_dashboard.api import base as api_base
from openstack_dashboard.test import helpers as test
class APIResource(api_base.APIResourceWrapper):
"""Simple APIResource for testing."""
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerObject=None):
if innerObject is None:
class InnerAPIResource(object):
pass
innerObject = InnerAPIResource()
innerObject.foo = 'foo'
innerObject.bar = 'bar'
return APIResource(innerObject)
class APIDict(api_base.APIDictWrapper):
"""Simple APIDict for testing."""
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerDict=None):
if innerDict is None:
innerDict = {'foo': 'foo',
'bar': 'bar'}
return APIDict(innerDict)
# Wrapper classes that only define _attrs don't need extra testing.
class APIResourceWrapperTests(test.TestCase):
def test_get_attribute(self):
resource = APIResource.get_instance()
self.assertEqual(resource.foo, 'foo')
def test_get_invalid_attribute(self):
resource = APIResource.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
def test_get_inner_missing_attribute(self):
resource = APIResource.get_instance()
with self.assertRaises(AttributeError):
resource.baz
def test_repr(self):
resource = APIResource.get_instance()
resource_str = resource.__repr__()
self.assertIn('foo', resource_str)
self.assertIn('bar', resource_str)
self.assertNotIn('baz', resource_str)
class APIDictWrapperTests(test.TestCase):
# APIDict allows for both attribute access and dictionary style [element]
# style access. Test both
def test_get_item(self):
resource = APIDict.get_instance()
self.assertEqual(resource.foo, 'foo')
self.assertEqual(resource['foo'], 'foo')
def test_get_invalid_item(self):
resource = APIDict.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
with self.assertRaises(KeyError):
resource['missing']
def test_get_inner_missing_attribute(self):
resource = APIDict.get_instance()
with self.assertRaises(AttributeError):
resource.baz
with self.assertRaises(KeyError):
resource['baz']
def test_get_with_default(self):
resource = APIDict.get_instance()
self.assertEqual(resource.get('foo'), 'foo')
self.assertIsNone(resource.get('baz'))
self.assertEqual('retValue', resource.get('baz', 'retValue'))
class ApiHelperTests(test.TestCase):
"""Tests for functions that don't use one of the api objects."""
def test_url_for(self):
url = api_base.url_for(self.request, 'auditlog')
self.assertEqual(url, 'http://public.auditlog.example.com:9090')
url = api_base.url_for(self.request, 'auditlog',
endpoint_type='adminURL')
self.assertEqual(url, 'http://admin.auditlog.example.com:9090')
url = api_base.url_for(self.request, 'image')
self.assertEqual(url, 'http://public.glance.example.com:9292/v1')
url = api_base.url_for(self.request, 'image', endpoint_type='adminURL')
self.assertEqual(url, 'http://admin.glance.example.com:9292/v1')
url = api_base.url_for(self.request, 'compute')
self.assertEqual(url, 'http://public.nova.example.com:8774/v2')
url = api_base.url_for(self.request, 'compute',
endpoint_type='adminURL')
self.assertEqual(url, 'http://admin.nova.example.com:8774/v2')
url = api_base.url_for(self.request, 'volume')
self.assertEqual(url, 'http://public.nova.example.com:8776/v1')
url = api_base.url_for(self.request, 'volume',
endpoint_type="internalURL")
self.assertEqual(url, 'http://int.nova.example.com:8776/v1')
url = api_base.url_for(self.request, 'volume',
endpoint_type='adminURL')
self.assertEqual(url, 'http://admin.nova.example.com:8776/v1')
self.assertNotIn('notAnApi', self.request.user.service_catalog,
'Select a new nonexistent service catalog key')
with self.assertRaises(exceptions.ServiceCatalogException):
url = api_base.url_for(self.request, 'notAnApi')
self.request.user.services_region = "RegionTwo"
url = api_base.url_for(self.request, 'compute')
self.assertEqual(url, 'http://public.nova2.example.com:8774/v2')
self.request.user.services_region = "RegionTwo"
url = api_base.url_for(self.request, 'compute',
endpoint_type='adminURL')
self.assertEqual(url, 'http://admin.nova2.example.com:8774/v2')
self.request.user.services_region = "RegionTwo"
with self.assertRaises(exceptions.ServiceCatalogException):
url = api_base.url_for(self.request, 'image')
self.request.user.services_region = "bogus_value"
url = api_base.url_for(self.request, 'identity',
endpoint_type='adminURL')
self.assertEqual(url, 'http://admin.keystone.example.com:35357/v2.0')
self.request.user.services_region = "bogus_value"
with self.assertRaises(exceptions.ServiceCatalogException):
url = api_base.url_for(self.request, 'image')
class QuotaSetTests(test.TestCase):
def test_quotaset_add_with_plus(self):
quota_dict = {'foo': 1, 'bar': 10}
other_quota_dict = {'my_test': 12}
quota_set = api_base.QuotaSet(quota_dict)
other_quota_set = api_base.QuotaSet(other_quota_dict)
quota_set += other_quota_set
self.assertEqual(len(quota_set), 3)
quota_dict.update(other_quota_dict)
for q in quota_set:
self.assertEqual(q.limit, quota_dict[q.name])
def test_quotaset_add_doesnt_override_existing_quota(self):
quota_dict = {'foo': 1, 'bar': 10}
quota_set = api_base.QuotaSet(quota_dict)
other_quota_set = api_base.QuotaSet({'foo': 12})
quota_set += other_quota_set
self.assertEqual(len(quota_set), 2)
for q in quota_set:
self.assertEqual(q.limit, quota_dict[q.name])
def test_quotaset_add_method(self):
quota_dict = {'foo': 1, 'bar': 10}
other_quota_dict = {'my_test': 12}
quota_set = api_base.QuotaSet(quota_dict)
other_quota_set = api_base.QuotaSet(other_quota_dict)
quota_set.add(other_quota_set)
self.assertEqual(len(quota_set), 3)
quota_dict.update(other_quota_dict)
for q in quota_set:
self.assertEqual(q.limit, quota_dict[q.name])
def test_quotaset_add_with_wrong_type(self):
quota_set = api_base.QuotaSet({'foo': 1, 'bar': 10})
self.assertRaises(ValueError, quota_set.add, {'test': 7})
|
|
#
# Copyright 2021 Espressif Systems (Shanghai) CO., LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from construct import Int16ul, Int32ul, Int64ul, Struct
from . import BaseArchMethodsMixin, BaseTargetMethods, ESPCoreDumpLoaderError
try:
from typing import Any, Optional, Tuple
except ImportError:
pass
INVALID_CAUSE_VALUE = 0xFFFF
XCHAL_EXCCAUSE_NUM = 64
# Exception cause dictionary to get translation of exccause register
# From 4.4.1.5 table 4-64 Exception Causes of Xtensa
# Instruction Set Architecture (ISA) Reference Manual
XTENSA_EXCEPTION_CAUSE_DICT = {
0: ('IllegalInstructionCause', 'Illegal instruction'),
1: ('SyscallCause', 'SYSCALL instruction'),
2: ('InstructionFetchErrorCause',
'Processor internal physical address or data error during instruction fetch. (See EXCVADDR for more information)'),
3: ('LoadStoreErrorCause',
'Processor internal physical address or data error during load or store. (See EXCVADDR for more information)'),
4: ('Level1InterruptCause', 'Level-1 interrupt as indicated by set level-1 bits in the INTERRUPT register'),
5: ('AllocaCause', 'MOVSP instruction, if caller`s registers are not in the register file'),
6: ('IntegerDivideByZeroCause', 'QUOS: QUOU, REMS: or REMU divisor operand is zero'),
8: ('PrivilegedCause', 'Attempt to execute a privileged operation when CRING != 0'),
9: ('LoadStoreAlignmentCause', 'Load or store to an unaligned address. (See EXCVADDR for more information)'),
12: ('InstrPIFDataErrorCause', 'PIF data error during instruction fetch. (See EXCVADDR for more information)'),
13: ('LoadStorePIFDataErrorCause',
'Synchronous PIF data error during LoadStore access. (See EXCVADDR for more information)'),
14: ('InstrPIFAddrErrorCause', 'PIF address error during instruction fetch. (See EXCVADDR for more information)'),
15: ('LoadStorePIFAddrErrorCause',
'Synchronous PIF address error during LoadStore access. (See EXCVADDR for more information)'),
16: ('InstTLBMissCause', 'Error during Instruction TLB refill. (See EXCVADDR for more information)'),
17: ('InstTLBMultiHitCause', 'Multiple instruction TLB entries matched. (See EXCVADDR for more information)'),
18: ('InstFetchPrivilegeCause',
'An instruction fetch referenced a virtual address at a ring level less than CRING. (See EXCVADDR for more information)'),
20: ('InstFetchProhibitedCause',
'An instruction fetch referenced a page mapped with an attribute that does not permit instruction fetch (EXCVADDR).'),
24: ('LoadStoreTLBMissCause', 'Error during TLB refill for a load or store. (See EXCVADDR for more information)'),
25: ('LoadStoreTLBMultiHitCause',
'Multiple TLB entries matched for a load or store. (See EXCVADDR for more information)'),
26: ('LoadStorePrivilegeCause',
'A load or store referenced a virtual address at a ring level less than CRING. (See EXCVADDR for more information)'),
28: ('LoadProhibitedCause',
'A load referenced a page mapped with an attribute that does not permit loads. (See EXCVADDR for more information)'),
29: ('StoreProhibitedCause',
'A store referenced a page mapped with an attribute that does not permit stores [Region Protection Option or MMU Option].'),
32: ('Coprocessor0Disabled', 'Coprocessor 0 instruction when cp0 disabled'),
33: ('Coprocessor1Disabled', 'Coprocessor 1 instruction when cp1 disabled'),
34: ('Coprocessor2Disabled', 'Coprocessor 2 instruction when cp2 disabled'),
35: ('Coprocessor3Disabled', 'Coprocessor 3 instruction when cp3 disabled'),
36: ('Coprocessor4Disabled', 'Coprocessor 4 instruction when cp4 disabled'),
37: ('Coprocessor5Disabled', 'Coprocessor 5 instruction when cp5 disabled'),
38: ('Coprocessor6Disabled', 'Coprocessor 6 instruction when cp6 disabled'),
39: ('Coprocessor7Disabled', 'Coprocessor 7 instruction when cp7 disabled'),
INVALID_CAUSE_VALUE: (
'InvalidCauseRegister', 'Invalid EXCCAUSE register value or current task is broken and was skipped'),
# ESP panic pseudo reasons
XCHAL_EXCCAUSE_NUM + 0: ('UnknownException', 'Unknown exception'),
XCHAL_EXCCAUSE_NUM + 1: ('DebugException', 'Unhandled debug exception'),
XCHAL_EXCCAUSE_NUM + 2: ('DoubleException', 'Double exception'),
XCHAL_EXCCAUSE_NUM + 3: ('KernelException', 'Unhandled kernel exception'),
XCHAL_EXCCAUSE_NUM + 4: ('CoprocessorException', 'Coprocessor exception'),
XCHAL_EXCCAUSE_NUM + 5: ('InterruptWDTTimoutCPU0', 'Interrupt wdt timeout on CPU0'),
XCHAL_EXCCAUSE_NUM + 6: ('InterruptWDTTimoutCPU1', 'Interrupt wdt timeout on CPU1'),
XCHAL_EXCCAUSE_NUM + 7: ('CacheError', 'Cache disabled but cached memory region accessed'),
}
class ExceptionRegisters(object):
# extra regs IDs used in EXTRA_INFO note
EXCCAUSE_IDX = 0
EXCVADDR_IDX = 1
EPC1_IDX = 177
EPC2_IDX = 178
EPC3_IDX = 179
EPC4_IDX = 180
EPC5_IDX = 181
EPC6_IDX = 182
EPC7_IDX = 183
EPS2_IDX = 194
EPS3_IDX = 195
EPS4_IDX = 196
EPS5_IDX = 197
EPS6_IDX = 198
EPS7_IDX = 199
@property
def registers(self): # type: () -> dict[str, int]
return {k: v for k, v in self.__class__.__dict__.items()
if not k.startswith('__') and isinstance(v, int)}
# Following structs are based on source code
# IDF_PATH/components/espcoredump/src/core_dump_port.c
PrStatus = Struct(
'si_signo' / Int32ul,
'si_code' / Int32ul,
'si_errno' / Int32ul,
'pr_cursig' / Int16ul,
'pr_pad0' / Int16ul,
'pr_sigpend' / Int32ul,
'pr_sighold' / Int32ul,
'pr_pid' / Int32ul,
'pr_ppid' / Int32ul,
'pr_pgrp' / Int32ul,
'pr_sid' / Int32ul,
'pr_utime' / Int64ul,
'pr_stime' / Int64ul,
'pr_cutime' / Int64ul,
'pr_cstime' / Int64ul,
)
def print_exc_regs_info(extra_info): # type: (list[int]) -> None
"""
Print the register info by parsing extra_info
:param extra_info: extra info data str
:return: None
"""
exccause = extra_info[1 + 2 * ExceptionRegisters.EXCCAUSE_IDX + 1]
exccause_str = XTENSA_EXCEPTION_CAUSE_DICT.get(exccause)
if not exccause_str:
exccause_str = ('Invalid EXCCAUSE code', 'Invalid EXCCAUSE description or not found.')
print('exccause 0x%x (%s)' % (exccause, exccause_str[0]))
print('excvaddr 0x%x' % extra_info[1 + 2 * ExceptionRegisters.EXCVADDR_IDX + 1])
# skip crashed_task_tcb, exccause, and excvaddr
for i in range(5, len(extra_info), 2):
if (extra_info[i] >= ExceptionRegisters.EPC1_IDX and extra_info[i] <= ExceptionRegisters.EPC7_IDX):
print('epc%d 0x%x' % ((extra_info[i] - ExceptionRegisters.EPC1_IDX + 1), extra_info[i + 1]))
# skip crashed_task_tcb, exccause, and excvaddr
for i in range(5, len(extra_info), 2):
if (extra_info[i] >= ExceptionRegisters.EPS2_IDX and extra_info[i] <= ExceptionRegisters.EPS7_IDX):
print('eps%d 0x%x' % ((extra_info[i] - ExceptionRegisters.EPS2_IDX + 2), extra_info[i + 1]))
# from "gdb/xtensa-tdep.h"
# typedef struct
# {
# 0 xtensa_elf_greg_t pc;
# 1 xtensa_elf_greg_t ps;
# 2 xtensa_elf_greg_t lbeg;
# 3 xtensa_elf_greg_t lend;
# 4 xtensa_elf_greg_t lcount;
# 5 xtensa_elf_greg_t sar;
# 6 xtensa_elf_greg_t windowstart;
# 7 xtensa_elf_greg_t windowbase;
# 8..63 xtensa_elf_greg_t reserved[8+48];
# 64 xtensa_elf_greg_t ar[64];
# } xtensa_elf_gregset_t;
REG_PC_IDX = 0
REG_PS_IDX = 1
REG_LB_IDX = 2
REG_LE_IDX = 3
REG_LC_IDX = 4
REG_SAR_IDX = 5
# REG_WS_IDX = 6
# REG_WB_IDX = 7
REG_AR_START_IDX = 64
# REG_AR_NUM = 64
# FIXME: according to xtensa_elf_gregset_t the number of regs must be 128,
# but gdb complains when it is less than 129
REG_NUM = 129
# XT_SOL_EXIT = 0
XT_SOL_PC = 1
XT_SOL_PS = 2
# XT_SOL_NEXT = 3
XT_SOL_AR_START = 4
XT_SOL_AR_NUM = 4
# XT_SOL_FRMSZ = 8
XT_STK_EXIT = 0
XT_STK_PC = 1
XT_STK_PS = 2
XT_STK_AR_START = 3
XT_STK_AR_NUM = 16
XT_STK_SAR = 19
XT_STK_EXCCAUSE = 20
XT_STK_EXCVADDR = 21
XT_STK_LBEG = 22
XT_STK_LEND = 23
XT_STK_LCOUNT = 24
XT_STK_FRMSZ = 25
class XtensaMethodsMixin(BaseArchMethodsMixin):
@staticmethod
def get_registers_from_stack(data, grows_down):
# type: (bytes, bool) -> Tuple[list[int], Optional[dict[int, int]]]
extra_regs = {v: 0 for v in ExceptionRegisters().registers.values()}
regs = [0] * REG_NUM
# TODO: support for growing up stacks
if not grows_down:
raise ESPCoreDumpLoaderError('Growing up stacks are not supported for now!')
ex_struct = Struct(
'stack' / Int32ul[XT_STK_FRMSZ]
)
if len(data) < ex_struct.sizeof():
raise ESPCoreDumpLoaderError('Too small stack to keep frame: %d bytes!' % len(data))
stack = ex_struct.parse(data).stack
# Stack frame type indicator is always the first item
rc = stack[XT_STK_EXIT]
if rc != 0:
regs[REG_PC_IDX] = stack[XT_STK_PC]
regs[REG_PS_IDX] = stack[XT_STK_PS]
for i in range(XT_STK_AR_NUM):
regs[REG_AR_START_IDX + i] = stack[XT_STK_AR_START + i]
regs[REG_SAR_IDX] = stack[XT_STK_SAR]
regs[REG_LB_IDX] = stack[XT_STK_LBEG]
regs[REG_LE_IDX] = stack[XT_STK_LEND]
regs[REG_LC_IDX] = stack[XT_STK_LCOUNT]
# FIXME: crashed and some running tasks (e.g. prvIdleTask) have EXCM bit set
# and GDB can not unwind callstack properly (it implies not windowed call0)
if regs[REG_PS_IDX] & (1 << 5):
regs[REG_PS_IDX] &= ~(1 << 4)
if stack[XT_STK_EXCCAUSE] in XTENSA_EXCEPTION_CAUSE_DICT:
extra_regs[ExceptionRegisters.EXCCAUSE_IDX] = stack[XT_STK_EXCCAUSE]
else:
extra_regs[ExceptionRegisters.EXCCAUSE_IDX] = INVALID_CAUSE_VALUE
extra_regs[ExceptionRegisters.EXCVADDR_IDX] = stack[XT_STK_EXCVADDR]
else:
regs[REG_PC_IDX] = stack[XT_SOL_PC]
regs[REG_PS_IDX] = stack[XT_SOL_PS]
for i in range(XT_SOL_AR_NUM):
regs[REG_AR_START_IDX + i] = stack[XT_SOL_AR_START + i]
# nxt = stack[XT_SOL_NEXT]
return regs, extra_regs
@staticmethod
def build_prstatus_data(tcb_addr, task_regs): # type: (int, list[int]) -> Any
return PrStatus.build({
'si_signo': 0,
'si_code': 0,
'si_errno': 0,
'pr_cursig': 0, # TODO: set sig only for current/failed task
'pr_pad0': 0,
'pr_sigpend': 0,
'pr_sighold': 0,
'pr_pid': tcb_addr,
'pr_ppid': 0,
'pr_pgrp': 0,
'pr_sid': 0,
'pr_utime': 0,
'pr_stime': 0,
'pr_cutime': 0,
'pr_cstime': 0,
}) + Int32ul[len(task_regs)].build(task_regs)
class Esp32Methods(BaseTargetMethods, XtensaMethodsMixin):
TARGET = 'esp32'
class Esp32S2Methods(BaseTargetMethods, XtensaMethodsMixin):
TARGET = 'esp32s2'
class Esp32S3Methods(BaseTargetMethods, XtensaMethodsMixin):
TARGET = 'esp32s3'
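# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the extra_info layout assumed by print_exc_regs_info() above:
# [crashed_task_tcb, reg_id, value, reg_id, value, ...]. All numbers below are
# made up for demonstration and only run when the surrounding package imports
# resolve.
if __name__ == '__main__':
    demo_extra_info = [
        0x3FFB0000,                               # crashed task TCB address
        ExceptionRegisters.EXCCAUSE_IDX, 29,      # StoreProhibitedCause
        ExceptionRegisters.EXCVADDR_IDX, 0x0,     # faulting address
        ExceptionRegisters.EPC1_IDX, 0x400D1234,  # EPC1
    ]
    print_exc_regs_info(demo_extra_info)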
|
|
from __future__ import division
import numpy as np
from itertools import combinations_with_replacement
from .core import Data, Summary, Propensity, PropensitySelect, Strata
from .estimators import OLS, Blocking, Weighting, Matching, Estimators
class CausalModel(object):
def __init__(self, Y, D, X):
self.old_data = Data(Y, D, X)
self.reset()
def reset(self):
"""
Reinitializes data to original inputs, and drops any estimated
results.
"""
Y, D, X = self.old_data['Y'], self.old_data['D'], self.old_data['X']
self.raw_data = Data(Y, D, X)
self.summary_stats = Summary(self.raw_data)
self.propensity = None
self.cutoff = None
self.blocks = None
self.strata = None
self.estimates = Estimators()
def est_propensity(self, lin='all', qua=None):
"""
Estimates the propensity scores given list of covariates to
include linearly or quadratically.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
Expected args
-------------
lin: string, list
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to the string 'all', which
uses whole covariate matrix.
qua: list
Tuples indicating which columns of the original
covariate matrix to multiply and include. E.g.,
[(1,1), (2,3)] indicates squaring the 2nd column
and including the product of the 3rd and 4th
columns. Default is to not include any
quadratic terms.
"""
lin_terms = parse_lin_terms(self.raw_data['K'], lin)
qua_terms = parse_qua_terms(self.raw_data['K'], qua)
self.propensity = Propensity(self.raw_data, lin_terms, qua_terms)
self.raw_data._dict['pscore'] = self.propensity['fitted']
self._post_pscore_init()
def est_propensity_s(self, lin_B=None, C_lin=1, C_qua=2.71):
"""
Estimates the propensity score with covariates selected using
the algorithm suggested by Imbens and Rubin (2015).
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
The covariate selection algorithm is based on a sequence
of likelihood ratio tests.
Expected args
-------------
lin_B: list
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to empty list, meaning
every column of X is subjected to the
selection algorithm.
C_lin: scalar
Critical value used in likelihood ratio tests
to decide whether candidate linear terms should
be included. Defaults to 1 as in Imbens and
Rubin (2015).
C_qua: scalar
Critical value used in likelihood ratio tests
to decide whether candidate quadratic terms
should be included. Defaults to 2.71 as in
Imbens and Rubin (2015).
References
----------
Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
lin_basic = parse_lin_terms(self.raw_data['K'], lin_B)
self.propensity = PropensitySelect(self.raw_data, lin_basic,
C_lin, C_qua)
self.raw_data._dict['pscore'] = self.propensity['fitted']
self._post_pscore_init()
def trim(self):
"""
Trims data based on propensity score to create a subsample with
better covariate balance.
The default cutoff value is set to 0.1. To set a custom cutoff
value, modify the object attribute named cutoff directly.
This method should only be executed after the propensity score
has been estimated.
"""
if 0 < self.cutoff <= 0.5:
pscore = self.raw_data['pscore']
keep = (pscore >= self.cutoff) & (pscore <= 1-self.cutoff)
Y_trimmed = self.raw_data['Y'][keep]
D_trimmed = self.raw_data['D'][keep]
X_trimmed = self.raw_data['X'][keep]
self.raw_data = Data(Y_trimmed, D_trimmed, X_trimmed)
self.raw_data._dict['pscore'] = pscore[keep]
self.summary_stats = Summary(self.raw_data)
self.strata = None
self.estimates = Estimators()
elif self.cutoff == 0:
pass
else:
raise ValueError('Invalid cutoff.')
def trim_s(self):
"""
Trims data based on propensity score using the cutoff
selection algorithm suggested by Crump, Hotz, Imbens, and
Mitnik (2009).
This method should only be executed after the propensity score
has been estimated.
References
----------
Crump, R., Hotz, V., Imbens, G., & Mitnik, O. (2009).
Dealing with Limited Overlap in Estimation of
Average Treatment Effects. Biometrika, 96,
187-199.
"""
pscore = self.raw_data['pscore']
g = 1.0/(pscore*(1-pscore)) # 1 over Bernoulli variance
self.cutoff = select_cutoff(g)
self.trim()
def stratify(self):
"""
Stratifies the sample based on propensity score.
By default the sample is divided into five equal-sized bins.
The number of bins can be set by modifying the object
attribute named blocks. Alternatively, custom-sized bins can
be created by setting blocks equal to a sorted list of numbers
between 0 and 1 indicating the bin boundaries.
This method should only be executed after the propensity score
has been estimated.
"""
Y, D, X = self.raw_data['Y'], self.raw_data['D'], self.raw_data['X']
pscore = self.raw_data['pscore']
if isinstance(self.blocks, int):
blocks = split_equal_bins(pscore, self.blocks)
else:
blocks = self.blocks[:] # make a copy; should be sorted
blocks[0] = 0 # avoids always dropping 1st unit
def subset(p_low, p_high):
return (p_low < pscore) & (pscore <= p_high)
subsets = [subset(*ps) for ps in zip(blocks, blocks[1:])]
strata = [CausalModel(Y[s], D[s], X[s]) for s in subsets]
self.strata = Strata(strata, subsets, pscore)
def stratify_s(self):
"""
Stratifies the sample based on propensity score using the
bin selection procedure suggested by Imbens and Rubin (2015).
The bin selection algorithm is based on a sequence of
two-sample t tests performed on the log-odds ratio.
This method should only be executed after the propensity score
has been estimated.
References
----------
Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
pscore_order = self.raw_data['pscore'].argsort()
pscore = self.raw_data['pscore'][pscore_order]
D = self.raw_data['D'][pscore_order]
logodds = np.log(pscore / (1-pscore))
K = self.raw_data['K']
blocks_uniq = set(select_blocks(pscore, logodds, D, K, 0, 1))
self.blocks = sorted(blocks_uniq)
self.stratify()
def est_via_ols(self, adj=2):
"""
Estimates average treatment effects using least squares.
Expected args
-------------
adj: integer; 0, 1, or 2.
Indicates how covariate adjustments are to be
performed. Set adj = 0 to not include any
covariates. Set adj = 1 to include treatment
indicator D and covariates X separately. Set
adj = 2 to additionally include interaction
terms between D and X. Defaults to 2.
"""
self.estimates['ols'] = OLS(self.raw_data, adj)
def est_via_blocking(self, adj=1):
"""
Estimates average treatment effects using regression within
blocks.
This method should only be executed after the sample has been
stratified.
Expected args
-------------
adj: integer; 0, 1, or 2.
Indicates how covariate adjustments are to be
performed for each within-bin regression.
Set adj = 0 to not include any covariates.
Set adj = 1 to include treatment indicator D
and covariates X separately. Set adj = 2 to
additionally include interaction terms between
D and X. Defaults to 1.
"""
self.estimates['blocking'] = Blocking(self.strata, adj)
def est_via_weighting(self):
"""
Estimates average treatment effects using doubly-robust
version of the Horvitz-Thompson weighting estimator.
"""
self.estimates['weighting'] = Weighting(self.raw_data)
def est_via_matching(self, weights='inv', matches=1, bias_adj=False):
"""
Estimates average treatment effects using nearest-
neighborhood matching.
Matching is done with replacement. Method supports multiple
matching. Correcting bias that arises due to imperfect matches
is also supported. For details on methodology, see Imbens
and Rubin (2015).
Expected args
-------------
weights: string or positive definite square matrix.
Specifies weighting matrix used in computing
distance measures. Defaults to string 'inv',
which does inverse variance weighting. String
'maha' gives the weighting matrix used in the
Mahalanobis metric.
matches: integer.
Number of matches to use for each subject.
bias_adj: Boolean.
Specifies whether bias adjustments should be
attempted.
References
----------
Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
X, K = self.raw_data['X'], self.raw_data['K']
X_c, X_t = self.raw_data['X_c'], self.raw_data['X_t']
if weights == 'inv':
W = 1/X.var(0)
elif weights == 'maha':
V_c = np.cov(X_c, rowvar=False, ddof=0)
V_t = np.cov(X_t, rowvar=False, ddof=0)
if K == 1:
W = 1/np.array([[(V_c+V_t)/2]]) # matrix form
else:
W = np.linalg.inv((V_c+V_t)/2)
else:
W = weights
self.estimates['matching'] = Matching(self.raw_data, W,
matches, bias_adj)
def _post_pscore_init(self):
self.cutoff = 0.1
self.blocks = 5
def parse_lin_terms(K, lin):
if lin is None:
return []
elif lin == 'all':
return range(K)
else:
return lin
def parse_qua_terms(K, qua):
if qua is None:
return []
elif qua == 'all':
return list(combinations_with_replacement(range(K), 2))
else:
return qua
def sumlessthan(g, sorted_g, cumsum):
deduped_values = dict(zip(sorted_g, cumsum))
return np.array([deduped_values[x] for x in g])
def select_cutoff(g):
if g.max() <= 2*g.mean():
cutoff = 0
else:
sorted_g = np.sort(g)
cumsum_1 = range(1, len(g)+1)
LHS = g * sumlessthan(g, sorted_g, cumsum_1)
cumsum_g = np.cumsum(sorted_g)
RHS = 2 * sumlessthan(g, sorted_g, cumsum_g)
gamma = np.max(g[LHS <= RHS])
cutoff = 0.5 - np.sqrt(0.25 - 1./gamma)
return cutoff
def split_equal_bins(pscore, blocks):
q = np.linspace(0, 100, blocks+1)[1:-1] # q as in qth centiles
centiles = [np.percentile(pscore, x) for x in q]
return [0] + centiles + [1]
def calc_tstat(sample_c, sample_t):
N_c = sample_c.shape[0]
N_t = sample_t.shape[0]
var_c = sample_c.var(ddof=1)
var_t = sample_t.var(ddof=1)
return (sample_t.mean()-sample_c.mean()) / np.sqrt(var_c/N_c+var_t/N_t)
def calc_sample_sizes(D):
N = D.shape[0]
mid_index = N // 2
Nleft = mid_index
Nleft_t = D[:mid_index].sum()
Nleft_c = Nleft - Nleft_t
Nright = N - Nleft
Nright_t = D[mid_index:].sum()
Nright_c = Nright - Nright_t
return (Nleft_c, Nleft_t, Nright_c, Nright_t)
def select_blocks(pscore, logodds, D, K, p_low, p_high):
scope = (pscore >= p_low) & (pscore <= p_high)
c, t = (scope & (D==0)), (scope & (D==1))
Nleft_c, Nleft_t, Nright_c, Nright_t = calc_sample_sizes(D[scope])
if min(Nleft_c, Nleft_t, Nright_c, Nright_t) < K+1:
return [p_low, p_high]
tstat = calc_tstat(logodds[c], logodds[t])
if tstat <= 1.96:
return [p_low, p_high]
low = pscore[scope][0]
mid = pscore[scope][scope.sum() // 2]
high = pscore[scope][-1]
return select_blocks(pscore, logodds, D, K, low, mid) + \
select_blocks(pscore, logodds, D, K, mid, high)
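# --- Hedged usage sketch (not part of the original module) ---
# A minimal end-to-end walk-through of the CausalModel API defined above,
# using synthetic data; all names and numbers below are illustrative only,
# and the final print relies on the library's own summary formatting.
if __name__ == '__main__':
    np.random.seed(0)
    N, K_demo = 500, 3
    X_demo = np.random.randn(N, K_demo)
    true_pscore = 1.0 / (1.0 + np.exp(-X_demo[:, 0]))   # treatment depends on X
    D_demo = (np.random.rand(N) < true_pscore).astype(int)
    Y_demo = X_demo.sum(axis=1) + 2.0 * D_demo + np.random.randn(N)  # true effect = 2
    model = CausalModel(Y_demo, D_demo, X_demo)
    model.est_propensity_s()     # covariate selection (Imbens & Rubin)
    model.trim_s()               # Crump et al. cutoff
    model.stratify_s()           # data-driven propensity bins
    model.est_via_ols()
    model.est_via_blocking()
    model.est_via_matching(bias_adj=True)
    print(model.estimates)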
|
|
#!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
There are two classes provided for implementing CSS-CII clients:
* :class:`CIIClient` connects to a CII server and provides a CII message
object representing the complete state of the server and notifies you of changes of state.
* :class:`CIIClientConnection` provides a lower level connection
to a CII server and only provides the messages received from the server.
It does not maintain a model of the server state and does not work out when
a received message constitutes a change.
An :doc:`example <examples>` client is provided in this package that uses the :class:`CIIClient` class.
Using CIIClient
---------------
This is the simplest class to use. Create it, passing the URL of the server to connect to,
then call :func:`~CIIClient.connect` and :func:`~CIIClient.disconnect` to connect and disconnect from the server.
CIIClient maintains a local copy of the state of CII data in the :data:`CIIClient.cii` property and the most
recently received CII message in :data:`CIIClient.latestCII`.
You can use the class either by subclassing and overriding the various stub methods
or by creating an instance and replacing the stub methods with your own function handlers
dynamically.
Pass the WebSocket URL of the CII server when creating the `CIIClient` then call the :func:`~CIIClient.connect()`
and :func:`~CIIClient.disconnect()` methods to connect and disconnect from the server. The `onXXX()` methods
can be overridden to find out when connection or disconnection takes place, if there is a protocol error (e.g.
a message was received that could not be parsed as CII) or when properties of the CII data change.
The CII state is kept in the :data:`~CIIClient.cii` property of the object. This is updated with properties
in CII messages that are received. Properties not included in a CII message are left unchanged.
Properties of the CII state whose value is :data:`dvbcss.protocol.OMIT` have not been defined by the CII server.
.. code-block:: python
from dvbcss.protocol.client.cii import CIIClient
class MyCIIClient(CIIClient):
def onConnected(self):
print "Connected!"
def onDisconnected(self, code, reason):
print "Disconnected :-("
def onChange(self, propertyNames):
print "The following CII properties have changed:
for name in propertyNames:
value = getattr(self.cii, name)
print " "+name+" is now: "+str(value)
# one example of a handler for changes to a particular property 'contentId' in CII
def onContentIdChange(self, newValue):
print "The contentId property has changed to now be: "+str(newValue)
client = MyCIIClient("ws://127.0.0.1/cii")
client.connect()
time.sleep(60)
print "The current contentId is "+client.cii.contentId
time.sleep(60) # wait only 60 more seconds then disconnect
client.disconnect()
The client runs in a separate thread managed by the websocket client library, so the `onXXX` methods are called while the main thread sleeps.
Using CIIClientConnection
-------------------------
This is a lower level class, that only implements parsing of the incoming CII messages from the server.
It does not detect if a message actually constitutes a change of state or not.
You can use the class either by subclassing and overriding the various stub methods
or by creating an instance and replacing the stub methods with your own function handlers
dynamically.
Pass the WebSocket URL of the CII server when creating the `CIIClientConnection` object then call the :func:`~CIIClientConnection.connect`
and :func:`~CIIClientConnection.disconnect` methods to connect and disconnect from the server. The `onXXX()` methods
can be overridden to find out when connection or disconnection takes place, if there is a protocol error (e.g.
a message was received that could not be parsed as CII) or when a new CII message is received.
.. code-block:: python
from dvbcss.protocol.client.cii import CIIClientConnection
class MyCIIClientConnection(CIIClientConnection):
def onConnected(self):
print "Connected!"
def onDisconnected(self, code, reason):
print "Disconnected :-("
def onCii(self, cii):
print "Received a CII message: "+str(cii)
client = MyCIIClientConnection("ws://127.0.0.1/cii")
client.connect()
time.sleep(60) # run only for 60 seconds then disconnect
client.disconnect()
"""
import logging
import socket
from dvbcss.protocol.cii import CII
from dvbcss.protocol.client import WrappedWebSocket
from dvbcss.protocol.client import ConnectionError
class CIIClientConnection(object):
"""\
Simple object for connecting to a CSS-CII server and handling the connection.
Use by subclassing and overriding the following methods:
* :func:`onConnected`
* :func:`onDisconnected`
* :func:`onCII`
* :func:`onProtocolError`
If you do not wish to subclass, you can instead create an instance of this
class and replace the methods listed above with your own functions dynamically.
"""
def __init__(self, url):
"""\
**Initialisation takes the following parameters:**
:param: url (:class:`str`) The WebSocket URL of the CII Server to connect to. E.g. "ws://127.0.0.1/mysystem/cii"
"""
super(CIIClientConnection,self).__init__()
self.log = logging.getLogger("dvbcss.protocol.client.cii.CIIClientConnection")
self._ws = WrappedWebSocket(url, self)
self._isOpen = False
def onDisconnected(self, code, reason=None):
"""\
This method is called when the connection is closed.
|stub-method|
:param code: (:class:`int`) The connection closure code to be sent in the WebSocket disconnect frame
:param reason: (:class:`str` or :class:`None`) The human readable reason for the closure
"""
pass
def onConnected(self):
"""\
This method is called when the connection is opened.
|stub-method|
"""
pass
def onCII(self, cii):
"""\
This method is called when a CII message is received from the server.
|stub-method|
:param cii: A :class:`~dvbcss.protocol.cii.CII` object representing the received message.
"""
pass
def onProtocolError(self, msg):
"""\
This method is called when there has been an error in the use of the CII protocol - e.g. receiving the wrong kind of message.
|stub-method|
:param msg: A :class:`str` description of the problem.
"""
pass
@property
def connected(self):
"""True if the connection is connect, otherwise False"""
return self._isOpen
def connect(self):
"""\
Open the connection.
:throws :class:`ConnectionError` if there was a problem and the connection could not be opened.
"""
if not self._isOpen:
self.log.debug("Opening connection")
try:
self._ws.connect()
except ConnectionError, e:
raise e
except socket.error, e:
raise ConnectionError()
def disconnect(self, code=1001, reason=''):
"""\
Close the connection.
:param code: (optional :class:`int`) The connection closure code to be sent in the WebSocket disconnect frame
:param reason: (optional :class:`str`) The human readable reason for the closure
"""
self._isOpen = False
self._ws.close(code, reason)
self._ws.close_connection()
def _ws_on_open(self):
self._isOpen=True
self.log.debug("Connection opened.")
self.onConnected()
def _ws_on_close(self, code, reason=None):
self._isOpen=False
self.log.debug("Connection closed.")
self.onDisconnected(code,reason)
def _ws_on_disconnected(self):
self._isOpen=False
def _ws_on_error(self, msg):
self.log.error("CII Protocol error: "+msg+"\n")
self.onProtocolError(msg)
def _ws_on_message(self, msg):
self.log.debug("Message received.")
if not msg.is_text:
self._ws_on_error("Protocol error - message received was not a text frame")
return
try:
cii = CII.unpack(msg.data)
except Exception,e:
self._ws_on_error("Protocol error - message received could not be parsed as a CII message: "+str(msg)+". Continuing anyway. Cause was: "+str(e)+"\n")
return
self.onCII(cii)
class CIIClient(object):
"""\
Manages a CSS-CII protocol connection to a CSS-CII Server and notifies of changes to CII state.
Use by subclassing and overriding the following methods:
* :func:`onConnected`
* :func:`onDisconnected`
* :func:`onChange`
* individual `onXXXXChange()` methods named after each CII property
* :func:`onCiiReceived` (do not use, by preference)
If you do not wish to subclass, you can instead create an instance of this
class and replace the methods listed above with your own functions dynamically.
The :func:`connect` and :func:`disconnect` methods connect and disconnect the connection to the server
and :func:`getStatusSummary` provides a human readable summary of CII state.
This object also provides properties you can query:
* :data:`cii` represents the current state of CII at the server
* :data:`latestCII` is the most recent CII message received from the server
* :data:`connected` indicates whether the connection is currently open
"""
def __init__(self, ciiUrl):
"""\
**Initialisation takes the following parameters:**
:param ciiUrl: (:class:`str`) The WebSocket URL of the CSS-CII Server (e.g. "ws://127.0.0.1/myservice/cii")
"""
super(CIIClient,self).__init__()
self.log = logging.getLogger("dvbcss.protocol.client.cii.CIIClient")
self._conn = CIIClientConnection(ciiUrl)
self._conn.onCII = self._onCII
self._conn.onConnected = self._onConnectionOpen
self._conn.onDisconnected = self._onConnectionClose
self._conn.onProtocolError = self._onProtocolError
self.connected = False #: True if currently connected to the server, otherwise False.
self.cii = CII() #: (:class:`~dvbcss.protocol.cii.CII`) CII object representing the CII state at the server
self.latestCII = None #: (:class:`~dvbcss.protocol.cii.CII` or :class:`None`) The most recent CII message received from the server or None if nothing has yet been received.
self._callBackFuncNames = {}
for name in CII.allProperties():
funcname = "on" + name[0].upper() + name[1:] + "Change"
self._callBackFuncNames[name] = funcname
def onConnected(self):
"""\
This method is called when the connection is opened.
|stub-method|
"""
pass
def onDisconnected(self, code, reason=None):
"""\
This method is called when the connection is closed.
|stub-method|
:param code: (:class:`int`) The connection closure code to be sent in the WebSocket disconnect frame
:param reason: (:class:`str` or :class:`None`) The human readable reason for the closure
"""
pass
def onChange(self, changedPropertyNames):
"""\
This method is called when a CII message is received from the server that causes one or more of the CII properties to change to a different value.
:param changedPropertyNames: A :class:`list` of :class:`str` names of the properties that have changed. Query the :data:`cii` attribute to find out the new values.
"""
pass
def onProtocolError(self, msg):
"""\
This method is called when there has been an error in the use of the CII protocol - e.g. receiving the wrong kind of message.
|stub-method|
:param msg: A :class:`str` description of the problem.
"""
pass
def onCiiReceived(self, newCii):
"""\
This method is called when a CII message is received, but before any 'onXXXXChange()' handlers (if any) are called.
It is called even if the message does not result in a change to CII state held locally.
By preference, it is recommended to use the 'onXXXXChange()' handlers instead, since these will only be called if there
is an actual change to the value of a property in CII state.
|stub-method|
:param newCii: A :class:`~dvbcss.protocol.cii.CII` object representing the received message.
"""
pass
def connect(self):
"""\
Start the client by trying to open the connection.
:throws ConnectionError: There was a problem that meant it was not possible to connect.
"""
self._conn.connect()
def disconnect(self):
"""\
Disconnect from the server.
"""
self._conn.disconnect()
def _onConnectionOpen(self):
self.connected=True
self.onConnected()
def _onConnectionClose(self, code, reason):
self.connected=False
self.onDisconnected(code, reason)
def _onProtocolError(self, msg):
self.log.error("There was a protocol error: "+msg+". Continuing anyway.")
self.onProtocolError(msg)
def _onCII(self, newCII):
self.latestCII = newCII
self.onCiiReceived(newCII)
# take a diff since we cannot assume the received message is a diff
diff=CII.diff(self.cii, newCII)
changes=diff.definedProperties()
if len(changes) > 0:
self.log.debug("Changed properties: "+ " ".join(changes))
self.cii.update(diff)
# now we examine changes and fire change specific callbacks as well as a general callback
for name in changes:
if name in changes:
funcname = self._callBackFuncNames[name]
callback = getattr(self, funcname)
if callback is not None:
newValue=getattr(diff, name)
callback(newValue)
# fire general catch-all callback
self.onChange(changes)
else:
self.log.debug("No properties have changed")
def getStatusSummary(self):
if self.latestCII is None:
return "Nothing received from TV yet."
return str(self.cii)
# programmatically create the onXXXChange methods for every property in a CII message
for propertyName in CII.allProperties():
def f(self, newValue):
pass
f.__doc__="Called when the "+propertyName+" property of the CII message has been changed by a state update from the CII Server.\n\n" + \
"|stub-method|\n\n" + \
":param newValue: The new value for this property."
setattr(CIIClient, "on"+propertyName[0].upper() + propertyName[1:]+"Change", f)
__all__ = [
"CIIClientConnection",
"CIIClient",
]
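# --- Hedged usage sketch (not part of the original module) ---
# The module docstring notes that the stub methods can also be replaced
# dynamically instead of subclassing; a minimal illustration (the URL and the
# sleep duration are placeholders):
if __name__ == '__main__':
    import time

    def show_changes(propertyNames):
        print "Changed CII properties: " + ", ".join(propertyNames)

    client = CIIClient("ws://127.0.0.1/cii")
    client.onChange = show_changes        # replace the stub without subclassing
    client.connect()
    time.sleep(30)                        # handlers fire on the WebSocket thread
    client.disconnect()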
|
|
#!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Usage: gn_to_cmake.py <json_file_name>
gn gen out/config --ide=json --json-ide-script=../../gn/gn_to_cmake.py
or
gn gen out/config --ide=json
python gn/gn_to_cmake.py out/config/project.json
The first is recommended, as it will auto-update.
"""
from __future__ import print_function
import functools
import json
import posixpath
import string
import sys
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
def CMakeTargetEscape(a):
"""Escapes the string 'a' for use as a CMake target name.
CMP0037 in CMake 3.0 restricts target names to "^[A-Za-z0-9_.:+-]+$"
The ':' is only allowed for imported targets.
"""
def Escape(c):
if c in string.ascii_letters or c in string.digits or c in '_.+-':
return c
return '__'
return ''.join([Escape(c) for c in a])
def SetVariable(out, variable_name, value):
"""Sets a CMake variable."""
out.write('set("')
out.write(CMakeStringEscape(variable_name))
out.write('" "')
out.write(CMakeStringEscape(value))
out.write('")\n')
def SetVariableList(out, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
SetVariable(out, variable_name, "")
return
if len(values) == 1:
SetVariable(out, variable_name, values[0])
return
out.write('list(APPEND "')
out.write(CMakeStringEscape(variable_name))
out.write('"\n "')
out.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
out.write('")\n')
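# Illustrative sketch (not part of the original script): "example_srcs" is a
# made-up variable name and `out` is any writable text stream. With two or
# more values SetVariableList emits a list(APPEND ...) command with one
# quoted, escaped value per line; with zero or one value it falls back to a
# plain set() command.
def _example_set_variable_list(out):
  SetVariableList(out, 'example_srcs', ['a.cc', 'b.cc'])  # list(APPEND ...)
  SetVariableList(out, 'example_single', ['only.cc'])     # set(...)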
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetCurrentTargetProperty(out, property_name, values, sep=''):
"""Given a target, sets the given property."""
out.write('set_target_properties("${target}" PROPERTIES ')
out.write(property_name)
out.write(' "')
for value in values:
out.write(CMakeStringEscape(value))
out.write(sep)
out.write('")\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
# See GetSourceFileType in gn
source_file_types = {
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.c': 'c',
'.s': 'asm',
'.S': 'asm',
'.asm': 'asm',
'.o': 'obj',
'.obj': 'obj',
}
class CMakeTargetType:
def __init__(self, command, modifier, property_modifier, is_linkable):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
self.is_linkable = is_linkable
CMakeTargetType.custom = CMakeTargetType('add_custom_target', 'SOURCES',
None, False)
# See GetStringForOutputType in gn
cmake_target_types = {
'unknown': CMakeTargetType.custom,
'group': CMakeTargetType.custom,
'executable': CMakeTargetType('add_executable', None, 'RUNTIME', True),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY', True),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY', True),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE', False),
'source_set': CMakeTargetType('add_library', 'OBJECT', None, False),
'copy': CMakeTargetType.custom,
'action': CMakeTargetType.custom,
'action_foreach': CMakeTargetType.custom,
'bundle_data': CMakeTargetType.custom,
'create_bundle': CMakeTargetType.custom,
}
def FindFirstOf(s, a):
return min(s.find(i) for i in a if i in s)
def GetCMakeTargetName(gn_target_name):
# See <chromium>/src/tools/gn/label.cc#Resolve
# //base/test:test_support(//build/toolchain/win:msvc)
path_separator = FindFirstOf(gn_target_name, (':', '('))
location = None
name = None
toolchain = None
if not path_separator:
location = gn_target_name[2:]
else:
location = gn_target_name[2:path_separator]
toolchain_separator = gn_target_name.find('(', path_separator)
if toolchain_separator == -1:
name = gn_target_name[path_separator + 1:]
else:
if toolchain_separator > path_separator:
name = gn_target_name[path_separator + 1:toolchain_separator]
assert gn_target_name.endswith(')')
toolchain = gn_target_name[toolchain_separator + 1:-1]
assert location or name
cmake_target_name = None
if location.endswith('/' + name):
cmake_target_name = location
elif location:
cmake_target_name = location + '_' + name
else:
cmake_target_name = name
if toolchain:
cmake_target_name += '--' + toolchain
return CMakeTargetEscape(cmake_target_name)
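# Illustrative sketch (not part of the original script): how GN labels map to
# CMake target names. The labels below are made up for demonstration.
def _example_get_cmake_target_name():
  # location "base/test" + name "test_support" -> "base/test_test_support",
  # then CMakeTargetEscape turns '/' into '__'.
  assert GetCMakeTargetName('//base/test:test_support') == 'base__test_test_support'
  # When the name repeats the last path component, the location alone is used.
  assert GetCMakeTargetName('//base/test:test') == 'base__test'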
class Project:
def __init__(self, project_json):
self.targets = project_json['targets']
build_settings = project_json['build_settings']
self.root_path = build_settings['root_path']
self.build_path = posixpath.join(self.root_path,
build_settings['build_dir'][2:])
self.object_source_deps = {}
def GetAbsolutePath(self, path):
if path.startswith("//"):
return self.root_path + "/" + path[2:]
return path
def GetObjectSourceDependencies(self, gn_target_name, object_dependencies):
"""All OBJECT libraries whose sources have not been absorbed."""
if gn_target_name in self.object_source_deps:
object_dependencies.update(self.object_source_deps[gn_target_name])
return
target_deps = set()
dependencies = self.targets[gn_target_name].get('deps', [])
for dependency in dependencies:
dependency_type = self.targets[dependency].get('type', None)
if dependency_type == 'source_set':
target_deps.add(dependency)
if dependency_type not in gn_target_types_that_absorb_objects:
self.GetObjectSourceDependencies(dependency, target_deps)
self.object_source_deps[gn_target_name] = target_deps
object_dependencies.update(target_deps)
def GetObjectLibraryDependencies(self, gn_target_name, object_dependencies):
"""All OBJECT libraries whose libraries have not been absorbed."""
dependencies = self.targets[gn_target_name].get('deps', [])
for dependency in dependencies:
dependency_type = self.targets[dependency].get('type', None)
if dependency_type == 'source_set':
object_dependencies.add(dependency)
self.GetObjectLibraryDependencies(dependency, object_dependencies)
class Target:
def __init__(self, gn_target_name, project):
self.gn_name = gn_target_name
self.properties = project.targets[self.gn_name]
self.cmake_name = GetCMakeTargetName(self.gn_name)
self.gn_type = self.properties.get('type', None)
self.cmake_type = cmake_target_types.get(self.gn_type, None)
def WriteAction(out, target, project, sources, synthetic_dependencies):
outputs = []
output_directories = set()
for output in target.properties.get('outputs', []):
output_abs_path = project.GetAbsolutePath(output)
outputs.append(output_abs_path)
output_directory = posixpath.dirname(output_abs_path)
if output_directory:
output_directories.add(output_directory)
outputs_name = '${target}__output'
SetVariableList(out, outputs_name, outputs)
out.write('add_custom_command(OUTPUT ')
WriteVariable(out, outputs_name)
out.write('\n')
if output_directories:
out.write(' COMMAND ${CMAKE_COMMAND} -E make_directory "')
out.write('" "'.join([CMakeStringEscape(d) for d in output_directories]))
out.write('"\n')
script = target.properties['script']
arguments = target.properties['args']
out.write(' COMMAND python "')
out.write(CMakeStringEscape(project.GetAbsolutePath(script)))
out.write('"')
if arguments:
out.write('\n "')
out.write('"\n "'.join([CMakeStringEscape(a) for a in arguments]))
out.write('"')
out.write('\n')
out.write(' DEPENDS ')
for sources_type_name in sources.values():
WriteVariable(out, sources_type_name, ' ')
out.write('\n')
#TODO: CMake 3.7 is introducing DEPFILE
out.write(' WORKING_DIRECTORY "')
out.write(CMakeStringEscape(project.build_path))
out.write('"\n')
out.write(' COMMENT "Action: ${target}"\n')
out.write(' VERBATIM)\n')
synthetic_dependencies.add(outputs_name)
def ExpandPlaceholders(source, a):
source_dir, source_file_part = posixpath.split(source)
source_name_part, _ = posixpath.splitext(source_file_part)
#TODO: {{source_gen_dir}}, {{source_out_dir}}, {{response_file_name}}
return a.replace('{{source}}', source) \
.replace('{{source_file_part}}', source_file_part) \
.replace('{{source_name_part}}', source_name_part) \
.replace('{{source_dir}}', source_dir) \
.replace('{{source_root_relative_dir}}', source_dir)
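# Illustrative sketch (not part of the original script): expanding the gn
# {{placeholder}} tokens for a made-up source path.
def _example_expand_placeholders():
  # posixpath.split('//gpu/shader.glsl') -> ('//gpu', 'shader.glsl') and the
  # name part drops the extension, so {{source_name_part}} becomes 'shader'.
  assert ExpandPlaceholders('//gpu/shader.glsl', '{{source_name_part}}.h') == 'shader.h'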
def WriteActionForEach(out, target, project, sources, synthetic_dependencies):
all_outputs = target.properties.get('outputs', [])
inputs = target.properties.get('sources', [])
# TODO: consider expanding 'output_patterns' instead.
  # Use integer division: the values below are used as slice indices.
  outputs_per_input = len(all_outputs) // len(inputs)
for count, source in enumerate(inputs):
source_abs_path = project.GetAbsolutePath(source)
outputs = []
output_directories = set()
for output in all_outputs[outputs_per_input * count:
outputs_per_input * (count+1)]:
output_abs_path = project.GetAbsolutePath(output)
outputs.append(output_abs_path)
output_directory = posixpath.dirname(output_abs_path)
if output_directory:
output_directories.add(output_directory)
outputs_name = '${target}__output_' + str(count)
SetVariableList(out, outputs_name, outputs)
out.write('add_custom_command(OUTPUT ')
WriteVariable(out, outputs_name)
out.write('\n')
if output_directories:
out.write(' COMMAND ${CMAKE_COMMAND} -E make_directory "')
out.write('" "'.join([CMakeStringEscape(d) for d in output_directories]))
out.write('"\n')
script = target.properties['script']
# TODO: need to expand {{xxx}} in arguments
arguments = target.properties['args']
out.write(' COMMAND python "')
out.write(CMakeStringEscape(project.GetAbsolutePath(script)))
out.write('"')
if arguments:
out.write('\n "')
expand = functools.partial(ExpandPlaceholders, source_abs_path)
out.write('"\n "'.join(
[CMakeStringEscape(expand(a)) for a in arguments]))
out.write('"')
out.write('\n')
out.write(' DEPENDS')
if 'input' in sources:
WriteVariable(out, sources['input'], ' ')
out.write(' "')
out.write(CMakeStringEscape(source_abs_path))
out.write('"\n')
#TODO: CMake 3.7 is introducing DEPFILE
out.write(' WORKING_DIRECTORY "')
out.write(CMakeStringEscape(project.build_path))
out.write('"\n')
out.write(' COMMENT "Action ${target} on ')
out.write(CMakeStringEscape(source_abs_path))
out.write('"\n')
out.write(' VERBATIM)\n')
synthetic_dependencies.add(outputs_name)
def WriteCopy(out, target, project, sources, synthetic_dependencies):
inputs = target.properties.get('sources', [])
raw_outputs = target.properties.get('outputs', [])
# TODO: consider expanding 'output_patterns' instead.
outputs = []
for output in raw_outputs:
output_abs_path = project.GetAbsolutePath(output)
outputs.append(output_abs_path)
outputs_name = '${target}__output'
SetVariableList(out, outputs_name, outputs)
out.write('add_custom_command(OUTPUT ')
WriteVariable(out, outputs_name)
out.write('\n')
for src, dst in zip(inputs, outputs):
out.write(' COMMAND ${CMAKE_COMMAND} -E copy "')
out.write(CMakeStringEscape(project.GetAbsolutePath(src)))
out.write('" "')
out.write(CMakeStringEscape(dst))
out.write('"\n')
out.write(' DEPENDS ')
for sources_type_name in sources.values():
WriteVariable(out, sources_type_name, ' ')
out.write('\n')
out.write(' WORKING_DIRECTORY "')
out.write(CMakeStringEscape(project.build_path))
out.write('"\n')
out.write(' COMMENT "Copy ${target}"\n')
out.write(' VERBATIM)\n')
synthetic_dependencies.add(outputs_name)
def WriteCompilerFlags(out, target, project, sources):
# Hack, set linker language to c if no c or cxx files present.
  if 'c' not in sources and 'cxx' not in sources:
SetCurrentTargetProperty(out, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if 'input' in sources:
SetFilesProperty(out, sources['input'], 'HEADER_FILE_ONLY', ('True',), '')
if 'other' in sources:
SetFilesProperty(out, sources['other'], 'HEADER_FILE_ONLY', ('True',), '')
# Mark object sources as linkable.
if 'obj' in sources:
SetFilesProperty(out, sources['obj'], 'EXTERNAL_OBJECT', ('True',), '')
# TODO: 'output_name', 'output_dir', 'output_extension'
# This includes using 'source_outputs' to direct compiler output.
# Includes
includes = target.properties.get('include_dirs', [])
if includes:
out.write('set_property(TARGET "${target}" ')
out.write('APPEND PROPERTY INCLUDE_DIRECTORIES')
for include_dir in includes:
out.write('\n "')
out.write(project.GetAbsolutePath(include_dir))
out.write('"')
out.write(')\n')
# Defines
defines = target.properties.get('defines', [])
if defines:
SetCurrentTargetProperty(out, 'COMPILE_DEFINITIONS', defines, ';')
# Compile flags
# "arflags", "asmflags", "cflags",
  # "cflags_c", "cflags_cc", "cflags_objc", "cflags_objcc"
# CMake does not have per target lang compile flags.
# TODO: $<$<COMPILE_LANGUAGE:CXX>:cflags_cc style generator expression.
# http://public.kitware.com/Bug/view.php?id=14857
flags = []
flags.extend(target.properties.get('cflags', []))
cflags_asm = target.properties.get('asmflags', [])
cflags_c = target.properties.get('cflags_c', [])
cflags_cxx = target.properties.get('cflags_cc', [])
if 'c' in sources and not any(k in sources for k in ('asm', 'cxx')):
flags.extend(cflags_c)
elif 'cxx' in sources and not any(k in sources for k in ('asm', 'c')):
flags.extend(cflags_cxx)
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if 'asm' in sources and cflags_asm:
SetFilesProperty(out, sources['asm'], 'COMPILE_FLAGS', cflags_asm, ' ')
if 'c' in sources and cflags_c:
SetFilesProperty(out, sources['c'], 'COMPILE_FLAGS', cflags_c, ' ')
if 'cxx' in sources and cflags_cxx:
SetFilesProperty(out, sources['cxx'], 'COMPILE_FLAGS', cflags_cxx, ' ')
if flags:
SetCurrentTargetProperty(out, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = target.properties.get('ldflags', [])
if ldflags:
SetCurrentTargetProperty(out, 'LINK_FLAGS', ldflags, ' ')
gn_target_types_that_absorb_objects = (
'executable',
'loadable_module',
'shared_library',
'static_library'
)
def WriteSourceVariables(out, target, project):
# gn separates the sheep from the goats based on file extensions.
  # A full separation is done here because of flag handling (see Compile flags).
source_types = {'cxx':[], 'c':[], 'asm':[],
'obj':[], 'obj_target':[], 'input':[], 'other':[]}
# TODO .def files on Windows
for source in target.properties.get('sources', []):
_, ext = posixpath.splitext(source)
source_abs_path = project.GetAbsolutePath(source)
source_types[source_file_types.get(ext, 'other')].append(source_abs_path)
for input_path in target.properties.get('inputs', []):
input_abs_path = project.GetAbsolutePath(input_path)
source_types['input'].append(input_abs_path)
# OBJECT library dependencies need to be listed as sources.
# Only executables and non-OBJECT libraries may reference an OBJECT library.
# https://gitlab.kitware.com/cmake/cmake/issues/14778
if target.gn_type in gn_target_types_that_absorb_objects:
object_dependencies = set()
project.GetObjectSourceDependencies(target.gn_name, object_dependencies)
for dependency in object_dependencies:
cmake_dependency_name = GetCMakeTargetName(dependency)
obj_target_sources = '$<TARGET_OBJECTS:' + cmake_dependency_name + '>'
source_types['obj_target'].append(obj_target_sources)
sources = {}
for source_type, sources_of_type in source_types.items():
if sources_of_type:
sources[source_type] = '${target}__' + source_type + '_srcs'
SetVariableList(out, sources[source_type], sources_of_type)
return sources
def WriteTarget(out, target, project):
out.write('\n#')
out.write(target.gn_name)
out.write('\n')
if target.cmake_type is None:
print('Target {} has unknown target type {}, skipping.'.format(
target.gn_name, target.gn_type))
return
SetVariable(out, 'target', target.cmake_name)
sources = WriteSourceVariables(out, target, project)
synthetic_dependencies = set()
if target.gn_type == 'action':
WriteAction(out, target, project, sources, synthetic_dependencies)
if target.gn_type == 'action_foreach':
WriteActionForEach(out, target, project, sources, synthetic_dependencies)
if target.gn_type == 'copy':
WriteCopy(out, target, project, sources, synthetic_dependencies)
out.write(target.cmake_type.command)
out.write('("${target}"')
if target.cmake_type.modifier is not None:
out.write(' ')
out.write(target.cmake_type.modifier)
for sources_type_name in sources.values():
WriteVariable(out, sources_type_name, ' ')
if synthetic_dependencies:
out.write(' DEPENDS')
    for synthetic_dependency in synthetic_dependencies:
      WriteVariable(out, synthetic_dependency, ' ')
out.write(')\n')
if target.cmake_type.command != 'add_custom_target':
WriteCompilerFlags(out, target, project, sources)
libraries = set()
nonlibraries = set()
dependencies = set(target.properties.get('deps', []))
# Transitive OBJECT libraries are in sources.
# Those sources are dependent on the OBJECT library dependencies.
# Those sources cannot bring in library dependencies.
object_dependencies = set()
if target.gn_type != 'source_set':
project.GetObjectLibraryDependencies(target.gn_name, object_dependencies)
for object_dependency in object_dependencies:
dependencies.update(project.targets.get(object_dependency).get('deps', []))
for dependency in dependencies:
gn_dependency_type = project.targets.get(dependency, {}).get('type', None)
cmake_dependency_type = cmake_target_types.get(gn_dependency_type, None)
cmake_dependency_name = GetCMakeTargetName(dependency)
if cmake_dependency_type.command != 'add_library':
nonlibraries.add(cmake_dependency_name)
elif cmake_dependency_type.modifier != 'OBJECT':
if target.cmake_type.is_linkable:
libraries.add(cmake_dependency_name)
else:
nonlibraries.add(cmake_dependency_name)
# Non-library dependencies.
if nonlibraries:
out.write('add_dependencies("${target}"')
for nonlibrary in nonlibraries:
out.write('\n "')
out.write(nonlibrary)
out.write('"')
out.write(')\n')
# Non-OBJECT library dependencies.
external_libraries = target.properties.get('libs', [])
if target.cmake_type.is_linkable and (external_libraries or libraries):
library_dirs = target.properties.get('lib_dirs', [])
if library_dirs:
SetVariableList(out, '${target}__library_directories', library_dirs)
system_libraries = []
for external_library in external_libraries:
if '/' in external_library:
libraries.add(project.GetAbsolutePath(external_library))
else:
if external_library.endswith('.framework'):
external_library = external_library[:-len('.framework')]
system_library = 'library__' + external_library
if library_dirs:
system_library = system_library + '__for_${target}'
out.write('find_library("')
out.write(CMakeStringEscape(system_library))
out.write('" "')
out.write(CMakeStringEscape(external_library))
out.write('"')
if library_dirs:
out.write(' PATHS "')
WriteVariable(out, '${target}__library_directories')
out.write('"')
out.write(')\n')
system_libraries.append(system_library)
out.write('target_link_libraries("${target}"')
for library in libraries:
out.write('\n "')
out.write(CMakeStringEscape(library))
out.write('"')
for system_library in system_libraries:
WriteVariable(out, system_library, '\n "')
out.write('"')
out.write(')\n')
def WriteProject(project):
out = open(posixpath.join(project.build_path, 'CMakeLists.txt'), 'w+')
out.write('# Generated by gn_to_cmake.py.\n')
out.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
out.write('cmake_policy(VERSION 2.8.8)\n\n')
# Update the gn generated ninja build.
# If a build file has changed, this will update CMakeLists.ext if
# gn gen out/config --ide=json --json-ide-script=../../gn/gn_to_cmake.py
# style was used to create this config.
out.write('execute_process(COMMAND ninja -C "')
out.write(CMakeStringEscape(project.build_path))
out.write('" build.ninja)\n')
out.write('include(CMakeLists.ext)\n')
out.close()
out = open(posixpath.join(project.build_path, 'CMakeLists.ext'), 'w+')
out.write('# Generated by gn_to_cmake.py.\n')
out.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
out.write('cmake_policy(VERSION 2.8.8)\n')
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
out.write('enable_language(ASM)\n\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
# Current issues with automatic re-generation:
# The gn generated build.ninja target uses build.ninja.d
# but build.ninja.d does not contain the ide or gn.
# Currently the ide is not run if the project.json file is not changed
# but the ide needs to be run anyway if it has itself changed.
# This can be worked around by deleting the project.json file.
out.write('file(READ "')
gn_deps_file = posixpath.join(project.build_path, 'build.ninja.d')
out.write(CMakeStringEscape(gn_deps_file))
out.write('" "gn_deps_string" OFFSET ')
out.write(str(len('build.ninja: ')))
out.write(')\n')
# One would think this would need to worry about escaped spaces
# but gn doesn't escape spaces here (it generates invalid .d files).
out.write('string(REPLACE " " ";" "gn_deps" ${gn_deps_string})\n')
out.write('foreach("gn_dep" ${gn_deps})\n')
out.write(' configure_file(${gn_dep} "CMakeLists.devnull" COPYONLY)\n')
out.write('endforeach("gn_dep")\n')
for target_name in project.targets.keys():
out.write('\n')
WriteTarget(out, Target(target_name, project), project)
def main():
if len(sys.argv) != 2:
print('Usage: ' + sys.argv[0] + ' <json_file_name>')
sys.exit(1)
json_path = sys.argv[1]
project = None
with open(json_path, 'r') as json_file:
project = json.loads(json_file.read())
WriteProject(Project(project))
if __name__ == "__main__":
main()
|
|
import pytest
import pandas as pd
import numpy as np
from pandas import SparseArray, SparseDtype
from pandas.errors import PerformanceWarning
from pandas.tests.extension import base
import pandas.util.testing as tm
def make_data(fill_value):
if np.isnan(fill_value):
data = np.random.uniform(size=100)
else:
data = np.random.randint(1, 100, size=100)
data[2::3] = fill_value
return data
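# Illustrative sketch (not part of the original test module): make_data marks
# every third element, starting at index 2, as the fill value, so roughly a
# third of each fixture array is stored implicitly by SparseArray.
def _example_make_data_pattern():
    arr = SparseArray(make_data(0), fill_value=0)
    assert (np.asarray(arr)[2::3] == 0).all()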
@pytest.fixture
def dtype():
return SparseDtype()
@pytest.fixture(params=[0, np.nan])
def data(request):
    """Length-100 SparseArray for semantics test."""
res = SparseArray(make_data(request.param),
fill_value=request.param)
return res
@pytest.fixture(params=[0, np.nan])
def data_missing(request):
"""Length 2 array with [NA, Valid]"""
return SparseArray([np.nan, 1], fill_value=request.param)
@pytest.fixture(params=[0, np.nan])
def data_repeated(request):
"""Return different versions of data for count times"""
def gen(count):
for _ in range(count):
yield SparseArray(make_data(request.param),
fill_value=request.param)
yield gen
@pytest.fixture(params=[0, np.nan])
def data_for_sorting(request):
return SparseArray([2, 3, 1], fill_value=request.param)
@pytest.fixture(params=[0, np.nan])
def data_missing_for_sorting(request):
return SparseArray([2, np.nan, 1], fill_value=request.param)
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
return lambda left, right: pd.isna(left) and pd.isna(right)
@pytest.fixture(params=[0, np.nan])
def data_for_grouping(request):
return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3],
fill_value=request.param)
class BaseSparseTests(object):
def _check_unsupported(self, data):
if data.dtype == SparseDtype(int, 0):
pytest.skip("Can't store nan in int array.")
class TestDtype(BaseSparseTests, base.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
assert dtype.construct_array_type() is SparseArray
class TestInterface(BaseSparseTests, base.BaseInterfaceTests):
def test_no_values_attribute(self, data):
pytest.skip("We have values")
class TestConstructors(BaseSparseTests, base.BaseConstructorsTests):
pass
class TestReshaping(BaseSparseTests, base.BaseReshapingTests):
def test_concat_mixed_dtypes(self, data):
# https://github.com/pandas-dev/pandas/issues/20762
# This should be the same, aside from concat([sparse, float])
df1 = pd.DataFrame({'A': data[:3]})
df2 = pd.DataFrame({"A": [1, 2, 3]})
df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category')
dfs = [df1, df2, df3]
# dataframes
result = pd.concat(dfs)
expected = pd.concat([x.apply(lambda s: np.asarray(s).astype(object))
for x in dfs])
self.assert_frame_equal(result, expected)
def test_concat_columns(self, data, na_value):
self._check_unsupported(data)
super(TestReshaping, self).test_concat_columns(data, na_value)
def test_align(self, data, na_value):
self._check_unsupported(data)
super(TestReshaping, self).test_align(data, na_value)
def test_align_frame(self, data, na_value):
self._check_unsupported(data)
super(TestReshaping, self).test_align_frame(data, na_value)
def test_align_series_frame(self, data, na_value):
self._check_unsupported(data)
super(TestReshaping, self).test_align_series_frame(data, na_value)
def test_merge(self, data, na_value):
self._check_unsupported(data)
super(TestReshaping, self).test_merge(data, na_value)
class TestGetitem(BaseSparseTests, base.BaseGetitemTests):
def test_get(self, data):
s = pd.Series(data, index=[2 * i for i in range(len(data))])
if np.isnan(s.values.fill_value):
assert np.isnan(s.get(4)) and np.isnan(s.iloc[2])
else:
assert s.get(4) == s.iloc[2]
assert s.get(2) == s.iloc[1]
def test_reindex(self, data, na_value):
self._check_unsupported(data)
super(TestGetitem, self).test_reindex(data, na_value)
# Skipping TestSetitem, since we don't implement it.
class TestMissing(BaseSparseTests, base.BaseMissingTests):
def test_isna(self, data_missing):
expected_dtype = SparseDtype(bool,
pd.isna(data_missing.dtype.fill_value))
expected = SparseArray([True, False], dtype=expected_dtype)
result = pd.isna(data_missing)
self.assert_equal(result, expected)
result = pd.Series(data_missing).isna()
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
# GH 21189
result = pd.Series(data_missing).drop([0, 1]).isna()
expected = pd.Series([], dtype=expected_dtype)
self.assert_series_equal(result, expected)
def test_fillna_limit_pad(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super(TestMissing, self).test_fillna_limit_pad(data_missing)
def test_fillna_limit_backfill(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super(TestMissing, self).test_fillna_limit_backfill(data_missing)
def test_fillna_series_method(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super(TestMissing, self).test_fillna_limit_backfill(data_missing)
@pytest.mark.skip(reason="Unsupported")
def test_fillna_series(self):
# this one looks doable.
pass
def test_fillna_frame(self, data_missing):
# Have to override to specify that fill_value will change.
fill_value = data_missing[1]
result = pd.DataFrame({
"A": data_missing,
"B": [1, 2]
}).fillna(fill_value)
if pd.isna(data_missing.fill_value):
dtype = SparseDtype(data_missing.dtype, fill_value)
else:
dtype = data_missing.dtype
expected = pd.DataFrame({
"A": data_missing._from_sequence([fill_value, fill_value],
dtype=dtype),
"B": [1, 2],
})
self.assert_frame_equal(result, expected)
class TestMethods(BaseSparseTests, base.BaseMethodsTests):
def test_combine_le(self, data_repeated):
        # Series[SparseArray].__le__ returns a Series[Sparse[bool]]
        # rather than a Series[bool], so build the expected result
        # with a SparseArray as well.
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series(pd.SparseArray([
a <= b for (a, b) in
zip(list(orig_data1), list(orig_data2))
], fill_value=False))
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
expected = pd.Series(pd.SparseArray([
a <= val for a in list(orig_data1)
], fill_value=False))
self.assert_series_equal(result, expected)
class TestCasting(BaseSparseTests, base.BaseCastingTests):
pass
class TestArithmeticOps(BaseSparseTests, base.BaseArithmeticOpsTests):
series_scalar_exc = None
frame_scalar_exc = None
divmod_exc = None
series_array_exc = None
def _skip_if_different_combine(self, data):
if data.fill_value == 0:
            # Arithmetic ops are applied to dtype.fill_value so that
            # sparsity is maintained. combine() can't be applied to a
            # dtype in general, so we can't build the expected result
            # here; that behaviour is covered by the tests in
            # `tests/sparse` instead.
            pytest.skip("Incorrect expected from Series.combine")
def test_error(self, data, all_arithmetic_operators):
pass
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
self._skip_if_different_combine(data)
super(TestArithmeticOps, self).test_arith_series_with_scalar(
data,
all_arithmetic_operators
)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
self._skip_if_different_combine(data)
super(TestArithmeticOps, self).test_arith_series_with_array(
data,
all_arithmetic_operators
)
class TestComparisonOps(BaseSparseTests, base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
# hard to test the fill value, since we don't know what expected
# is in general.
# Rely on tests in `tests/sparse` to validate that.
assert isinstance(result.dtype, SparseDtype)
assert result.dtype.subtype == np.dtype('bool')
with np.errstate(all='ignore'):
expected = pd.Series(
pd.SparseArray(op(np.asarray(data), np.asarray(other)),
fill_value=result.values.fill_value)
)
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
tm.assert_series_equal(result, expected)
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
from oslo.config import cfg
from nova.db.sqlalchemy import types
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils
CONF = cfg.CONF
BASE = declarative_base()
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
class NovaBase(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase):
metadata = None
def save(self, session=None):
from nova.db.sqlalchemy import api
if session is None:
session = api.get_session()
super(NovaBase, self).save(session=session)
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
__tablename__ = 'services'
__table_args__ = (
schema.UniqueConstraint("host", "topic", "deleted",
name="uniq_services0host0topic0deleted"),
schema.UniqueConstraint("host", "binary", "deleted",
name="uniq_services0host0binary0deleted")
)
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
disabled_reason = Column(String(255))
class ComputeNode(BASE, NovaBase):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
__table_args__ = ()
id = Column(Integer, primary_key=True)
service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
service = relationship(Service,
backref=backref('compute_node'),
foreign_keys=service_id,
primaryjoin='and_('
'ComputeNode.service_id == Service.id,'
'ComputeNode.deleted == 0)')
vcpus = Column(Integer, nullable=False)
memory_mb = Column(Integer, nullable=False)
local_gb = Column(Integer, nullable=False)
vcpus_used = Column(Integer, nullable=False)
memory_mb_used = Column(Integer, nullable=False)
local_gb_used = Column(Integer, nullable=False)
hypervisor_type = Column(MediumText(), nullable=False)
hypervisor_version = Column(Integer, nullable=False)
hypervisor_hostname = Column(String(255))
    # Free RAM, the amount of activity (resize, migration, boot, etc.) and
    # the number of running VMs are a good starting point for what's
    # important when making scheduling decisions.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
    # The value is "json translatable" and must contain all of the dictionary
    # keys above, since it is copied from the <cpu> tag of getCapabilities()
    # (see libvirt.virtConnection).
cpu_info = Column(MediumText(), nullable=False)
disk_available_least = Column(Integer)
host_ip = Column(types.IPAddress())
supported_instances = Column(Text)
metrics = Column(Text)
# Note(yongli): json string PCI Stats
# '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
pci_stats = Column(Text)
# extra_resources is a json string containing arbitrary
# data about additional resources.
extra_resources = Column(Text)
# json-encode string containing compute node statistics
stats = Column(Text, default='{}')
class Certificate(BASE, NovaBase):
    """Represents an X.509 certificate."""
__tablename__ = 'certificates'
__table_args__ = (
Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
)
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
Index('uuid', 'uuid', unique=True),
Index('project_id', 'project_id'),
Index('instances_host_deleted_idx',
'host', 'deleted'),
Index('instances_reservation_id_idx',
'reservation_id'),
Index('instances_terminated_at_launched_at_idx',
'terminated_at', 'launched_at'),
Index('instances_uuid_deleted_idx',
'uuid', 'deleted'),
Index('instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
Index('instances_host_node_deleted_idx',
'host', 'node', 'deleted'),
Index('instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
)
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for column in iter(object_mapper(self).columns):
key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
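    # Illustrative note (not part of the original model): with a template of
    # 'instance-%08x' (a commonly used default, assumed here), an instance
    # with id=5 is named 'instance-00000005'. A template with named keys,
    # e.g. 'instance-%(uuid)s', raises TypeError for the integer id and is
    # filled from the column values instead, falling back to self.uuid when
    # a key is missing.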
@property
def _extra_keys(self):
return ['name']
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
hostname = Column(String(255))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(MediumText())
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
ephemeral_key_uuid = Column(String(36))
# This is not related to hostname, above. It refers
# to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
# To identify the "ComputeNode" which the instance resides in.
# This equals to ComputeNode.hypervisor_hostname.
node = Column(String(255))
# *not* flavorid, this is the internal primary_key
instance_type_id = Column(Integer)
user_data = Column(MediumText())
reservation_id = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
# To remember on which host an instance booted.
# An instance may have moved to another host by live migration.
launched_on = Column(MediumText())
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
locked = Column(Boolean)
locked_by = Column(Enum('owner', 'admin'))
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36))
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255))
default_swap_device = Column(String(255))
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(types.IPAddress())
access_ip_v6 = Column(types.IPAddress())
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
# Note(maoy): currently Nova will always stop instead of terminate
# no matter what the flag says. So we set the default to False.
shutdown_terminate = Column(Boolean(), default=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False)
# OpenStack compute cell name. This will only be set at the top of
# the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
cell_name = Column(String(255))
internal_id = Column(Integer)
# Records whether an instance has been deleted from disk
cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase):
"""Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
__table_args__ = (
schema.UniqueConstraint(
"instance_uuid",
name="uniq_instance_info_caches0instance_uuid"),)
id = Column(Integer, primary_key=True, autoincrement=True)
# text column used for storing a json object of network data for api
network_info = Column(MediumText())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
instance = relationship(Instance,
backref=backref('info_cache', uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
"""Represents possible flavors for instances.
Note: instance_type and flavor are synonyms and the term instance_type is
deprecated and in the process of being removed.
"""
__tablename__ = "instance_types"
__table_args__ = (
schema.UniqueConstraint("flavorid", "deleted",
name="uniq_instance_types0flavorid0deleted"),
schema.UniqueConstraint("name", "deleted",
name="uniq_instance_types0name0deleted")
)
# Internal only primary key/id
id = Column(Integer, primary_key=True)
name = Column(String(255))
memory_mb = Column(Integer, nullable=False)
vcpus = Column(Integer, nullable=False)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
# Public facing id will be renamed public_id
flavorid = Column(String(255))
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1)
vcpu_weight = Column(Integer)
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
instances = relationship(Instance,
backref=backref('instance_type'),
foreign_keys=id,
primaryjoin=id == Instance.instance_type_id)
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'volumes'
__table_args__ = (
Index('volumes_instance_uuid_idx', 'instance_uuid'),
)
id = Column(String(36), primary_key=True, nullable=False)
deleted = Column(String(36), default="")
@property
def name(self):
return CONF.volume_name_template % self.id
ec2_id = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
snapshot_id = Column(String(36))
host = Column(String(255))
size = Column(Integer)
availability_zone = Column(String(255))
instance_uuid = Column(String(36))
mountpoint = Column(String(255))
attach_time = Column(DateTime)
status = Column(String(255)) # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
provider_location = Column(String(256))
provider_auth = Column(String(256))
volume_type_id = Column(Integer)
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
__table_args__ = (
schema.UniqueConstraint("project_id", "resource", "deleted",
name="uniq_quotas0project_id0resource0deleted"
),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
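    # Illustrative note (not part of the original model): a row such as
    # (project_id='p1', resource='instances', hard_limit=20) caps project
    # 'p1' at 20 instances regardless of class or deployment defaults, while
    # the same row with hard_limit=NULL makes 'instances' unlimited for that
    # project. The values are assumptions for demonstration only.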
class ProjectUserQuota(BASE, NovaBase):
    """Represents a single quota override for a user within a project."""
__tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
__table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
name=uniq_name),
Index('project_user_quotas_project_id_deleted_idx',
'project_id', 'deleted'),
Index('project_user_quotas_user_id_deleted_idx',
'user_id', 'deleted')
)
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
__table_args__ = (
Index('ix_quota_classes_class_name', 'class_name'),
)
id = Column(Integer, primary_key=True)
class_name = Column(String(255))
resource = Column(String(255))
hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
__table_args__ = (
Index('ix_quota_usages_project_id', 'project_id'),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255), nullable=False)
in_use = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
@property
def total(self):
return self.in_use + self.reserved
until_refresh = Column(Integer)
class Reservation(BASE, NovaBase):
"""Represents a resource reservation for quotas."""
__tablename__ = 'reservations'
__table_args__ = (
Index('ix_reservations_project_id', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
usage = relationship(
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase):
    """Represents a snapshot of a block storage device (volume)."""
__tablename__ = 'snapshots'
__table_args__ = ()
id = Column(String(36), primary_key=True, nullable=False)
deleted = Column(String(36), default="")
@property
def name(self):
return CONF.snapshot_name_template % self.id
@property
def volume_name(self):
return CONF.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36), nullable=False)
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
scheduled_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
"""Represents block device mapping that is defined by EC2."""
__tablename__ = "block_device_mapping"
__table_args__ = (
Index('snapshot_id', 'snapshot_id'),
Index('volume_id', 'volume_id'),
Index('block_device_mapping_instance_uuid_device_name_idx',
'instance_uuid', 'device_name'),
Index('block_device_mapping_instance_uuid_volume_id_idx',
'instance_uuid', 'volume_id'),
Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
#TODO(sshturm) Should be dropped. `virtual_name` was dropped
#in 186 migration,
#Duplicates `block_device_mapping_instance_uuid_device_name_idx` index.
Index("block_device_mapping_instance_uuid_virtual_name"
"_device_name_idx", 'instance_uuid', 'device_name'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = relationship(Instance,
backref=backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
'Instance.uuid,'
'BlockDeviceMapping.deleted=='
'0)')
source_type = Column(String(255))
destination_type = Column(String(255))
guest_format = Column(String(255))
device_type = Column(String(255))
disk_bus = Column(String(255))
boot_index = Column(Integer)
device_name = Column(String(255))
    # default=False for compatibility with the existing code.
    # With the EC2 API, the default is True for a device specified by the
    # AMI and False for devices created at other times.
    #TODO(sshturm) add default in db
delete_on_termination = Column(Boolean, default=False)
snapshot_id = Column(String(36))
volume_id = Column(String(36))
volume_size = Column(Integer)
image_id = Column(String(36))
    # no_device is set to suppress a device from the block device mapping.
    no_device = Column(Boolean)
connection_info = Column(MediumText())
class IscsiTarget(BASE, NovaBase):
"""Represents an iscsi target for a given host."""
__tablename__ = 'iscsi_targets'
__table_args__ = (
Index('iscsi_targets_volume_id_fkey', 'volume_id'),
Index('iscsi_targets_host_idx', 'host'),
Index('iscsi_targets_host_volume_id_deleted_idx', 'host', 'volume_id',
'deleted')
)
id = Column(Integer, primary_key=True, nullable=False)
target_num = Column(Integer)
host = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'))
volume = relationship(Volume,
backref=backref('iscsi_target', uselist=False),
foreign_keys=volume_id,
primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
'IscsiTarget.deleted==0)')
class SecurityGroupInstanceAssociation(BASE, NovaBase):
__tablename__ = 'security_group_instance_association'
__table_args__ = (
Index('security_group_instance_association_instance_uuid_idx',
'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase):
"""Represents a security group."""
__tablename__ = 'security_groups'
__table_args__ = (
Index('uniq_security_groups0project_id0name0deleted', 'project_id',
'name', 'deleted'),
)
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
instances = relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == 0,'
'SecurityGroup.deleted == 0)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
# (anthony) the condition below shouldn't be necessary now that the
# association is being marked as deleted. However, removing this
# may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == 0)',
backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
protocol = Column(String(255))
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
# Note: This is not the parent SecurityGroup. It's SecurityGroup we're
# granting access for.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase):
__tablename__ = 'security_group_default_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp" or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase):
    """Represents a rule in the provider-level firewall."""
__tablename__ = 'provider_fw_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint("user_id", "name", "deleted",
name="uniq_key_pairs0user_id0name0deleted"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255))
user_id = Column(String(255))
fingerprint = Column(String(255))
public_key = Column(MediumText())
class Migration(BASE, NovaBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
__table_args__ = (
Index('migrations_instance_uuid_and_status_idx', 'instance_uuid',
'status'),
Index('migrations_by_host_nodes_and_status_idx', 'deleted',
'source_compute', 'dest_compute', 'source_node', 'dest_node',
'status'),
)
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
# nodes are equivalent to a compute node's 'hypervisor_hostname'
source_node = Column(String(255))
dest_node = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
#TODO(_cerberus_): enum
status = Column(String(255))
instance = relationship("Instance", foreign_keys=instance_uuid,
primaryjoin='and_(Migration.instance_uuid == '
'Instance.uuid, Instance.deleted == '
'0)')
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
__table_args__ = (
schema.UniqueConstraint("vlan", "deleted",
name="uniq_networks0vlan0deleted"),
Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
Index('networks_host_idx', 'host'),
Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
Index('networks_uuid_project_id_deleted_idx', 'uuid',
'project_id', 'deleted'),
Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
Index('networks_cidr_v6_idx', 'cidr_v6')
)
id = Column(Integer, primary_key=True, nullable=False)
label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(types.CIDR())
cidr_v6 = Column(types.CIDR())
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(types.IPAddress())
netmask_v6 = Column(types.IPAddress())
netmask = Column(types.IPAddress())
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(types.IPAddress())
broadcast = Column(types.IPAddress())
dns1 = Column(types.IPAddress())
dns2 = Column(types.IPAddress())
vlan = Column(Integer)
vpn_public_address = Column(types.IPAddress())
vpn_public_port = Column(Integer)
vpn_private_address = Column(types.IPAddress())
dhcp_start = Column(types.IPAddress())
rxtx_base = Column(Integer)
project_id = Column(String(255))
priority = Column(Integer)
host = Column(String(255)) # , ForeignKey('hosts.id'))
uuid = Column(String(36))
class VirtualInterface(BASE, NovaBase):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_virtual_interfaces0address0deleted"),
Index('network_id', 'network_id'),
Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
address = Column(String(255))
network_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
__table_args__ = (
schema.UniqueConstraint(
"address", "deleted", name="uniq_fixed_ips0address0deleted"),
Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
Index('network_id', 'network_id'),
Index('address', 'address'),
Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
Index('fixed_ips_host_idx', 'host'),
Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
'deleted'),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
'address', 'reserved', 'network_id', 'deleted'),
Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
'allocated')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
network_id = Column(Integer)
virtual_interface_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
# associated means that a fixed_ip has its instance_id column set
# allocated means that a fixed_ip has its virtual_interface_id column set
#TODO(sshturm) add default in db
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
#TODO(sshturm) add default in db
leased = Column(Boolean, default=False)
#TODO(sshturm) add default in db
reserved = Column(Boolean, default=False)
host = Column(String(255))
network = relationship(Network,
backref=backref('fixed_ips'),
foreign_keys=network_id,
primaryjoin='and_('
'FixedIp.network_id == Network.id,'
'FixedIp.deleted == 0,'
'Network.deleted == 0)')
instance = relationship(Instance,
foreign_keys=instance_uuid,
primaryjoin='and_('
'FixedIp.instance_uuid == Instance.uuid,'
'FixedIp.deleted == 0,'
'Instance.deleted == 0)')
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_floating_ips0address0deleted"),
Index('fixed_ip_id', 'fixed_ip_id'),
Index('floating_ips_host_idx', 'host'),
Index('floating_ips_project_id_idx', 'project_id'),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
'pool', 'deleted', 'fixed_ip_id', 'project_id')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
fixed_ip_id = Column(Integer)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False)
#TODO(sshturm) add default in db
pool = Column(String(255))
interface = Column(String(255))
fixed_ip = relationship(FixedIp,
backref=backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
'FloatingIp.deleted == 0,'
'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
__table_args__ = (
Index('project_id', 'project_id'),
Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
)
deleted = Column(Boolean, default=False)
domain = Column(String(255), primary_key=True)
scope = Column(String(255))
availability_zone = Column(String(255))
project_id = Column(String(255))
class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
__table_args__ = (
schema.UniqueConstraint(
"host", "console_type", "compute_host", "deleted",
name="uniq_console_pools0host0console_type0compute_host0deleted"),
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
public_hostname = Column(String(255))
host = Column(String(255))
compute_host = Column(String(255))
class Console(BASE, NovaBase):
"""Represents a console session for an instance."""
__tablename__ = 'consoles'
__table_args__ = (
Index('consoles_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
instance_name = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
password = Column(String(255))
port = Column(Integer)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
pool = relationship(ConsolePool, backref=backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
"""Represents a user-provided metadata key/value pair for an instance."""
__tablename__ = 'instance_metadata'
__table_args__ = (
Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = relationship(Instance, backref="metadata",
foreign_keys=instance_uuid,
primaryjoin='and_('
'InstanceMetadata.instance_uuid == '
'Instance.uuid,'
'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase):
"""Represents a system-owned metadata key/value pair for an instance."""
__tablename__ = 'instance_system_metadata'
__table_args__ = ()
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
'Instance.uuid, InstanceSystemMetadata.deleted == 0)')
instance = relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid,
primaryjoin=primary_join)
class InstanceTypeProjects(BASE, NovaBase):
"""Represent projects associated instance_types."""
__tablename__ = "instance_type_projects"
__table_args__ = (schema.UniqueConstraint(
"instance_type_id", "project_id", "deleted",
name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
),
)
id = Column(Integer, primary_key=True)
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
project_id = Column(String(255))
instance_type = relationship(InstanceTypes, backref="projects",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
__table_args__ = (
Index('instance_type_extra_specs_instance_type_id_key_idx',
'instance_type_id', 'key'),
schema.UniqueConstraint(
"instance_type_id", "key", "deleted",
name=("uniq_instance_type_extra_specs0"
"instance_type_id0key0deleted")
),
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase):
"""Represents parent and child cells of this cell. Cells can
have multiple parents and children, so there could be any number
of entries with is_parent=True or False
"""
__tablename__ = 'cells'
__table_args__ = (schema.UniqueConstraint(
"name", "deleted", name="uniq_cells0name0deleted"
),
)
id = Column(Integer, primary_key=True)
# Name here is the 'short name' of a cell. For instance: 'child1'
name = Column(String(255))
api_url = Column(String(255))
transport_url = Column(String(255), nullable=False)
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
is_parent = Column(Boolean())
class AggregateHost(BASE, NovaBase):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
__table_args__ = (schema.UniqueConstraint(
"host", "aggregate_id", "deleted",
name="uniq_aggregate_hosts0host0aggregate_id0deleted"
),
)
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
__table_args__ = (
schema.UniqueConstraint("aggregate_id", "key", "deleted",
name="uniq_aggregate_metadata0aggregate_id0key0deleted"
),
Index('aggregate_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
__table_args__ = ()
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
_hosts = relationship(AggregateHost,
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == 0,'
'Aggregate.deleted == 0)')
_metadata = relationship(AggregateMetadata,
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == 0,'
'Aggregate.deleted == 0)')
@property
def _extra_keys(self):
return ['hosts', 'metadetails', 'availability_zone']
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return dict([(m.key, m.value) for m in self._metadata])
@property
def availability_zone(self):
if 'availability_zone' not in self.metadetails:
return None
return self.metadetails['availability_zone']
class AgentBuild(BASE, NovaBase):
"""Represents an agent build."""
__tablename__ = 'agent_builds'
__table_args__ = (
Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
'architecture'),
schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
)
id = Column(Integer, primary_key=True)
hypervisor = Column(String(255))
os = Column(String(255))
architecture = Column(String(255))
version = Column(String(255))
url = Column(String(255))
md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
"""Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
__table_args__ = (
Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
'start_period'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36))
mac = Column(String(255))
start_period = Column(DateTime, nullable=False)
last_refreshed = Column(DateTime)
bw_in = Column(BigInteger)
bw_out = Column(BigInteger)
last_ctr_in = Column(BigInteger)
last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase):
"""Cache for volume usage data pulled from the hypervisor."""
__tablename__ = 'volume_usage_cache'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), nullable=False)
instance_uuid = Column(String(36))
project_id = Column(String(36))
user_id = Column(String(36))
availability_zone = Column(String(255))
tot_last_refreshed = Column(DateTime)
tot_reads = Column(BigInteger, default=0)
tot_read_bytes = Column(BigInteger, default=0)
tot_writes = Column(BigInteger, default=0)
tot_write_bytes = Column(BigInteger, default=0)
curr_last_refreshed = Column(DateTime)
curr_reads = Column(BigInteger, default=0)
curr_read_bytes = Column(BigInteger, default=0)
curr_writes = Column(BigInteger, default=0)
curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase):
"""Compatibility layer for the S3 image service talking to Glance."""
__tablename__ = 's3_images'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 volume service."""
__tablename__ = 'volume_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase):
__tablename__ = 'instance_faults'
__table_args__ = (
Index('instance_faults_host_idx', 'host'),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
'instance_uuid', 'deleted', 'created_at')
)
id = Column(Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(MediumText())
host = Column(String(255))
class InstanceAction(BASE, NovaBase):
"""Track client actions on an instance.
The intention is that there will only be one of these per user request. A
lookup by (instance_uuid, request_id) should always return a single result.
"""
__tablename__ = 'instance_actions'
__table_args__ = (
Index('instance_uuid_idx', 'instance_uuid'),
Index('request_id_idx', 'request_id')
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
action = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
request_id = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
message = Column(String(255))
class InstanceActionEvent(BASE, NovaBase):
"""Track events that occur during an InstanceAction."""
__tablename__ = 'instance_actions_events'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
event = Column(String(255))
action_id = Column(Integer, ForeignKey('instance_actions.id'))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
result = Column(String(255))
traceback = Column(Text)
host = Column(String(255))
details = Column(Text)
class InstanceIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 instance service."""
__tablename__ = 'instance_id_mappings'
__table_args__ = (
Index('ix_instance_id_mappings_uuid', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
"""Audit log for background periodic tasks."""
__tablename__ = 'task_log'
__table_args__ = (
schema.UniqueConstraint(
'task_name', 'host', 'period_beginning', 'period_ending',
name="uniq_task_log0task_name0host0period_beginning0period_ending"
),
Index('ix_task_log_period_beginning', 'period_beginning'),
Index('ix_task_log_host', 'host'),
Index('ix_task_log_period_ending', 'period_ending'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
state = Column(String(255), nullable=False)
host = Column(String(255), nullable=False)
period_beginning = Column(DateTime, default=timeutils.utcnow,
nullable=False)
period_ending = Column(DateTime, default=timeutils.utcnow,
nullable=False)
message = Column(String(255), nullable=False)
task_items = Column(Integer(), default=0)
errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase):
"""Represents the members for an instance group."""
__tablename__ = 'instance_group_member'
__table_args__ = (
Index('instance_group_member_instance_idx', 'instance_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
instance_id = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupPolicy(BASE, NovaBase):
"""Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy'
__table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'),
)
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupMetadata(BASE, NovaBase):
"""Represents a key/value pair for an instance group."""
__tablename__ = 'instance_group_metadata'
__table_args__ = (
Index('instance_group_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True, nullable=False)
key = Column(String(255))
value = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroup(BASE, NovaBase):
"""Represents an instance group.
A group will maintain a collection of instances and the relationship
between them.
"""
__tablename__ = 'instance_groups'
__table_args__ = (
schema.UniqueConstraint("uuid", "deleted",
name="uniq_instance_groups0uuid0deleted"),
)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255))
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
_policies = relationship(InstanceGroupPolicy, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupPolicy.group_id,'
'InstanceGroupPolicy.deleted == 0,'
'InstanceGroup.deleted == 0)')
_metadata = relationship(InstanceGroupMetadata, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMetadata.group_id,'
'InstanceGroupMetadata.deleted == 0,'
'InstanceGroup.deleted == 0)')
_members = relationship(InstanceGroupMember, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMember.group_id,'
'InstanceGroupMember.deleted == 0,'
'InstanceGroup.deleted == 0)')
@property
def policies(self):
return [p.policy for p in self._policies]
@property
def metadetails(self):
return dict((m.key, m.value) for m in self._metadata)
@property
def members(self):
return [m.instance_id for m in self._members]
class PciDevice(BASE, NovaBase):
"""Represents a PCI host device that can be passed through to instances.
"""
__tablename__ = 'pci_devices'
__table_args__ = (
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
schema.UniqueConstraint(
"compute_node_id", "address", "deleted",
name="uniq_pci_devices0compute_node_id0address0deleted")
)
id = Column(Integer, primary_key=True)
compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
nullable=False)
# physical address of device domain:bus:slot.func (0000:09:01.1)
address = Column(String(12), nullable=False)
vendor_id = Column(String(4), nullable=False)
product_id = Column(String(4), nullable=False)
dev_type = Column(String(8), nullable=False)
dev_id = Column(String(255))
# label is an abstract device name, used to unify devices that provide the
# same functionality but have different addresses or hosts.
label = Column(String(255), nullable=False)
status = Column(String(36), nullable=False)
extra_info = Column(Text)
instance_uuid = Column(String(36))
instance = relationship(Instance, backref="pci_devices",
foreign_keys=instance_uuid,
primaryjoin='and_('
'PciDevice.instance_uuid == Instance.uuid,'
'PciDevice.deleted == 0)')
|
|
"""Appcommands-compatible command class with extra fixins."""
from __future__ import print_function
import cmd
import inspect
import pdb
import shlex
import sys
import traceback
import types
import six
from google.apputils import app
from google.apputils import appcommands
import gflags as flags
__all__ = [
'NewCmd',
'Repl',
]
flags.DEFINE_boolean(
'debug_mode', False,
'Show tracebacks on Python exceptions.')
flags.DEFINE_boolean(
'headless', False,
'Assume no user is at the controlling console.')
FLAGS = flags.FLAGS
def _SafeMakeAscii(s):
if isinstance(s, six.text_type):
return s.encode('ascii')
elif isinstance(s, str):
return s.decode('ascii')
else:
return six.text_type(s).encode('ascii', 'backslashreplace')
class NewCmd(appcommands.Cmd):
"""Featureful extension of appcommands.Cmd."""
def __init__(self, name, flag_values):
super(NewCmd, self).__init__(name, flag_values)
run_with_args = getattr(self, 'RunWithArgs', None)
self._new_style = isinstance(run_with_args, types.MethodType)
if self._new_style:
func = run_with_args.__func__
argspec = inspect.getargspec(func)
if argspec.args and argspec.args[0] == 'self':
argspec = argspec._replace( # pylint: disable=protected-access
args=argspec.args[1:])
self._argspec = argspec
# TODO(user): Do we really want to support all this
# nonsense?
self._star_args = self._argspec.varargs is not None
self._star_kwds = self._argspec.keywords is not None
self._max_args = len(self._argspec.args or ())
self._min_args = self._max_args - len(self._argspec.defaults or ())
if self._star_args:
self._max_args = sys.maxint
self._debug_mode = FLAGS.debug_mode
self.surface_in_shell = True
self.__doc__ = self.RunWithArgs.__doc__
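# For example, a hypothetical new-style command defining
#     def RunWithArgs(self, bucket, prefix='', *paths): ...
# ends up with _min_args == 1 and, because of *paths, _max_args == sys.maxint;
# without the varargs it would have _max_args == 2.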
def __getattr__(self, name):
if name in self._command_flags:
return self._command_flags[name].value
return super(NewCmd, self).__getattribute__(name)
def _GetFlag(self, flagname):
if flagname in self._command_flags:
return self._command_flags[flagname]
else:
return None
def Run(self, argv):
"""Run this command.
If self is a new-style command, we set up arguments and call
self.RunWithArgs, gracefully handling exceptions. If not, we
simply call self.Run(argv).
Args:
argv: List of arguments as strings.
Returns:
0 on success, nonzero on failure.
"""
if not self._new_style:
return super(NewCmd, self).Run(argv)
# TODO(user): We need to save and restore flags each time so
# that we can use per-command flags in the REPL.
args = argv[1:]
fail = None
if len(args) < self._min_args:
fail = 'Not enough positional args; found %d, expected at least %d' % (
len(args), self._min_args)
if len(args) > self._max_args:
fail = 'Too many positional args; found %d, expected at most %d' % (
len(args), self._max_args)
if fail:
print(fail)
if self.usage:
print('Usage: %s' % (self.usage,))
return 1
if self._debug_mode:
return self.RunDebug(args, {})
else:
return self.RunSafely(args, {})
def RunCmdLoop(self, argv):
"""Hook for use in cmd.Cmd-based command shells."""
try:
args = shlex.split(argv)
except ValueError as e:
raise SyntaxError(self.EncodeForPrinting(e))
return self.Run([self._command_name] + args)
@staticmethod
def EncodeForPrinting(s):
"""Safely encode a string as the encoding for sys.stdout."""
encoding = sys.stdout.encoding or 'ascii'
return six.text_type(s).encode(encoding, 'backslashreplace')
def _FormatError(self, e):
"""Hook for subclasses to modify how error messages are printed."""
return _SafeMakeAscii(e)
def _HandleError(self, e):
message = self._FormatError(e)
print('Exception raised in %s operation: %s' % (
self._command_name, message))
return 1
def _IsDebuggableException(self, e):
"""Hook for subclasses to skip debugging on certain exceptions."""
return not isinstance(e, app.UsageError)
def RunDebug(self, args, kwds):
"""Run this command in debug mode."""
try:
return_value = self.RunWithArgs(*args, **kwds)
except BaseException as e:
# Don't break into the debugger for expected exceptions.
if not self._IsDebuggableException(e):
return self._HandleError(e)
print()
print('****************************************************')
print('** Unexpected Exception raised in execution! **')
if FLAGS.headless:
print('** --headless mode enabled, exiting. **')
print('** See STDERR for traceback. **')
else:
print('** --debug_mode enabled, starting pdb. **')
print('****************************************************')
print()
traceback.print_exc()
print()
if not FLAGS.headless:
pdb.post_mortem()
return 1
return return_value
def RunSafely(self, args, kwds):
"""Run this command, turning exceptions into print statements."""
try:
return_value = self.RunWithArgs(*args, **kwds)
except BaseException as e:
return self._HandleError(e)
return return_value
# pylint: disable=g-bad-name
class CommandLoop(cmd.Cmd):
"""Instance of cmd.Cmd built to work with NewCmd."""
class TerminateSignal(Exception):
"""Exception type used for signaling loop completion."""
def __init__(self, commands, prompt):
cmd.Cmd.__init__(self)
self._commands = {'help': commands['help']}
self._special_command_names = ['help', 'repl', 'EOF']
for name, command in six.iteritems(commands):
if (name not in self._special_command_names and
isinstance(command, NewCmd) and
command.surface_in_shell):
self._commands[name] = command
setattr(self, 'do_%s' % (name,), command.RunCmdLoop)
self._default_prompt = prompt
self._set_prompt()
self._last_return_code = 0
@property
def last_return_code(self):
return self._last_return_code
def _set_prompt(self):
self.prompt = self._default_prompt
def do_EOF(self, *unused_args):
"""Terminate the running command loop.
This function raises an exception to avoid the need to do
potentially-error-prone string parsing inside onecmd.
Args:
*unused_args: unused.
Returns:
Never returns.
Raises:
CommandLoop.TerminateSignal: always.
"""
raise CommandLoop.TerminateSignal()
def postloop(self):
print('Goodbye.')
def completedefault(self, unused_text, line, unused_begidx, unused_endidx):
if not line:
return []
else:
command_name = line.partition(' ')[0].lower()
usage = ''
if command_name in self._commands:
usage = self._commands[command_name].usage
if usage:
print()
print(usage)
print('%s%s' % (self.prompt, line), end=' ')
return []
def emptyline(self):
print('Available commands:', end=' ')
print(' '.join(list(self._commands)))
def precmd(self, line):
"""Preprocess the shell input."""
if line == 'EOF':
return line
if line.startswith('exit') or line.startswith('quit'):
return 'EOF'
words = line.strip().split()
if len(words) == 1 and words[0] not in ['help', 'ls', 'version']:
return 'help %s' % (line.strip(),)
return line
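# For example, the single unknown word "status" is rewritten to "help status",
# lines starting with "exit" or "quit" become "EOF", and multi-word lines
# (or "help"/"ls"/"version") pass through unchanged.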
def onecmd(self, line):
"""Process a single command.
Runs a single command, and stores the return code in
self._last_return_code. Always returns False unless the command
was EOF.
Args:
line: (str) Command line to process.
Returns:
A bool signaling whether or not the command loop should terminate.
"""
try:
self._last_return_code = cmd.Cmd.onecmd(self, line)
except CommandLoop.TerminateSignal:
return True
except BaseException as e:
name = line.split(' ')[0]
print('Error running %s:' % name)
print(e)
self._last_return_code = 1
return False
def get_names(self):
names = dir(self)
commands = (name for name in self._commands
if name not in self._special_command_names)
names.extend('do_%s' % (name,) for name in commands)
names.remove('do_EOF')
return names
def do_help(self, command_name):
"""Print the help for command_name (if present) or general help."""
# TODO(user): Add command-specific flags.
def FormatOneCmd(name, command, command_names):
indent_size = appcommands.GetMaxCommandLength() + 3
if len(command_names) > 1:
indent = ' ' * indent_size
command_help = flags.TextWrap(
command.CommandGetHelp('', cmd_names=command_names),
indent=indent,
firstline_indent='')
first_help_line, _, rest = command_help.partition('\n')
first_line = '%-*s%s' % (indent_size, name + ':', first_help_line)
return '\n'.join((first_line, rest))
else:
default_indent = ' '
return '\n' + flags.TextWrap(
command.CommandGetHelp('', cmd_names=command_names),
indent=default_indent,
firstline_indent=default_indent) + '\n'
if not command_name:
print('\nHelp for commands:\n')
command_names = list(self._commands)
print('\n\n'.join(
FormatOneCmd(name, command, command_names)
for name, command in six.iteritems(self._commands)
if name not in self._special_command_names))
print()
elif command_name in self._commands:
print(FormatOneCmd(command_name, self._commands[command_name],
command_names=[command_name]))
return 0
def postcmd(self, stop, line):
return bool(stop) or line == 'EOF'
# pylint: enable=g-bad-name
class Repl(NewCmd):
"""Start an interactive session."""
PROMPT = '> '
def __init__(self, name, fv):
super(Repl, self).__init__(name, fv)
self.surface_in_shell = False
flags.DEFINE_string(
'prompt', '',
'Prompt to use for interactive shell.',
flag_values=fv)
def RunWithArgs(self):
"""Start an interactive session."""
prompt = FLAGS.prompt or self.PROMPT
repl = CommandLoop(appcommands.GetCommandList(), prompt=prompt)
print('Welcome! (Type help for more information.)')
while True:
try:
repl.cmdloop()
break
except KeyboardInterrupt:
print()
return repl.last_return_code
|
|
'''
// Copyright 2008 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original Author: Not stated
//
// Notification of Change: The original java source code has been
// modified in that it has been rewritten in the python programming
// language and additionally, may contain components and ideas that are
// not found in the original source code.
Copyright 2013 Neil Borle and Paul Lu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-06-06
@author: Neil Borle
'''
import math
from RendererObjectManager import RendererObjectManager
from src.rendererUtil.VertexBuffer import VertexBuffer
from src.rendererUtil.TextCoordBuffer import TextCoordBuffer
from src.rendererUtil.NightVisionColorBuffer import NightVisionBuffer
from src.rendererUtil.IndexBuffer import IndexBuffer
from src.rendererUtil.TextureManager import TextureManager
from src.utils.VectorUtil import difference, sum_vectors, normalized, cross_product
from src.utils.DebugOptions import Debug
DRAWABLE_LINE = int("0x7f02003a", 0)
class PolyLineObjectManager(RendererObjectManager):
'''
Manages the rendering of lines by loading points and lines
into glbuffers
'''
def update_objects(self, lines, update_type):
# We only care about updates to positions, ignore any other updates.
if not (self.update_type.Reset in update_type) and \
not (self.update_type.UpdatePositions in update_type):
return
num_line_segments = 0
for l_source in lines:
num_line_segments += len(l_source.gc_vertices) - 1
# To render everything in one call, we render everything as a line list
# rather than a series of line strips.
num_vertices = 4 * num_line_segments
num_indices = 6 * num_line_segments
vb = self.vertex_buffer
vb.reset(4 * num_line_segments)
cb = self.color_buffer
cb.reset(4 * num_line_segments)
tb = self.text_coord_buffer
tb.reset(num_vertices)
ib = self.index_buffer
ib.reset(num_indices)
# See comment in PointObjectManager for justification of this calculation.
fovy_in_radians = 60 * math.pi / 180.0
size_factor = math.tan(fovy_in_radians * 0.5) / 480.0
bool_opaque = True
vertex_index = 0
for l_source in lines:
coords_list = l_source.gc_vertices
if len(coords_list) < 2:
continue
# If the color isn't fully opaque, set opaque to false.
color = l_source.color
bool_opaque &= int(color & 0xff000000) == 0xff000000
# Add the vertices.
for i in range(0, len(coords_list) - 1):
p1 = coords_list[i]
p2 = coords_list[i+1]
u = difference(p2, p1)
# The normal to the quad should face the origin at its midpoint.
avg = sum_vectors(p1, p2)
avg.scale(0.5)
# I'm assuming that the points will already be on a unit sphere. If this is not the case,
# then we should normalize them here.
v = normalized(cross_product(u, avg))
v.scale(size_factor * l_source.line_width)
# Add the vertices
# Lower left corner
vb.add_point(difference(p1, v))
cb.add_color(color)
tb.add_text_coord(0, 1)
# Upper left corner
vb.add_point(sum_vectors(p1, v))
cb.add_color(color)
tb.add_text_coord(0, 0)
# Lower right corner
vb.add_point(difference(p2, v))
cb.add_color(color)
tb.add_text_coord(1, 1)
# Upper right corner
vb.add_point(sum_vectors(p2, v))
cb.add_color(color)
tb.add_text_coord(1, 0)
# Add the indices
bottom_left = vertex_index
top_left = vertex_index + 1
bottom_right = vertex_index + 2
top_right = vertex_index + 3
vertex_index += 4
# First triangle
ib.add_index(bottom_left)
ib.add_index(top_left)
ib.add_index(bottom_right)
# Second triangle
ib.add_index(bottom_right)
ib.add_index(top_left)
ib.add_index(top_right)
self.opaque = bool_opaque
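# Per line segment the four vertices added above are laid out (relative to vertex_index) as
#   0: lower-left  (p1 - v)   1: upper-left  (p1 + v)
#   2: lower-right (p2 - v)   3: upper-right (p2 + v)
# and the index buffer draws the quad as the two triangles (0, 1, 2) and (2, 1, 3).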
def reload(self, gl, full_reload=False):
TM = TextureManager()
self.texture_ref = TM.get_texture_from_resource(gl, DRAWABLE_LINE)
self.vertex_buffer.reload()
self.color_buffer.reload()
self.text_coord_buffer.reload()
self.index_buffer.reload()
def draw_internal(self, gl):
if Debug.DRAWING == "POINTS ONLY": return
if self.index_buffer.num_indices == 0:
return
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
gl.glEnable(gl.GL_TEXTURE_2D)
self.texture_ref.bind(gl)
gl.glEnable(gl.GL_CULL_FACE)
gl.glFrontFace(gl.GL_CW)
gl.glCullFace(gl.GL_BACK)
if not self.opaque:
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
gl.glTexEnvf(gl.GL_TEXTURE_ENV, gl.GL_TEXTURE_ENV_MODE, gl.GL_MODULATE)
self.vertex_buffer.set(gl)
self.color_buffer.set(gl, self.render_state.night_vision_mode)
self.text_coord_buffer.set(gl)
self.index_buffer.draw(gl, gl.GL_TRIANGLES)
if not self.opaque:
gl.glDisable(gl.GL_BLEND)
gl.glDisable(gl.GL_TEXTURE_2D)
gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
def __init__(self, new_layer, new_texture_manager):
'''
Constructor
'''
RendererObjectManager.__init__(self, new_layer, new_texture_manager)
self.vertex_buffer = VertexBuffer(True)
self.color_buffer = NightVisionBuffer(True)
self.text_coord_buffer = TextCoordBuffer(True)
self.index_buffer = IndexBuffer(True)
self.texture_ref = None
self.opaque = True
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/certificate/."""
__author__ = 'John Orr ([email protected])'
import actions
from controllers import sites
from models import courses
from models import models
from models import student_work
from modules.certificate import certificate
from modules.certificate import custom_criteria
from modules.review import domain
from modules.review import peer
from modules.review import review as review_module
from google.appengine.api import namespace_manager
from google.appengine.ext import db
class MockHandler(object):
def gettext(self, text):
return text
class CertificateHandlerTestCase(actions.TestBase):
"""Tests for the handler which presents the certificate."""
def setUp(self):
super(CertificateHandlerTestCase, self).setUp()
# Mock the module's student_is_qualified method
self.is_qualified = True
self.original_student_is_qualified = certificate.student_is_qualified
certificate.student_is_qualified = (
lambda student, course: self.is_qualified)
def tearDown(self):
certificate.student_is_qualified = self.original_student_is_qualified
super(CertificateHandlerTestCase, self).tearDown()
def test_student_must_be_enrolled(self):
# If student not in session, expect redirect
response = self.get('/certificate')
self.assertEquals(302, response.status_code)
# If student is not enrolled, expect redirect
actions.login('[email protected]')
response = self.get('/certificate')
self.assertEquals(302, response.status_code)
self.assertEquals(
'http://localhost/preview', response.headers['Location'])
# If the student is enrolled, expect certificate
models.Student.add_new_student_for_current_user('Test User', None, self)
response = self.get('/certificate')
self.assertEquals(200, response.status_code)
def test_student_must_be_qualified(self):
actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Test User', None, self)
# If student is not qualified, expect redirect to home page
self.is_qualified = False
response = self.get('/certificate')
self.assertEquals(302, response.status_code)
self.assertEquals('http://localhost/', response.headers['Location'])
# If student is qualified, expect certificate
self.is_qualified = True
response = self.get('/certificate')
self.assertEquals(200, response.status_code)
def test_certificate_should_have_student_nickname(self):
actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Jane Doe', None, self)
response = self.get('/certificate')
self.assertEquals(200, response.status_code)
self.assertIn('Jane Doe', response.body)
def test_download_pdf(self):
actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Test User', None, self)
response = self.get('/certificate.pdf')
self.assertEqual('application/pdf', response.headers['Content-Type'])
self.assertEqual(
'attachment; filename=certificate.pdf',
response.headers['Content-Disposition'])
self.assertIn('/Title (Course Builder Certificate)', response.body)
def test_certificate_table_entry(self):
user = actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Test User', None, self)
student = models.Student.get_by_user(user)
all_courses = sites.get_all_courses()
app_context = all_courses[0]
course = courses.Course(None, app_context=app_context)
# If the student is qualified, a link is shown
self.is_qualified = True
mock_handler = MockHandler()
table_entry = certificate.get_certificate_table_entry(
mock_handler, student, course)
self.assertEquals('Certificate', table_entry[0])
link = str(table_entry[1])
self.assertEquals(
'<a href="certificate">Click for certificate</a> '
'| <a href="certificate.pdf">Download PDF</a>', link)
# If the student is not qualified, a message is shown
self.is_qualified = False
table_entry = certificate.get_certificate_table_entry(
mock_handler, student, course)
self.assertEquals('Certificate', table_entry[0])
self.assertIn(
'You have not yet met the course requirements', table_entry[1])
class CertificateCriteriaTestCase(actions.TestBase):
"""Tests the different certificate criteria configurations."""
COURSE_NAME = 'certificate_criteria'
STUDENT_EMAIL = '[email protected]'
TEST_USER = None
ADMIN_EMAIL = '[email protected]'
ANALYTICS_URL = ('/' + COURSE_NAME +
'/dashboard?action=analytics_certificates_earned')
def setUp(self):
super(CertificateCriteriaTestCase, self).setUp()
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Certificate Criteria')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
self.course.save()
self.TEST_USER = actions.login('[email protected]')
actions.register(self, self.TEST_USER.email())
self.student = (
models.StudentProfileDAO.get_enrolled_student_by_user_for(
self.TEST_USER, context))
# Override course.yaml settings by patching app_context.
self.get_environ_old = sites.ApplicationContext.get_environ
self.certificate_criteria = []
def get_environ_new(app_context):
environ = self.get_environ_old(app_context)
environ['certificate_criteria'] = self.certificate_criteria
return environ
sites.ApplicationContext.get_environ = get_environ_new
def tearDown(self):
# Clean up app_context.
sites.ApplicationContext.get_environ = self.get_environ_old
namespace_manager.set_namespace(self.old_namespace)
super(CertificateCriteriaTestCase, self).tearDown()
def _assert_redirect_to_course_landing_page(self, response):
self.assertEquals(302, response.status_code)
self.assertEquals('http://localhost/' + self.COURSE_NAME + '/', (
response.headers['Location']))
def test_no_criteria(self):
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
def _run_analytic_and_expect(self, expected_students,
expected_active_students,
expected_certificates):
actions.login(self.ADMIN_EMAIL)
response = self.get(self.ANALYTICS_URL)
self.submit(response.forms['gcb-run-visualization-certificates_earned'],
response)
self.execute_all_deferred_tasks()
dom = self.parse_html_string(self.get(self.ANALYTICS_URL).body)
total_students = int(
dom.find('.//span[@id="total_students"]').text)
total_active_students = int(
dom.find('.//span[@id="total_active_students"]').text)
total_certificates = int(
dom.find('.//span[@id="total_certificates"]').text)
self.assertEquals(expected_students, total_students)
self.assertEquals(expected_active_students, total_active_students)
self.assertEquals(expected_certificates, total_certificates)
actions.login(self.STUDENT_EMAIL)
def test_no_criteria_analytic(self):
self._run_analytic_and_expect(1, 0, 0)
def test_machine_graded(self):
assessment = self.course.add_assessment()
assessment.title = 'Assessment'
assessment.html_content = 'assessment content'
assessment.now_available = True
self.course.save()
self.certificate_criteria.append(
{'assessment_id': assessment.unit_id, 'pass_percent': 70.0})
# Student has not yet completed assessment, expect redirect to home page
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
self._run_analytic_and_expect(1, 0, 0) # 1 student, 0 active, no cert.
# Submit assessment with low score
actions.submit_assessment(
self,
assessment.unit_id,
{'answers': '', 'score': 50.0,
'assessment_type': assessment.unit_id},
presubmit_checks=False
)
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
self._run_analytic_and_expect(1, 1, 0) # 1 student, 1 active, no cert
# Submit assessment with expected score
actions.submit_assessment(
self,
assessment.unit_id,
{'answers': '', 'score': 70,
'assessment_type': assessment.unit_id},
presubmit_checks=False
)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
self._run_analytic_and_expect(1, 1, 1) # 1 student, 1 active, 1 cert
def _submit_review(self, assessment):
"""Submits a review by the current student.
Creates a new user that completes the assessment as well,
so that the student can review it.
Args:
assessment: The assessment to review.
"""
reviewer_key = self.student.get_key()
reviewee = models.Student(key_name='[email protected]')
reviewee_key = reviewee.put()
submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=reviewee_key, unit_id=str(assessment.unit_id)))
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=reviewee_key,
submission_key=submission_key, unit_id=str(assessment.unit_id)
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=reviewee_key,
reviewer_key=reviewer_key, unit_id=str(assessment.unit_id)).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=reviewee_key, reviewer_key=reviewer_key,
submission_key=submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=str(assessment.unit_id)
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents')
self.assertEqual(step_key, updated_step_key)
def test_peer_graded(self):
assessment = self.course.add_assessment()
assessment.title = 'Assessment'
assessment.html_content = 'assessment content'
assessment.workflow_yaml = (
'{grader: human,'
'matcher: peer,'
'review_due_date: \'2034-07-01 12:00\','
'review_min_count: 1,'
'review_window_mins: 20,'
'submission_due_date: \'2034-07-01 12:00\'}')
assessment.now_available = True
self.course.save()
self.certificate_criteria.append(
{'assessment_id': assessment.unit_id})
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
actions.submit_assessment(
self,
assessment.unit_id,
{'answers': '', 'assessment_type': assessment.unit_id},
presubmit_checks=False
)
# Submitting assessment without doing required reviews is not enough
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
# Submitting assessment together with required reviews is enough
self._submit_review(assessment)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
def test_custom_criteria(self):
def test_custom_criterion(unused_student, unused_course):
return True
CRITERION = 'test_custom_criterion'
self.certificate_criteria.append(
{'custom_criteria': CRITERION})
setattr(custom_criteria, CRITERION, test_custom_criterion)
custom_criteria.registration_table.append(CRITERION)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
def test_combination(self):
# Add machine graded assessment
machine_graded = self.course.add_assessment()
machine_graded.title = 'Machine Graded'
machine_graded.html_content = 'assessment content'
machine_graded.now_available = True
# Add peer graded assessment
peer_graded = self.course.add_assessment()
peer_graded.title = 'Peer Graded'
peer_graded.html_content = 'assessment content'
peer_graded.workflow_yaml = (
'{grader: human,'
'matcher: peer,'
'review_due_date: \'2034-07-01 12:00\','
'review_min_count: 1,'
'review_window_mins: 20,'
'submission_due_date: \'2034-07-01 12:00\'}')
peer_graded.now_available = True
self.course.save()
self.certificate_criteria.extend([
{'assessment_id': machine_graded.unit_id, 'pass_percent': 30},
{'assessment_id': peer_graded.unit_id}])
# Confirm that meeting one criterion is not sufficient
actions.submit_assessment(
self,
machine_graded.unit_id,
{'answers': '', 'score': 40,
'assessment_type': machine_graded.unit_id},
presubmit_checks=False
)
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
# Confirm that meeting both criteria is sufficient
actions.submit_assessment(
self,
peer_graded.unit_id,
{'answers': '', 'assessment_type': peer_graded.unit_id},
presubmit_checks=False
)
self._submit_review(peer_graded)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
|
|
#! /usr/bin/python
"""
Module which handles the creation of a Caravan Scenario, an object representing a
Caravan input event. Scenario stems from the name used in the database to indicate a seismic input event.
The module uses globals.py (glb in the module) to cast and check each new value added to a Scenario
whose key matches a particular parameter defined in globals.py.
Moreover, a Scenario can write its values to a database (using a special function to calculate a unique hash
from those values).
(c) 2014, GFZ Potsdam
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any later
version. For more information, see http://www.gnu.org/
"""
__author__="Riccardo Zaccarelli, PhD (<riccardo(at)gfz-potsdam.de>, <riccardo.zaccarelli(at)gmail.com>)"
__date__ ="$Sep 26, 2014 3:18:42 PM$"
from __builtin__ import hash as builtin_hash
import hashlib
import caravan.settings.globals as glb
from gmpes.gmpes import getgmpe
import caravan.settings.globalkeys as gk
import mcerp
import caravan.parser as prs
def hash(value):
"""
Returns the hash of value, but also accepts lists (which are converted to tuples where necessary) and dicts,
so hash([[1,2], "a", {}, (4,)]) works
"""
def is_mutable(value, opt_value_type=None): #is just a hint, returns True if value is dict, list, or tuple containing (recursive search) a dict or list
t = type(value) if opt_value_type is None else opt_value_type
if t == list or t==dict:
return True
elif t == tuple:
has_mutable_children = False
len_ = len(value)
i=0
while i<len_ and not has_mutable_children:
has_mutable_children = is_mutable(value[i])
i+=1
return has_mutable_children
else:
return False
def convert(value):
t = type(value)
if t == list or (t == tuple and is_mutable(value, t)):
len_ = len(value)
k = [None]*len_ #initialize a list
for i in xrange(len_):
k[i] = convert(value[i])
return tuple(k)
elif t == dict:
k = [None]*2*len(value) #initialize a list
i=0
for key in value:
k[i] = convert(key)
k[i+1] = convert(value[key])
i+=2
return tuple(k)
return value
return builtin_hash(convert(value))
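# A minimal doctest-style sketch of the behaviour (not executed here):
# >>> hash([[1, 2], "a", {}, (4,)]) == hash(((1, 2), "a", (), (4,)))
# True
# i.e. lists are converted to tuples and dicts to flat (key, value, ...) tuples before hashing.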
_scenario_name = 'scenario_name' #FIXME: is public. Good choice?
def scenario_hash(dict_):
"""
Returns the hash of dict_ (a dict) according to s=glb.scenario_db_cols
Builds a list L of N None values, where N = len(s). Then, for any key of dict_
which is found in s, takes the relative index and puts L[index] = dict_[key]
Returns the hash of L (as tuple, which is hashable)
"""
params = glb.params
dbindices = glb.scenario_db_cols
dblen = len(dbindices)
dbarray = [None]*dblen
for k in dict_:
if not k in params:
continue
param = params[k]
if _scenario_name in param:
column_name = param[_scenario_name]
if column_name in dbindices:
col_index = dbindices[column_name]
dbarray[col_index] = dict_[k]
return hash(dbarray)
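# A hedged sketch with *hypothetical* globals (the real glb.params / glb.scenario_db_cols differ):
#     glb.params = {'magnitude': {'scenario_name': 'gid_magnitude'}, ...}
#     glb.scenario_db_cols = {'gid_magnitude': 0, 'gid_depth': 1}
#     scenario_hash({'magnitude': 6.8})  # -> hash([6.8, None]), i.e. the hash of the tuple (6.8, None)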
def val_sc(pkey, p, value):
"""
Returns the scenario value of the value argument, already cast according to p=glb.params[pkey]
"""
if pkey == gk.IPE:
return glb.def_gmpes[value]
if pkey == gk.SOF:
return glb.sof[value]
isdistrib = 'distrib' in p
if isdistrib:
dist_name = p['distrib'].lower().title()
if not prs.isscalar(value): #note that UncertainFunction is NOT scalar,
#but here we should have only numbers or array of numbers (according to params in glb)
#try build a distribution with the given value(s)
return mcerp.__dict__[dist_name](*value)
return value
def val_db(pkey, p, value):
"""
Returns the database value of the value argument, already cast according to p=glb.params[pkey]
"""
isdistrib = 'distrib' in p
if isdistrib:
dist_name = p['distrib'].lower().title()
if prs.isscalar(value):
#value is scalar, define how to set db value:
if dist_name == "Uniform" :
return [value, value]
elif dist_name == "Normal":
return [value, 0]
else:
raise Exception("{0} distribution not implemented in cast function (scenario.py). Please contact the administrator".format(dist_name))
return value
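# For example, for a parameter p with p['distrib'] == 'uniform' a scalar 6.8 is stored as
# [6.8, 6.8]; with p['distrib'] == 'normal' it becomes [6.8, 0]; non-scalar values
# (e.g. an already supplied [mean, std] pair) are passed through unchanged.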
def tostr(dict, html=True):
n = "\n" if not html else "<br>"
s = " " if not html else " "
return "{{{0}{1}{0}}}".format(n, ',{0}'.format(n).join("{0}{1} = {2}".format(s, k, str(dict[k])) for k in dict))
#from collections import OrderedDict
#class Scenario(OrderedDict):
class Scenario(dict):
def __init__(self, *args, **kwargs):
super(Scenario, self).__init__() #<-- HERE call to super __init__. Useless if extending dict, necessary if extending OrderedDict
self.__db = {}
self.__dbhash = None
self.update(*args, **kwargs) #note: pass args as positional arguments, not as (single) list owning those arguments
self.__text = None
self.dbtext() # holds a string representation of the scenario, it will be used to evaluate the hash
self.dbhash() # generate the hash upon initialization; parameters won't change from now on
# holds a string representation of the scenario, it will be used to evaluate the hash
def dbtext(self):
if self.__text is None:
self.__text = ""
for key, value in self.__db.items():
self.__text +=( str(key) +":")
if "parse_opts" in glb.params[key] and "decimals" in glb.params[key]["parse_opts"]:
decimals = int( glb.params[key]["parse_opts"]["decimals"] )
self.__text +=( self.__extend_to_proper_lenght__(value, decimals) +';' )
else:
self.__text +=( str(value) +';' )
self.__text = self.__text.rstrip(';')
return self.__text
# pads parameter values to the proper length (important for creating dbtext)
def __extend_to_proper_lenght__(self, value, decimals):
if hasattr(value, "__iter__"):
value_str = "("
for i in value:
value_str +=( self.__extend_to_proper_lenght__(i, decimals) +"," )
value_str = value_str.rstrip(',')
value_str += ")"
else:
value_str = str(value)
if decimals > 0 and not "." in value_str:
value_str += "."
zeroes_to_append = decimals -( len(value_str) -(value_str.find('.') +1) )
if zeroes_to_append > 0:
value_str +=( '0' *zeroes_to_append )
return value_str
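# For example, with decimals == 3: 5 -> "5.000", 2.5 -> "2.500" and the tuple
# (1, 2.5) -> "(1.000,2.500)", so equal values always serialize to the same dbtext.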
def __setitem__(self, key, value):
params = glb.params
k_low = key.lower()
if k_low in params:
p = params[k_low]
value = glb.cast(p, value)
#cast to db_val:
if _scenario_name in p:
self.__db[k_low] = val_db(k_low, p, value)
self.__dbhash = None
#cast for scenario (self):
value = val_sc(k_low, p, value)
super(Scenario, self).__setitem__(k_low, value)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("Scenario.update expected at most 1 argument, got {:d}".format(len(args)))
#if a scenario, don't check:
if isinstance(args[0], Scenario):
self.__dbhash = None #lazily initialized. In this case we don't call __setitem__ thus we need to call it here
self.__db = args[0].__db.copy()
for key in args[0]:
super(Scenario, self).__setitem__(key, args[0][key])
else: #normal method:
other = dict(args[0])
for key in other:
self[key] = other[key] #calls __setitem__
for key in kwargs:
self[key] = kwargs[key] #calls __setitem__
def setdefault(self, key, value=None):
if key not in self:
self[key] = value #calls __setitem__
return self[key]
#from here:http://stackoverflow.com/questions/1436703/difference-between-str-and-repr-in-python
#repr for developers, str for users
#but should be worth a read, it's apparently a long story...
# def __repr__(self):
# return tostr(self)
def dbstr(self):
return tostr(self.__db)
@property
def gmpe(self):
"""
Returns the gmpe used for this scenario. NOTE THAT INTERNALLY THE GMPE IS THE TUPLE
(gmpe_class_name, gmpe_dbase_index) (see cast function)
"""
#REPLACE PARAMETERS WITH GMPE PARAMS. FIXME: VERY "DIRTY", DO SOMETHING BETTER?
d = {}
d['m'] = self[gk.MAG]
d['lat'] = self[gk.LAT]
d['lon'] = self[gk.LON]
d['depth'] = self[gk.DEP]
if gk.SOF in self:
d['sof'] = self[gk.SOF]
d['strike'] = self[gk.STR]
d['dip'] = self[gk.DIP]
#FIXME: this adds also unnecessary arguments to the gmpe, including the GMPE name itself!
#leave it like this??!!
return self[gk.IPE](**d)
def dbhash(self):
if self.__dbhash is None:
# lazily calculate it (it is reset to None at startup and any time a key is set whose
# parameter p in glb.params contains the key _scenario_name)
#self.__dbhash = scenario_hash(self.__db)
# switching hashing mechanism to md5, in hexadecimal format
# note that hexdigest needs to be shortened and casted to int
# as the database expects the hash to be a bigint (in a certain range)
self.__dbhash = int(hashlib.md5(self.__text).hexdigest()[0:15], 16)
# at most 15 digits can be used because more digits would lead to an out-of-range bigint (database)
# self.__dbhash = int('f'*15, 16)
# note that this still guarantees a good level of uniqueness
# https://stackoverflow.com/questions/2510716/short-python-alphanumeric-hash-with-minimal-collisions
return self.__dbhash
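# Sketch of the truncation above (the input string is illustrative only):
#     int(hashlib.md5("latitude:42.80;magnitude:6.80").hexdigest()[0:15], 16)
# can be at most 16**15 - 1 == 1152921504606846975, which fits comfortably in a
# signed 64-bit bigint column.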
def writetodb(self, dbconn):
"""
Writes this scenario to the database, returning the tuple (scenario_id, is_new),
where the first item is an integer (serial key to access the scenario again)
and the second is a boolean indicating whether the scenario has been written to the database
(True) or an already existing scenario with the same hash as the current scenario has been found
(False): in that case scenario_id is the already existing scenario id.
Raises an exception if there is more than one scenario with the same hash.
"""
scenario_hash = self.dbhash()
scenarios = dbconn.fetchall("select * from processing.scenarios where hash=%s;" , (scenario_hash,))
#the line above returns list objects. To return dict objects see
#https://wiki.postgresql.org/wiki/Using_psycopg2_with_PostgreSQL
#cursor.close()
#check if exists:
if len(scenarios) > 1:
raise Exception("{:d} scenarios with hash={:d}: database error, please contact the administrator".format(len(scenarios), scenario_hash))
elif len(scenarios) == 1:
return scenarios[0][0], False
params = glb.params
dbkeys = ['hash']
dbvals = [scenario_hash]
for k in self.__db:
if not k in params: continue #for safety
p = params[k]
if _scenario_name in p: #for safety, again
dbkeys.append(p[_scenario_name])
dbvals.append(self.__db[k])
dbkeys_str = ','.join(dbkeys)
db_str = ",".join(["%s" for _ in dbkeys])
arg1 = """INSERT INTO processing.scenarios ({0}) VALUES ({1});""".format(dbkeys_str, db_str)
arg2 = tuple(dbvals)
dbconn.execute(arg1, arg2)
dbconn.commit()
scenarios = dbconn.fetchall("select * from processing.scenarios where hash=%s;" , (scenario_hash,))
return scenarios[0][0], True
# if __name__ == '__main__':
#
# from __future__ import print_function
# # from collections import OrderedDict
#
# #min_dist(7, 15, 5, 20)
# event = {'longitude': '74.2354 0.5', 'latitude':42.8, 'depth':15, 'strike':0, 'ipe':2, 'magnitude':6.8}
# db = Scenario(event)
#
# print(str(db))
#
# print(scenario_hash(event))
|
|
"""
Pkgutil support for Solaris
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
"""
import copy
import salt.utils.data
import salt.utils.functools
import salt.utils.pkg
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError
# Define the module's virtual name
__virtualname__ = "pkgutil"
def __virtual__():
"""
Set the virtual pkg module if the os is Solaris
"""
if __grains__["os_family"] == "Solaris":
return __virtualname__
return (
False,
"The pkgutil execution module cannot be loaded: "
"only available on Solaris systems.",
)
def refresh_db():
"""
Updates the pkgutil repo database (pkgutil -U)
CLI Example:
.. code-block:: bash
salt '*' pkgutil.refresh_db
"""
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
return __salt__["cmd.retcode"]("/opt/csw/bin/pkgutil -U") == 0
def upgrade_available(name):
"""
Check if there is an upgrade available for a certain package
CLI Example:
.. code-block:: bash
salt '*' pkgutil.upgrade_available CSWpython
"""
version_num = None
cmd = "/opt/csw/bin/pkgutil -c --parse --single {}".format(name)
out = __salt__["cmd.run_stdout"](cmd)
if out:
version_num = out.split()[2].strip()
if version_num:
if version_num == "SAME":
return ""
else:
return version_num
return ""
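# Sketch of the output this parsing assumes (the exact pkgutil column layout is an
# assumption inferred from out.split()[2], not verified against a live system):
#     "CSWpython  python  2.6.9,REV=2014.03.05"  -> returns "2.6.9,REV=2014.03.05"
#     "CSWpython  python  SAME"                  -> returns "" (already up to date)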
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
"""
List all available package upgrades on this system
CLI Example:
.. code-block:: bash
salt '*' pkgutil.list_upgrades
"""
if salt.utils.data.is_true(refresh):
refresh_db()
upgrades = {}
lines = __salt__["cmd.run_stdout"]("/opt/csw/bin/pkgutil -A --parse").splitlines()
for line in lines:
comps = line.split("\t")
if comps[2] == "SAME":
continue
if comps[2] == "not installed":
continue
upgrades[comps[0]] = comps[1]
return upgrades
def upgrade(refresh=True):
"""
Upgrade all of the packages to the latest available version.
Returns a dict containing the changes::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkgutil.upgrade
"""
if salt.utils.data.is_true(refresh):
refresh_db()
old = list_pkgs()
# Install or upgrade the package
# If package is already installed
cmd = "/opt/csw/bin/pkgutil -yu"
__salt__["cmd.run_all"](cmd)
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new)
def _list_pkgs_from_context(versions_as_list):
"""
Use pkg list from __context__
"""
if versions_as_list:
return __context__["pkg.list_pkgs"]
else:
ret = copy.deepcopy(__context__["pkg.list_pkgs"])
__salt__["pkg_resource.stringify"](ret)
return ret
def list_pkgs(versions_as_list=False, **kwargs):
"""
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
"""
versions_as_list = salt.utils.data.is_true(versions_as_list)
# 'removed' not yet implemented or not applicable
if salt.utils.data.is_true(kwargs.get("removed")):
return {}
if "pkg.list_pkgs" in __context__ and kwargs.get("use_context", True):
return _list_pkgs_from_context(versions_as_list)
ret = {}
cmd = "/usr/bin/pkginfo -x"
# Package information is returned as two lines per package. On even-offset
# lines, the package name is in the first column. On odd-offset lines, the
# package version is in the second column.
lines = __salt__["cmd.run"](cmd).splitlines()
for index, line in enumerate(lines):
if index % 2 == 0:
name = line.split()[0].strip()
if index % 2 == 1:
version_num = line.split()[1].strip()
__salt__["pkg_resource.add_pkg"](ret, name, version_num)
__salt__["pkg_resource.sort_pkglist"](ret)
__context__["pkg.list_pkgs"] = copy.deepcopy(ret)
if not versions_as_list:
__salt__["pkg_resource.stringify"](ret)
return ret
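# Sketch of the pkginfo -x output the loop above assumes (columns are an assumption
# inferred from the split() indices, not verified against a live system):
#     CSWpython        python - core interpreter        <- even line: name in column 0
#                      (i386) 2.6.9,REV=2014.03.05      <- odd line: version in column 1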
def version(*names, **kwargs):
"""
Returns a version if the package is installed, else returns an empty string
CLI Example:
.. code-block:: bash
salt '*' pkgutil.version CSWpython
"""
return __salt__["pkg_resource.version"](*names, **kwargs)
def latest_version(*names, **kwargs):
"""
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
CLI Example:
.. code-block:: bash
salt '*' pkgutil.latest_version CSWpython
salt '*' pkgutil.latest_version <package1> <package2> <package3> ...
"""
refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))
if not names:
return ""
ret = {}
# Initialize the dict with empty strings
for name in names:
ret[name] = ""
# Refresh before looking for the latest version available
if refresh:
refresh_db()
pkgs = list_pkgs()
cmd = "/opt/csw/bin/pkgutil -a --parse {}".format(" ".join(names))
output = __salt__["cmd.run_all"](cmd).get("stdout", "").splitlines()
for line in output:
try:
name, version_rev = line.split()[1:3]
except ValueError:
continue
if name in names:
cver = pkgs.get(name, "")
nver = version_rev.split(",")[0]
if not cver or salt.utils.versions.compare(ver1=cver, oper="<", ver2=nver):
# The revision was stripped only for the comparison; report the full version
ret[name] = version_rev
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(
latest_version, "available_version"
)
def install(name=None, refresh=False, version=None, pkgs=None, **kwargs):
"""
Install packages using the pkgutil tool.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package_name>
salt '*' pkg.install SMClgcc346
Multiple Package Installation Options:
pkgs
A list of packages to install from OpenCSW. Must be passed as a python
list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3"}]'
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
"""
if refresh:
refresh_db()
try:
# Ignore 'sources' argument
pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs, **kwargs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
if pkg_params is None or len(pkg_params) == 0:
return {}
if pkgs is None and version and len(pkg_params) == 1:
pkg_params = {name: version}
targets = []
for param, pkgver in pkg_params.items():
if pkgver is None:
targets.append(param)
else:
targets.append("{}-{}".format(param, pkgver))
cmd = "/opt/csw/bin/pkgutil -yu {}".format(" ".join(targets))
old = list_pkgs()
__salt__["cmd.run_all"](cmd)
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new)
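# Illustrative sketch (hypothetical package names, not called anywhere): the
# targets handed to pkgutil above are either bare package names or
# "<name>-<version>" strings, mirroring the loop over pkg_params.
def _example_build_targets():
    pkg_params = {"foo": None, "bar": "1.2.3"}
    targets = []
    for param, pkgver in pkg_params.items():
        if pkgver is None:
            targets.append(param)
        else:
            targets.append("{}-{}".format(param, pkgver))
    return targets  # e.g. ['foo', 'bar-1.2.3']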
def remove(name=None, pkgs=None, **kwargs):
"""
Remove a package and all its dependencies which are not in use by other
packages.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
"""
try:
pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
cmd = "/opt/csw/bin/pkgutil -yr {}".format(" ".join(targets))
__salt__["cmd.run_all"](cmd)
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new)
def purge(name=None, pkgs=None, **kwargs):
"""
Package purges are not supported, this function is identical to
``remove()``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
"""
return remove(name=name, pkgs=pkgs)
|
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
import datetime
from django.conf import settings
from django.core.management.color import no_style
from django.db import backend, connection, connections, DEFAULT_DB_ALIAS, IntegrityError
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.test import TestCase, skipUnlessDBFeature, TransactionTestCase
from django.utils import unittest
from regressiontests.backends import models
class OracleChecks(unittest.TestCase):
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
convert_unicode = backend.convert_unicode
cursor = connection.cursor()
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!'),])
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
cursor = connection.cursor()
var = cursor.var(backend.Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
c = connection.cursor()
c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join([unicode(x) for x in xrange(4000)])
c.execute('INSERT INTO ltext VALUES (%s)',[long_str])
c.execute('SELECT text FROM ltext')
row = c.fetchone()
self.assertEqual(long_str, row[0].read())
c.execute('DROP TABLE ltext')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
c = connection.cursor() # Ensure the connection is initialized.
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.datetime(2010, 1, 1, 0, 0)])
def test_django_extract(self):
"""
Test the custom ``django_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
class LastExecutedQueryTest(TestCase):
def setUp(self):
# connection.queries will not be filled in without this
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = False
# There are no tests for the sqlite backend because it does not
# implement parameter escaping. See #14091.
@unittest.skipUnless(connection.vendor in ('oracle', 'postgresql'),
"These backends use the standard parameter escaping rules")
def test_parameter_escaping(self):
# check that both numbers and string are properly quoted
list(models.Tag.objects.filter(name="special:\\\"':", object_id=12))
sql = connection.queries[-1]['sql']
self.assertTrue("= 'special:\\\"'':' " in sql)
self.assertTrue("= 12 " in sql)
@unittest.skipUnless(connection.vendor == 'mysql',
"MySQL uses backslashes to escape parameters.")
def test_parameter_escaping_mysql(self):
list(models.Tag.objects.filter(name="special:\\\"':", object_id=12))
sql = connection.queries[-1]['sql']
# only this line is different from the test above
self.assertTrue("= 'special:\\\\\\\"\\':' " in sql)
self.assertTrue("= 12 " in sql)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1,2,3),])
self.assertRaises(Exception, cursor.executemany, query, [(1,),])
# Unfortunately, the following tests would be good to run on all
# backends, but they break MySQL hard. Until #13711 is fixed, they can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check that it does. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
# A full flush is expensive for the whole test run, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
self.assert_parses("PostgreSQL 8.3 beta4", (8, 3, None))
self.assert_parses("PostgreSQL 8.3", (8, 3, None))
self.assert_parses("EnterpriseDB 8.3", (8, 3, None))
self.assert_parses("PostgreSQL 8.3.6", (8, 3, 6))
self.assert_parses("PostgreSQL 8.4beta1", (8, 4, None))
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", (8, 3, 1))
# Unfortunately with sqlite3 the in-memory test database cannot be
# closed, and so it cannot be re-opened during testing, and so we
# sadly disable this test for now.
class ConnectionCreatedSignalTest(TestCase):
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
cursor = connection.cursor()
self.assertTrue(data["connection"] is connection)
connection_created.disconnect(receiver)
data.clear()
cursor = connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is a sqlite-specific issue")
def test_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
response = cursor.execute(
"select strftime('%%s', date('now'))").fetchall()[0][0]
self.assertNotEqual(response, None)
# response should be a non-zero integer
self.assertTrue(int(response))
class BackendTestCase(TestCase):
def test_cursor_executemany(self):
#4896: Test cursor.executemany
cursor = connection.cursor()
qn = connection.ops.quote_name
opts = models.Square._meta
f1, f2 = opts.get_field('root'), opts.get_field('square')
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)'
% (connection.introspection.table_name_converter(opts.db_table), qn(f1.column), qn(f2.column)))
cursor.executemany(query, [(i, i**2) for i in range(-5, 6)])
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
#4765: executemany with params=[] does nothing
cursor.executemany(query, [])
self.assertEqual(models.Square.objects.count(), 11)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), (u'Clark', u'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [(u'Jane', u'Doe'), (u'John', u'Doe')])
self.assertEqual(list(cursor.fetchall()), [(u'Mary', u'Agnelline'), (u'Peter', u'Parker')])
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# Instead, the tests below simply catch the exception when it is raised, which
# also verifies that its type is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a.save()
except IntegrityError:
pass
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
a.save()
except IntegrityError:
pass
|
|
import unittest
from cpu import *
class ROOperand(object):
''' Mocking class for operand ronly '''
def __init__(self, size, value):
self.size = size
self.value = value
def read(self):
return self.value & ((1<<self.size)-1)
class RWOperand(ROOperand):
''' Mocking class for operand rw '''
def write(self, value):
self.value = value & ((1<<self.size)-1)
return self.value
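# Illustrative sketch (not used by the tests below): reads and writes are
# masked to the operand size, so values wrap at 2**size.
def _example_operand_masking():
    ro = ROOperand(size=8, value=0x1FF)
    assert ro.read() == 0xFF            # masked to 8 bits
    rw = RWOperand(size=16, value=0)
    assert rw.write(0x12345) == 0x2345  # masked to 16 bits
    assert rw.read() == 0x2345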
class CPUTest(unittest.TestCase):
def setUp(self):
class Memory: #todo Mock
def getchar(self, addr):
raise NotImplementedError()
def putchar(self, addr, value):
raise NotImplementedError()
mem = Memory()
self.cpu = Cpu(mem, 'i386') #TODO reset cpu in between tests...
#TODO mock getchar/putchar in case the instruction accesses memory directly
def tearDown(self):
self.cpu = None
def testInitialRegState(self):
cpu = self.cpu
#'CR0', 'CR1', 'CR2', 'CR3', 'CR4', 'CR5', 'CR6', 'CR7', 'CR8',
# 'DR0', 'DR1', 'DR2', 'DR3', 'DR4', 'DR5', 'DR6', 'DR7',
#'MM0', 'MM1', 'MM2', 'MM3', 'MM4', 'MM5', 'MM6', 'MM7',
# 'ST0', 'ST1', 'ST2', 'ST3', 'ST4', 'ST5', 'ST6', 'ST7'
for reg_name in ['AH', 'AL', 'AX', 'BH', 'BL', 'BP', 'BPL', 'BX', 'CH', 'CL', 'CS', 'CX', 'DH', 'DI', 'DIL', 'DL', 'DS', 'DX', 'EAX', 'EBP', 'EBX', 'ECX', 'EDI', 'EDX', 'EFLAGS', 'EIP', 'ES', 'ESI', 'ESP', 'FLAGS', 'FS', 'GS', 'R10', 'R10B', 'R10D', 'R10W', 'R11', 'R11B', 'R11D', 'R11W', 'R12', 'R12B', 'R12D', 'R12W', 'R13', 'R13B', 'R13D', 'R13W', 'R14', 'R14B', 'R14D', 'R14W', 'R15', 'R15B', 'R15D', 'R15W', 'R8', 'R8B', 'R8D', 'R8W', 'R9', 'R9B', 'R9D', 'R9W', 'RAX', 'RBP', 'RBX', 'RCX', 'RDI', 'RDX', 'RFLAGS', 'RIP', 'RSI', 'RSP', 'SI', 'SIL', 'SP', 'SPL', 'SS', 'XMM0', 'XMM1', 'XMM10', 'XMM11', 'XMM12', 'XMM13', 'XMM14', 'XMM15', 'XMM2', 'XMM3', 'XMM4', 'XMM5', 'XMM6', 'XMM7', 'XMM8', 'XMM9', 'YMM0', 'YMM1', 'YMM10', 'YMM11', 'YMM12', 'YMM13', 'YMM14', 'YMM15', 'YMM2', 'YMM3', 'YMM4', 'YMM5', 'YMM6', 'YMM7', 'YMM8', 'YMM9']:
self.assertEqual(cpu.getRegister(reg_name),0)
def testRegisterCacheAccess(self):
cpu = self.cpu
cpu.ESI = 0x12345678
self.assertEqual(cpu.ESI, 0x12345678)
cpu.SI = 0xAAAA
self.assertEqual(cpu.SI, 0xAAAA)
cpu.RAX = 0x12345678aabbccdd
self.assertEqual(cpu.ESI, 0x1234AAAA)
cpu.SI = 0xAAAA
self.assertEqual(cpu.SI, 0xAAAA)
def testFlagAccess(self):
'''_flags
'CF': 0x0001,
'PF': 0x0004,
'AF': 0x0010,
'ZF': 0x0040,
'SF': 0x0080,
'DF': 0x0400,
'OF': 0x0800,'''
cpu = self.cpu
cpu.RFLAGS = 0
self.assertFalse(cpu.CF)
self.assertFalse(cpu.PF)
self.assertFalse(cpu.AF)
self.assertFalse(cpu.ZF)
self.assertFalse(cpu.SF)
self.assertFalse(cpu.DF)
self.assertFalse(cpu.OF)
#flag to register CF
cpu.CF = True
self.assertTrue( cpu.RFLAGS&cpu._flags['CF'] !=0)
cpu.CF = False
self.assertTrue( cpu.RFLAGS&cpu._flags['CF'] ==0)
#register to flag CF
cpu.RFLAGS |= cpu._flags['CF']
self.assertTrue(cpu.CF)
cpu.RFLAGS &= ~cpu._flags['CF']
self.assertFalse(cpu.CF)
#flag to register PF
cpu.PF = True
self.assertTrue( cpu.RFLAGS&cpu._flags['PF'] !=0)
cpu.PF = False
self.assertTrue( cpu.RFLAGS&cpu._flags['PF'] ==0)
#register to flag PF
cpu.RFLAGS |= cpu._flags['PF']
self.assertTrue(cpu.PF)
cpu.RFLAGS &= ~cpu._flags['PF']
self.assertFalse(cpu.PF)
#flag to register AF
cpu.AF = True
self.assertTrue( cpu.RFLAGS&cpu._flags['AF'] !=0)
cpu.AF = False
self.assertTrue( cpu.RFLAGS&cpu._flags['AF'] ==0)
#register to flag AF
cpu.RFLAGS |= cpu._flags['AF']
self.assertTrue(cpu.AF)
cpu.RFLAGS &= ~cpu._flags['AF']
self.assertFalse(cpu.AF)
#flag to register ZF
cpu.ZF = True
self.assertTrue( cpu.RFLAGS&cpu._flags['ZF'] !=0)
cpu.ZF = False
self.assertTrue( cpu.RFLAGS&cpu._flags['ZF'] ==0)
#register to flag ZF
cpu.RFLAGS |= cpu._flags['ZF']
self.assertTrue(cpu.ZF)
cpu.RFLAGS &= ~cpu._flags['ZF']
self.assertFalse(cpu.ZF)
#flag to register SF
cpu.SF = True
self.assertTrue( cpu.RFLAGS&cpu._flags['SF'] !=0)
cpu.SF = False
self.assertTrue( cpu.RFLAGS&cpu._flags['SF'] ==0)
#register to flag SF
cpu.RFLAGS |= cpu._flags['SF']
self.assertTrue(cpu.SF)
cpu.RFLAGS &= ~cpu._flags['SF']
self.assertFalse(cpu.SF)
#flag to register DF
cpu.DF = True
self.assertTrue( cpu.RFLAGS&cpu._flags['DF'] !=0)
cpu.DF = False
self.assertTrue( cpu.RFLAGS&cpu._flags['DF'] ==0)
#register to flag DF
cpu.RFLAGS |= cpu._flags['DF']
self.assertTrue(cpu.DF)
cpu.RFLAGS &= ~cpu._flags['DF']
self.assertFalse(cpu.DF)
#flag to register OF
cpu.OF = True
self.assertTrue( cpu.RFLAGS&cpu._flags['OF'] !=0)
cpu.OF = False
self.assertTrue( cpu.RFLAGS&cpu._flags['OF'] ==0)
#register to flag
cpu.RFLAGS |= cpu._flags['OF']
self.assertTrue(cpu.OF)
cpu.RFLAGS &= ~cpu._flags['OF']
self.assertFalse(cpu.OF)
def testRegisterAccess(self):
cpu = self.cpu
#initially zero
self.assertEqual(cpu.EAX, 0)
cpu.EAX += 1
self.assertEqual(cpu.EAX, 1)
cpu.EAX = 0x8000000
self.assertEqual(cpu.EAX, 0x8000000)
cpu.EAX = 0xff000000
self.assertEqual(cpu.EAX, 0xff000000)
cpu.EAX = 0x00ff0000
self.assertEqual(cpu.EAX, 0x00ff0000)
cpu.EAX = 0x0000ff00
self.assertEqual(cpu.EAX, 0x0000ff00)
cpu.EAX = 0xff
self.assertEqual(cpu.EAX, 0xff)
#overflow shall be discarded
cpu.EAX = 0x100000000
self.assertEqual(cpu.EAX, 0x00000000)
#partial register access
cpu.EAX = 0x11223344
self.assertEqual(cpu.EAX, 0x11223344)
self.assertEqual(cpu.AX, 0x3344)
self.assertEqual(cpu.AH, 0x33)
self.assertEqual(cpu.AL, 0x44)
#partial register mod
cpu.AL=0xDD
self.assertEqual(cpu.EAX, 0x112233DD)
self.assertEqual(cpu.AX, 0x33DD)
self.assertEqual(cpu.AH, 0x33)
self.assertEqual(cpu.AL, 0xDD)
cpu.AH=0xCC
self.assertEqual(cpu.EAX, 0x1122CCDD)
self.assertEqual(cpu.AX, 0xCCDD)
self.assertEqual(cpu.AH, 0xCC)
self.assertEqual(cpu.AL, 0xDD)
#partial register mod is truncated
cpu.AL=0x1234DD
self.assertEqual(cpu.EAX, 0x1122CCDD)
self.assertEqual(cpu.AX, 0xCCDD)
self.assertEqual(cpu.AH, 0xCC)
self.assertEqual(cpu.AL, 0xDD)
cpu.EDX = 0x8048c50
self.assertEqual(cpu.EDX, 0x8048c50)
if __name__ == '__main__':
unittest.main()
|
|
"""
Title: Graph attention network (GAT) for node classification
Author: [akensert](https://github.com/akensert)
Date created: 2021/09/13
Last modified: 2021/12/26
Description: An implementation of a Graph Attention Network (GAT) for node classification.
"""
"""
## Introduction
[Graph neural networks](https://en.wikipedia.org/wiki/Graph_neural_network)
are the preferred neural network architecture for processing data structured as
graphs (for example, social networks or molecule structures), yielding
better results than fully-connected networks or convolutional networks.
In this tutorial, we will implement a specific graph neural network known as a
[Graph Attention Network](https://arxiv.org/abs/1710.10903) (GAT) to predict labels of
scientific papers based on what type of papers cite them (using the
[Cora](https://linqs.soe.ucsc.edu/data) dataset).
### References
For more information on GAT, see the original paper
[Graph Attention Networks](https://arxiv.org/abs/1710.10903) as well as
[DGL's Graph Attention Networks](https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/9_gat.html)
documentation.
"""
"""
### Import packages
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import os
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 6)
pd.set_option("display.max_rows", 6)
np.random.seed(2)
"""
## Obtain the dataset
The preparation of the [Cora dataset](https://linqs.soe.ucsc.edu/data) follows that of the
[Node classification with Graph Neural Networks](https://keras.io/examples/graph/gnn_citations/)
tutorial. Refer to this tutorial for more details on the dataset and exploratory data analysis.
In brief, the Cora dataset consists of two files: `cora.cites` which contains *directed links* (citations) between
papers; and `cora.content` which contains *features* of the corresponding papers and one
of seven labels (the *subject* of the paper).
"""
zip_file = keras.utils.get_file(
fname="cora.tgz",
origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
extract=True,
)
data_dir = os.path.join(os.path.dirname(zip_file), "cora")
citations = pd.read_csv(
os.path.join(data_dir, "cora.cites"),
sep="\t",
header=None,
names=["target", "source"],
)
papers = pd.read_csv(
os.path.join(data_dir, "cora.content"),
sep="\t",
header=None,
names=["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"],
)
class_values = sorted(papers["subject"].unique())
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])
print(citations)
print(papers)
"""
### Split the dataset
"""
# Obtain random indices
random_indices = np.random.permutation(range(papers.shape[0]))
# 50/50 split
train_data = papers.iloc[random_indices[: len(random_indices) // 2]]
test_data = papers.iloc[random_indices[len(random_indices) // 2 :]]
"""
### Prepare the graph data
"""
# Obtain paper indices which will be used to gather node states
# from the graph later on when training the model
train_indices = train_data["paper_id"].to_numpy()
test_indices = test_data["paper_id"].to_numpy()
# Obtain ground truth labels corresponding to each paper_id
train_labels = train_data["subject"].to_numpy()
test_labels = test_data["subject"].to_numpy()
# Define graph, namely an edge tensor and a node feature tensor
edges = tf.convert_to_tensor(citations[["target", "source"]])
node_states = tf.convert_to_tensor(papers.sort_values("paper_id").iloc[:, 1:-1])
# Print shapes of the graph
print("Edges shape:\t\t", edges.shape)
print("Node features shape:", node_states.shape)
"""
## Build the model
GAT takes as input a graph (namely an edge tensor and a node feature tensor) and
outputs \[updated\] node states. The node states are, for each target node, neighborhood
aggregated information of *N*-hops (where *N* is decided by the number of layers of the
GAT). Importantly, in contrast to the
[graph convolutional network](https://arxiv.org/abs/1609.02907) (GCN)
the GAT makes use of attention mechanisms
to aggregate information from neighboring nodes (or *source nodes*). In other words, instead of simply
averaging/summing node states from source nodes (*source papers*) to the target node (*target papers*),
GAT first applies normalized attention scores to each source node state and then sums.
"""
"""
### (Multi-head) graph attention layer
The GAT model implements multi-head graph attention layers. The `MultiHeadGraphAttention`
layer is simply a concatenation (or averaging) of multiple graph attention layers
(`GraphAttention`), each with separate learnable weights `W`. The `GraphAttention` layer
does the following:
Consider inputs node states `h^{l}` which are linearly transformed by `W^{l}`, resulting in `z^{l}`.
For each target node:
1. Computes pair-wise attention scores `a^{l}^{T}(z^{l}_{i}||z^{l}_{j})` for all `j`,
resulting in `e_{ij}` (for all `j`).
`||` denotes a concatenation, `_{i}` corresponds to the target node, and `_{j}`
corresponds to a given 1-hop neighbor/source node.
2. Normalizes `e_{ij}` via softmax, so that the sum of incoming edges' attention scores
to the target node (`sum_{k}{e_{norm}_{ik}}`) adds up to 1.
3. Applies attention scores `e_{norm}_{ij}` to `z_{j}`
and adds it to the new target node state `h^{l+1}_{i}`, for all `j`.
"""
class GraphAttention(layers.Layer):
def __init__(
self,
units,
kernel_initializer="glorot_uniform",
kernel_regularizer=None,
**kwargs,
):
super().__init__(**kwargs)
self.units = units
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[0][-1], self.units),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name="kernel",
)
self.kernel_attention = self.add_weight(
shape=(self.units * 2, 1),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name="kernel_attention",
)
self.built = True
def call(self, inputs):
node_states, edges = inputs
# Linearly transform node states
node_states_transformed = tf.matmul(node_states, self.kernel)
# (1) Compute pair-wise attention scores
node_states_expanded = tf.gather(node_states_transformed, edges)
node_states_expanded = tf.reshape(
node_states_expanded, (tf.shape(edges)[0], -1)
)
attention_scores = tf.nn.leaky_relu(
tf.matmul(node_states_expanded, self.kernel_attention)
)
attention_scores = tf.squeeze(attention_scores, -1)
# (2) Normalize attention scores
attention_scores = tf.math.exp(tf.clip_by_value(attention_scores, -2, 2))
attention_scores_sum = tf.math.unsorted_segment_sum(
data=attention_scores,
segment_ids=edges[:, 0],
num_segments=tf.reduce_max(edges[:, 0]) + 1,
)
attention_scores_sum = tf.repeat(
attention_scores_sum, tf.math.bincount(tf.cast(edges[:, 0], "int32"))
)
attention_scores_norm = attention_scores / attention_scores_sum
# (3) Gather node states of neighbors, apply attention scores and aggregate
node_states_neighbors = tf.gather(node_states_transformed, edges[:, 1])
out = tf.math.unsorted_segment_sum(
data=node_states_neighbors * attention_scores_norm[:, tf.newaxis],
segment_ids=edges[:, 0],
num_segments=tf.shape(node_states)[0],
)
return out
class MultiHeadGraphAttention(layers.Layer):
def __init__(self, units, num_heads=8, merge_type="concat", **kwargs):
super().__init__(**kwargs)
self.num_heads = num_heads
self.merge_type = merge_type
self.attention_layers = [GraphAttention(units) for _ in range(num_heads)]
def call(self, inputs):
atom_features, pair_indices = inputs
# Obtain outputs from each attention head
outputs = [
attention_layer([atom_features, pair_indices])
for attention_layer in self.attention_layers
]
# Concatenate or average the node states from each head
if self.merge_type == "concat":
outputs = tf.concat(outputs, axis=-1)
else:
outputs = tf.reduce_mean(tf.stack(outputs, axis=-1), axis=-1)
# Activate and return node states
return tf.nn.relu(outputs)
"""
### Implement training logic with custom `train_step`, `test_step`, and `predict_step` methods
Notice, the GAT model operates on the entire graph (namely, `node_states` and
`edges`) in all phases (training, validation and testing). Hence, `node_states` and
`edges` are passed to the constructor of the `keras.Model` and used as attributes.
The difference between the phases is the set of indices (and labels), which is
used to gather certain outputs (`tf.gather(outputs, indices)`).
"""
class GraphAttentionNetwork(keras.Model):
def __init__(
self,
node_states,
edges,
hidden_units,
num_heads,
num_layers,
output_dim,
**kwargs,
):
super().__init__(**kwargs)
self.node_states = node_states
self.edges = edges
self.preprocess = layers.Dense(hidden_units * num_heads, activation="relu")
self.attention_layers = [
MultiHeadGraphAttention(hidden_units, num_heads) for _ in range(num_layers)
]
self.output_layer = layers.Dense(output_dim)
def call(self, inputs):
node_states, edges = inputs
x = self.preprocess(node_states)
for attention_layer in self.attention_layers:
x = attention_layer([x, edges]) + x
outputs = self.output_layer(x)
return outputs
def train_step(self, data):
indices, labels = data
with tf.GradientTape() as tape:
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute loss
loss = self.compiled_loss(labels, tf.gather(outputs, indices))
# Compute gradients
grads = tape.gradient(loss, self.trainable_weights)
# Apply gradients (update weights)
optimizer.apply_gradients(zip(grads, self.trainable_weights))
# Update metric(s)
self.compiled_metrics.update_state(labels, tf.gather(outputs, indices))
return {m.name: m.result() for m in self.metrics}
def predict_step(self, data):
indices = data
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute probabilities
return tf.nn.softmax(tf.gather(outputs, indices))
def test_step(self, data):
indices, labels = data
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute loss
loss = self.compiled_loss(labels, tf.gather(outputs, indices))
# Update metric(s)
self.compiled_metrics.update_state(labels, tf.gather(outputs, indices))
return {m.name: m.result() for m in self.metrics}
"""
### Train and evaluate
"""
# Define hyper-parameters
HIDDEN_UNITS = 100
NUM_HEADS = 8
NUM_LAYERS = 3
OUTPUT_DIM = len(class_values)
NUM_EPOCHS = 100
BATCH_SIZE = 256
VALIDATION_SPLIT = 0.1
LEARNING_RATE = 3e-1
MOMENTUM = 0.9
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.SGD(LEARNING_RATE, momentum=MOMENTUM)
accuracy_fn = keras.metrics.SparseCategoricalAccuracy(name="acc")
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_acc", min_delta=1e-5, patience=5, restore_best_weights=True
)
# Build model
gat_model = GraphAttentionNetwork(
node_states, edges, HIDDEN_UNITS, NUM_HEADS, NUM_LAYERS, OUTPUT_DIM
)
# Compile model
gat_model.compile(loss=loss_fn, optimizer=optimizer, metrics=[accuracy_fn])
gat_model.fit(
x=train_indices,
y=train_labels,
validation_split=VALIDATION_SPLIT,
batch_size=BATCH_SIZE,
epochs=NUM_EPOCHS,
callbacks=[early_stopping],
verbose=2,
)
_, test_accuracy = gat_model.evaluate(x=test_indices, y=test_labels, verbose=0)
print("--" * 38 + f"\nTest Accuracy {test_accuracy*100:.1f}%")
"""
### Predict (probabilities)
"""
test_probs = gat_model.predict(x=test_indices)
mapping = {v: k for (k, v) in class_idx.items()}
for i, (probs, label) in enumerate(zip(test_probs[:10], test_labels[:10])):
print(f"Example {i+1}: {mapping[label]}")
for j, c in zip(probs, class_idx.keys()):
print(f"\tProbability of {c: <24} = {j*100:7.3f}%")
print("---" * 20)
"""
## Conclusions
The results look OK! The GAT model seems to correctly predict the subjects of the papers,
based on what they cite, about 80% of the time. Further improvements could be
made by fine-tuning the hyper-parameters of the GAT. For instance, try changing the number of layers,
the number of hidden units, or the optimizer/learning rate; add regularization (e.g., dropout);
or modify the preprocessing step. We could also try to implement *self-loops*
(i.e., paper X cites paper X) and/or make the graph *undirected*.
"""
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import json
import time
import base64
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from datetime import date, datetime
from decimal import Decimal
from MySQLdb.constants import FIELD_TYPE
from tempfile import NamedTemporaryFile
from six import string_types
PY3 = sys.version_info[0] == 3
class MySqlToGoogleCloudStorageOperator(BaseOperator):
"""
Copy data from MySQL to Google cloud storage in JSON format.
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
mysql_conn_id='mysql_default',
google_cloud_storage_conn_id='google_cloud_default',
schema=None,
delegate_to=None,
*args,
**kwargs):
"""
:param sql: The SQL to execute on the MySQL table.
:type sql: string
:param bucket: The bucket to upload to.
:type bucket: string
:param filename: The filename to use as the object name when uploading
to Google cloud storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: string
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from MySQL.
:type schema_filename: string
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). Google cloud storage allows for files
to be a maximum of 4GB. This param allows developers to specify the
file size of the splits.
:type approx_max_file_size_bytes: long
:param mysql_conn_id: Reference to a specific MySQL hook.
:type mysql_conn_id: string
:param google_cloud_storage_conn_id: Reference to a specific Google
cloud storage hook.
:type google_cloud_storage_conn_id: string
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using a Jinja template, otherwise pass a list of
dicts. Examples can be seen at:
https://cloud.google.com/bigquery/docs/schemas#specifying_a_json_schema_file
:type schema: str or list
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
"""
super(MySqlToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.mysql_conn_id = mysql_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.schema = schema
self.delegate_to = delegate_to
def execute(self, context):
cursor = self._query_mysql()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.update(self._write_local_schema_file(cursor))
# Flush all files before uploading.
for file_handle in files_to_upload.values():
file_handle.flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for file_handle in files_to_upload.values():
file_handle.close()
def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self._convert_types(schema, col_type_dict, row)
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
s = json.dumps(row_dict)
if PY3:
s = s.encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema_str = None
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
if self.schema is not None and isinstance(self.schema, string_types):
schema_str = self.schema
elif self.schema is not None and isinstance(self.schema, list):
schema_str = json.dumps(self.schema)
else:
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
if field[6] or field_type == 'TIMESTAMP':
field_mode = 'NULLABLE'
else:
field_mode = 'REQUIRED'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
schema_str = json.dumps(schema)
if PY3:
schema_str = schema_str.encode('utf-8')
tmp_schema_file_handle.write(schema_str)
self.log.info('Using schema for %s: %s', self.schema_filename, schema_str)
return {self.schema_filename: tmp_schema_file_handle}
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google cloud storage.
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
for object_name, tmp_file_handle in files_to_upload.items():
hook.upload(self.bucket, object_name, tmp_file_handle.name, 'application/json')
@staticmethod
def _convert_types(schema, col_type_dict, row):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats. Binary type fields are encoded with base64,
as imported BYTES data must be base64-encoded according to the BigQuery
data type documentation: https://cloud.google.com/bigquery/data-types
"""
converted_row = []
for col_name, col_val in zip(schema, row):
if type(col_val) in (datetime, date):
col_val = time.mktime(col_val.timetuple())
elif isinstance(col_val, Decimal):
col_val = float(col_val)
elif col_type_dict.get(col_name) == "BYTES":
col_val = base64.standard_b64encode(col_val)
if PY3:
col_val = col_val.decode('ascii')
else:
col_val = col_val
converted_row.append(col_val)
return converted_row
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, string_types):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
self.log.warn('Using default schema due to unexpected type. '
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warn('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
@classmethod
def type_map(cls, mysql_type):
"""
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DATE: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.YEAR: 'INTEGER',
}
return d.get(mysql_type, 'STRING')
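# Illustrative usage sketch (hypothetical DAG id, SQL, bucket and connection
# ids). Operators like this are normally instantiated from a DAG definition
# file rather than here; the function below is never called and only shows the
# intended wiring.
def _example_usage():
    from airflow import DAG
    dag = DAG('example_mysql_to_gcs', start_date=datetime(2018, 1, 1))
    return MySqlToGoogleCloudStorageOperator(
        task_id='export_users',
        sql='SELECT * FROM users',
        bucket='my-example-bucket',
        filename='exports/users_{}.json',
        schema_filename='exports/users_schema.json',
        mysql_conn_id='mysql_default',
        google_cloud_storage_conn_id='google_cloud_default',
        dag=dag,
    )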
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide utility functions for implementing the ``bokeh`` command.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import contextlib
import errno
import os
import sys
import warnings
from typing import Dict, Iterator, List
# Bokeh imports
from bokeh.application import Application
from bokeh.application.handlers import (
DirectoryHandler,
Handler,
NotebookHandler,
ScriptHandler,
)
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'build_single_handler_application',
'build_single_handler_applications',
'die',
'report_server_init_errors',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def die(message: str, status: int = 1) -> None:
''' Print an error message and exit.
This function will call ``sys.exit`` with the given ``status`` and the
process will terminate.
Args:
message (str) : error message to print
status (int) : the exit status to pass to ``sys.exit``
'''
print(message, file=sys.stderr)
sys.exit(status)
DIRSTYLE_MAIN_WARNING = """
It looks like you might be running the main.py of a directory app directly.
If this is the case, to enable the features of directory style apps, you must
call "bokeh serve" on the directory instead. For example:
bokeh serve my_app_dir/
If this is not the case, renaming main.py will suppress this warning.
"""
def build_single_handler_application(path: str, argv: List[str] | None = None) -> Application:
''' Return a Bokeh application built using a single handler for a script,
notebook, or directory.
In general a Bokeh :class:`~bokeh.application.application.Application` may
have any number of handlers to initialize |Document| objects for new client
sessions. However, in many cases only a single handler is needed. This
function examines the ``path`` provided, and returns an ``Application``
initialized with one of the following handlers:
* :class:`~bokeh.application.handlers.script.ScriptHandler` when ``path``
is to a ``.py`` script.
* :class:`~bokeh.application.handlers.notebook.NotebookHandler` when
``path`` is to an ``.ipynb`` Jupyter notebook.
* :class:`~bokeh.application.handlers.directory.DirectoryHandler` when
``path`` is to a directory containing a ``main.py`` script.
Args:
path (str) : path to a file or directory for creating a Bokeh
application.
argv (seq[str], optional) : command line arguments to pass to the
application handler
Returns:
:class:`~bokeh.application.application.Application`
Raises:
RuntimeError
Notes:
If ``path`` ends with a file ``main.py`` then a warning will be printed
regarding running directory-style apps by passing the directory instead.
'''
argv = argv or []
path = os.path.abspath(path)
handler: Handler
# There are certainly race conditions here if the file/directory is deleted
# in between the isdir/isfile tests and subsequent code. But it would be a
# failure if they were not there to begin with, too (just a different error)
if os.path.isdir(path):
handler = DirectoryHandler(filename=path, argv=argv)
elif os.path.isfile(path):
if path.endswith(".ipynb"):
handler = NotebookHandler(filename=path, argv=argv)
elif path.endswith(".py"):
if path.endswith("main.py"):
warnings.warn(DIRSTYLE_MAIN_WARNING)
handler = ScriptHandler(filename=path, argv=argv)
else:
raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
else:
raise ValueError("Path for Bokeh server application does not exist: %s" % path)
if handler.failed:
raise RuntimeError("Error loading %s:\n\n%s\n%s " % (path, handler.error, handler.error_detail))
application = Application(handler)
return application
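# Illustrative usage sketch (the path below is hypothetical and never touched):
# a ``.py`` script yields a ScriptHandler-backed application, an ``.ipynb``
# notebook a NotebookHandler, and a directory containing ``main.py`` a
# DirectoryHandler.
def _example_build_application() -> Application:
    return build_single_handler_application("examples/my_app.py", argv=[])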
def build_single_handler_applications(paths: List[str], argvs: Dict[str, List[str]] | None = None) -> Dict[str, Application]:
''' Return a dictionary mapping routes to Bokeh applications built using
single handlers, for specified files or directories.
This function iterates over ``paths`` and ``argvs`` and calls
:func:`~bokeh.command.util.build_single_handler_application` on each
to generate the mapping.
Args:
paths (seq[str]) : paths to files or directories for creating Bokeh
applications.
argvs (dict[str, list[str]], optional) : mapping of paths to command
line arguments to pass to the handler for each path
Returns:
dict[str, Application]
Raises:
RuntimeError
'''
applications: Dict[str, Application] = {}
argvs = argvs or {}
for path in paths:
application = build_single_handler_application(path, argvs.get(path, []))
route = application.handlers[0].url_path()
if not route:
if '/' in applications:
raise RuntimeError("Don't know the URL path to use for %s" % (path))
route = '/'
applications[route] = application
return applications
@contextlib.contextmanager
def report_server_init_errors(address: str | None = None, port: int | None = None, **kwargs: str) -> Iterator[None]:
''' A context manager to help print more informative error messages when a
``Server`` cannot be started due to a network problem.
Args:
address (str) : network address that the server will be listening on
port (int) : network address that the server will be listening on
Example:
.. code-block:: python
with report_server_init_errors(**server_kwargs):
server = Server(applications, **server_kwargs)
If there are any errors (e.g. port or address already in use) then a
critical error will be logged and the process will terminate with a
call to ``sys.exit(1)``
'''
try:
yield
except OSError as e:
if e.errno == errno.EADDRINUSE:
log.critical("Cannot start Bokeh server, port %s is already in use", port)
elif e.errno == errno.EADDRNOTAVAIL:
log.critical("Cannot start Bokeh server, address '%s' not available", address)
else:
codename = errno.errorcode[e.errno]
log.critical("Cannot start Bokeh server [%s]: %r", codename, e)
sys.exit(1)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
#!/usr/bin/env python
# cheat.py
__author__ = "Kody Brown ([email protected])"
__created__ = "10/23/2013"
__copyright__ = "(C) 2013 Kody Brown. Released under the MIT License."
__contributors__ = ["Kody Brown"]
__version__ = "0.21"
import glob
import os
import sys
import shutil, errno
from urllib.request import urlopen
__debug = False
sheetPath = ""
downloadUrls = []
def debug( a, b = "", c = "", d = ""):
global __debug
if __debug:
if len(str(b)) > 0:
if a != None:
print("DEBUG:", a.ljust(14, ' ') + " =", b, c, d)
else:
print("DEBUG:", ' '.ljust(14, ' ') + " =", b, c, d)
else:
print("DEBUG:", a)
def getSheetPath():
try:
path = os.environ["AppData"]
except KeyError:
path = None
if path == None or len(path.strip()) == 0:
return "."
else:
p = os.path.join(path, ".cheats")
if os.path.exists(p) and not os.path.isdir(p):
raise ValueError("Could not create directory: a file of the same name is in the way")
if not os.path.isdir(p):
os.makedirs(p)
return p
def getDownloadUrls():
return [
"https://raw.github.com/kodybrown/cheats/master/files/"
]
def getFileName( filename ):
pos = filename.rfind(os.sep)
if pos > -1:
return filename[pos + 1:]
return ""
def validateFile( filename, forceDownload ):
global __debug, sheetPath, downloadUrls
destfile = os.path.join(sheetPath, filename)
debug('sheetPath', sheetPath)
debug('filename', filename)
debug('destfile', destfile)
if forceDownload or not os.path.exists(destfile):
for d in downloadUrls:
url = d + filename
debug('url', url)
if downloadFile(url, destfile):
break
if not os.path.exists(destfile):
print("could not find sheet..")
return False
else:
return True
def downloadFile( url, filename ):
global __debug, sheetPath, downloadUrls
if __debug:
print("saving '" + url + "'\r\n to '" + filename + "'")
try:
ret = urlopen(url)
if ret.code == 200:
output = open(filename, 'wb')
output.write(ret.read())
output.close()
if __debug:
print("successfully downloaded file")
return True
else:
print("failed to download file: " + str(ret.code))
return False
except Exception:
print("failed to download file: an exception occurred")
return False
def listLocal():
global sheetPath, downloadUrls, noHeader
if not noHeader:
print("")
print("# Local Cheat Sheets")
print(" located at: " + sheetPath + "")
print("")
files = glob.glob(os.path.join(sheetPath, "*"))
fileIndex = 0
for f in files:
fileIndex += 1
print("" + getFileName(f))
if fileIndex == 0:
print("there are no local files")
def listServer():
global sheetPath, downloadUrls, noHeader
for u in downloadUrls:
if not noHeader:
print("")
print("# Server Cheat Sheets")
print(" location: " + u + "")
print(" files that exist locally are denoted with an asterisk (*)")
print("")
files = ["<not implemented>"] # TODO Get the list of files on the server..
fileIndex = 0
for f in files:
fileIndex += 1
islocal = "*" if os.path.exists(os.path.join(sheetPath, f)) else ""
print("" + f + islocal)
if fileIndex == 0:
print("there are no files on this server")
def removeSheet( sheetname ):
global sheetPath
fullpath = os.path.join(sheetPath, sheetname)
if os.path.exists(fullpath):
try:
os.remove(fullpath)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT == no such file or directory
raise # re-raise exception if a different error occurred
def usage():
print("cheat.py - command-line cheat sheets, that works in Windows.")
print("")
print("USAGE:")
print(" cheat.py [options] name")
print("")
print(" name The name of the cheet sheat to view.")
print(" If it does not already exist in the local cache it will be downloaded then displayed.")
print("")
print("OPTIONS:")
print(" --debug Outputs (a lot of) additional details about what is going on, etc.")
print("")
print(" --list Lists the local sheets.")
print(" --download Forces download even if it already exists locally.")
print("")
# TODO
# --search <keyword>
# --remove
# --create
# --update
# --push|upload
def main():
global __debug, sheetPath, downloadUrls, noHeader
sheetPath = getSheetPath()
downloadUrls = getDownloadUrls()
debug("sheetPath", sheetPath)
debug("downloadUrls", downloadUrls)
forceDownload = False
noHeader = False
args = []
if len(sys.argv) < 2:
usage()
sys.exit()
for a in sys.argv[1:]:
if len(a.strip()) == 0:
continue
debug("arg", a)
if a[:1] in ("-", "/", "!"):
while a[:1] in ("-", "/"):
a = a[1:]
al = a.lower()
if al in ("?", "help"):
usage()
sys.exit(0)
elif al == "debug":
__debug = True
elif al == "noheader":
noHeader = True
elif al in ("download", "force"):
forceDownload = True
elif al in ("!download", "!force"):
forceDownload = False
elif al in ("list", "listlocal"):
listLocal()
elif al in ("list-server", "listserver"):
listServer()
elif al.startswith("remove:") or al.startswith("delete:"):
removeSheet(a[7:])
else:
al = a.lower()
if al == "debug":
__debug = True
elif al == "force":
forceDownload = True
elif al in ("list", "listlocal", "list-local"):
listLocal()
elif al in ("listserver", "list-server"):
listServer()
elif al.startswith("remove:"):
removeSheet(a[7:])
elif a is not None and len(a) > 0:
args.append(a)
if __debug:
debug("forceDownload", forceDownload)
debug("sheetPath", sheetPath)
debug("args", args)
if len(args) > 0:
if not validateFile(args[0], forceDownload):
sys.exit(1)
destfile = os.path.join(sheetPath, args[0])
# with open(destfile, "r") as f:
# content = f.read()
# # TODO fancy print the file's content..
# print(content)
# my cat utility already handles outputting files via cli..
# https://github.com/kodybrown/cat
debug("executing", "cat -w --force-plugin:md " + destfile)
os.system("cat -w --force-plugin:markdown " + destfile)
sys.exit(0)
if __name__ == "__main__":
main()
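# Illustrative invocations (a sketch; assumes this script is saved as cheat.py and
# the sheet name "git" is only an example). The flags map to the branches handled
# in main() above:
#
#   python cheat.py git              view "git", downloading it first if not cached
#   python cheat.py --download git   force a re-download even if it exists locally
#   python cheat.py --list           list the sheets in the local cache
#   python cheat.py --list-server    list the sheets available on the server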
|
|
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" This is the main actions of Nuitka.
This can do all the steps to translate one module to a target language using
the Python C/API, to compile it to either an executable or an extension module.
"""
import os
import shutil
import subprocess
import sys
from logging import info, warning
from nuitka.importing import Importing, Recursion
from nuitka.plugins.PluginBase import Plugins
from nuitka.PythonVersions import isUninstalledPython
from nuitka.tree import SyntaxErrors
from nuitka.utils import InstanceCounters, Utils
from . import ModuleRegistry, Options, Tracing, TreeXML
from .build import SconsInterface
from .codegen import CodeGeneration, ConstantCodes, MainCodes
from .finalizations import Finalization
from .freezer.BytecodeModuleFreezer import (
addFrozenModule,
generateBytecodeFrozenCode,
getFrozenModuleCount
)
from .freezer.Standalone import (
copyUsedDLLs,
detectEarlyImports,
detectLateImports
)
from .optimizations import Optimization
from .tree import Building
def createNodeTree(filename):
""" Create a node tree.
Turn that source code into a node tree structure. If recursion into
imported modules is available, more trees will be available during
optimization, or immediately through recursed directory paths.
"""
# First, build the raw node tree from the source code.
main_module = Building.buildModuleTree(
filename = filename,
package = None,
is_top = True,
is_main = not Options.shallMakeModule()
)
ModuleRegistry.addRootModule(main_module)
# First remove old object files and old generated files, old binary or
# module, and standalone mode program directory if any, they can only do
# harm.
source_dir = getSourceDirectoryPath(main_module)
if not Options.shallOnlyExecCppCall():
cleanSourceDirectory(source_dir)
if Options.isStandaloneMode():
standalone_dir = getStandaloneDirectoryPath(main_module)
shutil.rmtree(standalone_dir, ignore_errors = True)
Utils.makePath(standalone_dir)
Utils.deleteFile(
path = getResultFullpath(main_module),
must_exist = False
)
# Second, do it for the directories given.
for plugin_filename in Options.getShallFollowExtra():
Recursion.checkPluginPath(
plugin_filename = plugin_filename,
module_package = None
)
for pattern in Options.getShallFollowExtraFilePatterns():
Recursion.checkPluginFilenamePattern(
pattern = pattern
)
# Then optimize the tree and potentially recursed modules.
Optimization.optimize()
return main_module
def dumpTreeXML(tree):
xml_root = tree.asXml()
TreeXML.dump(xml_root)
def displayTree(tree):
# Import only locally so the Qt4 dependency doesn't normally come into play
# when it's not strictly needed.
from .gui import TreeDisplay
TreeDisplay.displayTreeInspector(tree)
def getTreeFilenameWithSuffix(tree, suffix):
return tree.getOutputFilename() + suffix
def getSourceDirectoryPath(main_module):
assert main_module.isPythonModule()
return Options.getOutputPath(
path = Utils.basename(
getTreeFilenameWithSuffix(main_module, ".build")
)
)
def getStandaloneDirectoryPath(main_module):
return Options.getOutputPath(
path = Utils.basename(
getTreeFilenameWithSuffix(main_module, ".dist")
)
)
def getResultBasepath(main_module):
assert main_module.isPythonModule()
if Options.isStandaloneMode():
return Utils.joinpath(
getStandaloneDirectoryPath(main_module),
Utils.basename(
getTreeFilenameWithSuffix(main_module, "")
)
)
else:
return Options.getOutputPath(
path = Utils.basename(
getTreeFilenameWithSuffix(main_module, "")
)
)
def getResultFullpath(main_module):
result = getResultBasepath(main_module)
if Options.shallMakeModule():
if Utils.getOS() == "Windows":
result += ".pyd"
else:
result += ".so"
else:
result += ".exe"
return result
def cleanSourceDirectory(source_dir):
if Utils.isDir(source_dir):
for path, _filename in Utils.listDir(source_dir):
if Utils.getExtension(path) in (".cpp", ".hpp", ".c", ".o", ".os",
".obj", ".bin", ".res", ".rc",
".manifest"):
Utils.deleteFile(path, True)
else:
Utils.makePath(source_dir)
static_source_dir = Utils.joinpath(
source_dir,
"static"
)
if Utils.isDir(static_source_dir):
for path, _filename in Utils.listDir(static_source_dir):
if Utils.getExtension(path) in (".o", ".os", ".obj"):
Utils.deleteFile(path, True)
win32_source_dir = Utils.joinpath(
static_source_dir,
"win32_ucontext_src"
)
if Utils.getOS() == "Windows":
Utils.deleteFile(
Utils.joinpath(win32_source_dir, "fibers_win32.obj"),
False
)
def pickSourceFilenames(source_dir, modules):
collision_filenames = set()
seen_filenames = set()
# Our output.
module_filenames = {}
def getFilenames(module):
base_filename = Utils.joinpath(
source_dir,
"module." + module.getFullName()
if not module.isInternalModule() else
module.getFullName()
)
        # Note: Could detect if the file system is case sensitive in source_dir
# or not, but that's probably not worth the effort. False positives do
# no harm at all.
collision_filename = Utils.normcase(base_filename)
return base_filename, collision_filename
# First pass, check for collisions.
for module in modules:
if module.isPythonShlibModule():
continue
        base_filename, collision_filename = getFilenames(module)
if collision_filename in seen_filenames:
collision_filenames.add(collision_filename)
seen_filenames.add(collision_filename)
# Count up for colliding filenames.
collision_counts = {}
# Second pass, this time sorted, so we get deterministic results. We will
# apply an @1/@2 to disambiguate the filenames.
for module in sorted(modules, key = lambda x : x.getFullName()):
if module.isPythonShlibModule():
continue
        base_filename, collision_filename = getFilenames(module)
if collision_filename in collision_filenames:
collision_counts[ collision_filename ] = \
collision_counts.get(collision_filename, 0) + 1
hash_suffix = "@%d" % collision_counts[ collision_filename ]
else:
hash_suffix = ""
base_filename += hash_suffix
module_filenames[module] = base_filename + ".cpp"
return module_filenames
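# Illustrative sketch (not part of Nuitka): the same two-pass "@1/@2" disambiguation
# idea used by pickSourceFilenames above, applied to plain strings so the collision
# handling is easy to see in isolation. The helper and the names are hypothetical,
# and lower() stands in for Utils.normcase().
def _demo_disambiguate(names):
    # First pass, detect case-insensitive collisions.
    seen = set()
    collisions = set()
    for name in names:
        key = name.lower()
        if key in seen:
            collisions.add(key)
        seen.add(key)
    # Second pass, sorted for deterministic results, append @1/@2/... to colliders.
    counts = {}
    result = {}
    for name in sorted(names):
        key = name.lower()
        if key in collisions:
            counts[key] = counts.get(key, 0) + 1
            result[name] = "%s@%d" % (name, counts[key])
        else:
            result[name] = name
    return result
# For example, _demo_disambiguate(["Foo", "foo", "bar"]) returns
# {"Foo": "Foo@1", "bar": "bar", "foo": "foo@2"}.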
standalone_entry_points = []
def makeSourceDirectory(main_module):
    # We deal with a lot of details here, but rather one by one, and splitting it
    # up makes no sense, pylint: disable=R0912,R0914
assert main_module.isPythonModule()
# The global context used to generate code.
global_context = CodeGeneration.makeGlobalContext()
# Get the full list of modules imported, create code for all of them.
modules = ModuleRegistry.getDoneModules()
assert main_module in modules
# Sometimes we need to talk about all modules except main module.
other_modules = ModuleRegistry.getDoneUserModules()
    # Let's check if the recurse-to modules are actually present, and warn the
# user if one was not found.
for any_case_module in Options.getShallFollowModules():
for module in other_modules:
if module.getFullName() == any_case_module:
break
else:
warning(
"Didn't recurse to '%s', apparently not used." % \
any_case_module
)
# Prepare code generation, i.e. execute finalization for it.
for module in sorted(modules, key = lambda x : x.getFullName()):
if module.isPythonModule():
Finalization.prepareCodeGeneration(module)
# Pick filenames.
source_dir = getSourceDirectoryPath(main_module)
module_filenames = pickSourceFilenames(
source_dir = source_dir,
modules = modules
)
# First pass, generate code and use constants doing so, but prepare the
# final code generation only, because constants code will be added at the
# end only.
prepared_modules = {}
for module in sorted(modules, key = lambda x : x.getFullName()):
if module.isPythonModule():
cpp_filename = module_filenames[module]
prepared_modules[cpp_filename] = CodeGeneration.prepareModuleCode(
global_context = global_context,
module = module,
module_name = module.getFullName(),
other_modules = other_modules
if module is main_module else
()
)
# Main code constants need to be allocated already too.
if module is main_module and not Options.shallMakeModule():
prepared_modules[cpp_filename][1].getConstantCode(0)
for module in sorted(modules, key = lambda x : x.getFullName()):
if module.isPythonModule():
cpp_filename = module_filenames[module]
template_values, module_context = prepared_modules[cpp_filename]
source_code = CodeGeneration.generateModuleCode(
module_context = module_context,
template_values = template_values
)
# The main of an executable module gets a bit different code.
if module is main_module and not Options.shallMakeModule():
source_code = MainCodes.generateMainCode(
main_module = main_module,
context = module_context,
codes = source_code
)
writeSourceCode(
filename = cpp_filename,
source_code = source_code
)
if Options.isShowInclusion():
info("Included compiled module '%s'." % module.getFullName())
elif module.isPythonShlibModule():
target_filename = Utils.joinpath(
getStandaloneDirectoryPath(main_module),
*module.getFullName().split('.')
)
if Utils.getOS() == "Windows":
target_filename += ".pyd"
else:
target_filename += ".so"
target_dir = Utils.dirname(target_filename)
if not Utils.isDir(target_dir):
Utils.makePath(target_dir)
shutil.copy(
module.getFilename(),
target_filename
)
standalone_entry_points.append(
(target_filename, module.getPackage())
)
else:
assert False, module
writeSourceCode(
filename = Utils.joinpath(source_dir, "__constants.cpp"),
source_code = ConstantCodes.getConstantsDefinitionCode(
context = global_context
)
)
helper_decl_code, helper_impl_code = CodeGeneration.generateHelpersCode()
writeSourceCode(
filename = Utils.joinpath(source_dir, "__helpers.hpp"),
source_code = helper_decl_code
)
writeSourceCode(
filename = Utils.joinpath(source_dir, "__helpers.cpp"),
source_code = helper_impl_code
)
def runScons(main_module, quiet):
    # Scons is passed many details, which we express as variables and have checks
    # for them, leading to many branches, pylint: disable=R0912
python_version = "%d.%d" % (sys.version_info[0], sys.version_info[1])
if hasattr(sys, "abiflags"):
if Options.isPythonDebug() or \
hasattr(sys, "getobjects"):
if sys.abiflags.startswith('d'):
python_version += sys.abiflags
else:
python_version += 'd' + sys.abiflags
else:
python_version += sys.abiflags
def asBoolStr(value):
return "true" if value else "false"
options = {
"name" : Utils.basename(
getTreeFilenameWithSuffix(main_module, "")
),
"result_name" : getResultBasepath(main_module),
"source_dir" : getSourceDirectoryPath(main_module),
"debug_mode" : asBoolStr(Options.isDebug()),
"python_debug" : asBoolStr(Options.isPythonDebug()),
"unstripped_mode" : asBoolStr(Options.isUnstripped()),
"module_mode" : asBoolStr(Options.shallMakeModule()),
"optimize_mode" : asBoolStr(Options.isOptimize()),
"full_compat" : asBoolStr(Options.isFullCompat()),
"experimental" : asBoolStr(Options.isExperimental()),
"trace_mode" : asBoolStr(Options.shallTraceExecution()),
"python_version" : python_version,
"target_arch" : Utils.getArchitecture(),
"python_prefix" : sys.prefix,
"nuitka_src" : SconsInterface.getSconsDataPath(),
"module_count" : "%d" % (
len(ModuleRegistry.getDoneUserModules()) + 1
)
}
# Ask Scons to cache on Windows, except where the directory is thrown
    # away. On non-Windows you should use ccache instead.
if not Options.isRemoveBuildDir() and Utils.getOS() == "Windows":
options["cache_mode"] = "true"
if Options.isLto():
options["lto_mode"] = "true"
if Options.shallDisableConsoleWindow():
options["win_disable_console"] = "true"
if Options.isStandaloneMode():
options["standalone_mode"] = "true"
if not Options.isStandaloneMode() and \
not Options.shallMakeModule() and \
isUninstalledPython():
options["win_copy_dll"] = "true"
if getFrozenModuleCount():
options["frozen_modules"] = str(
getFrozenModuleCount()
)
if Options.isShowScons():
options["show_scons"] = "true"
if Options.isMingw():
options["mingw_mode"] = "true"
if Options.getMsvcVersion():
msvc_version = Options.getMsvcVersion()
msvc_version = msvc_version.replace("exp", "Exp")
if '.' not in msvc_version:
msvc_version += ".0"
options["msvc_version"] = msvc_version
if Options.isClang():
options["clang_mode"] = "true"
if Options.getIconPath():
options["icon_path"] = Options.getIconPath()
return SconsInterface.runScons(options, quiet), options
def writeSourceCode(filename, source_code):
# Prevent accidental overwriting. When this happens the collision detection
# or something else has failed.
assert not Utils.isFile(filename), filename
if Utils.python_version >= 300:
with open(filename, "wb") as output_file:
output_file.write(source_code.encode("latin1"))
else:
with open(filename, 'w') as output_file:
output_file.write(source_code)
def writeBinaryData(filename, binary_data):
# Prevent accidental overwriting. When this happens the collision detection
# or something else has failed.
assert not Utils.isFile(filename), filename
assert type(binary_data) is bytes
with open(filename, "wb") as output_file:
output_file.write(binary_data)
def callExec(args, clean_path, add_path):
old_python_path = os.environ.get("PYTHONPATH", None)
if clean_path and old_python_path is not None:
os.environ["PYTHONPATH"] = ""
if add_path:
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] += ':' + Options.getOutputDir()
else:
os.environ["PYTHONPATH"] = Options.getOutputDir()
# We better flush these, "os.execl" won't do it anymore.
sys.stdout.flush()
sys.stderr.flush()
    # Add the main arguments, previously separated.
args += Options.getMainArgs()
Utils.callExec(args)
def executeMain(binary_filename, tree, clean_path):
main_filename = tree.getFilename()
if Options.isStandaloneMode():
name = binary_filename
elif main_filename.endswith(".py"):
name = main_filename[:-3]
else:
name = main_filename
name = Utils.abspath(name)
args = (binary_filename, name)
callExec(
clean_path = clean_path,
add_path = False,
args = args
)
def executeModule(tree, clean_path):
python_command = "__import__('%s')" % tree.getName()
args = (
sys.executable,
"python",
"-c",
python_command,
)
callExec(
clean_path = clean_path,
add_path = True,
args = args
)
def compileTree(main_module):
source_dir = getSourceDirectoryPath(main_module)
if not Options.shallOnlyExecCppCall():
# Now build the target language code for the whole tree.
makeSourceDirectory(
main_module = main_module
)
if Options.isStandaloneMode():
for late_import in detectLateImports():
addFrozenModule(late_import)
if getFrozenModuleCount():
frozen_code = generateBytecodeFrozenCode()
writeSourceCode(
filename = Utils.joinpath(
source_dir,
"__frozen.cpp"
),
source_code = frozen_code
)
writeBinaryData(
filename = Utils.joinpath(source_dir, "__constants.bin"),
binary_data = ConstantCodes.stream_data.getBytes()
)
else:
source_dir = getSourceDirectoryPath(main_module)
if not Utils.isFile(Utils.joinpath(source_dir, "__helpers.hpp")):
sys.exit("Error, no previous build directory exists.")
if Options.isShowProgress() or Options.isShowMemory():
Tracing.printLine(
"Total memory usage before running scons: {memory}:".format(
memory = Utils.getHumanReadableProcessMemoryUsage()
)
)
if Options.isShowMemory():
InstanceCounters.printStats()
if Options.shallNotDoExecCppCall():
return True, {}
# Run the Scons to build things.
result, options = runScons(
main_module = main_module,
quiet = not Options.isShowScons()
)
return result, options
data_files = []
def main():
""" Main program flow of Nuitka
At this point, options will be parsed already, Nuitka will be executing
in the desired version of Python with desired flags, and we just get
to execute the task assigned.
We might be asked to only re-compile generated C++, dump only an XML
representation of the internal node tree after optimization, etc.
"""
    # Main has to fulfill many options, leading to many branches and statements
# to deal with them. pylint: disable=R0912
positional_args = Options.getPositionalArgs()
assert len(positional_args) > 0
filename = Options.getPositionalArgs()[0]
# Inform the importing layer about the main script directory, so it can use
# it when attempting to follow imports.
Importing.setMainScriptDirectory(
main_dir = Utils.dirname(Utils.abspath(filename))
)
    # Detect modules to be frozen, if any, so we can consider not recursing
    # into them.
if Options.isStandaloneMode():
for early_import in detectEarlyImports():
addFrozenModule(early_import)
if early_import[0] == "site":
origin_prefix_filename = Utils.joinpath(
Utils.dirname(early_import[3]),
"orig-prefix.txt"
)
if Utils.isFile(origin_prefix_filename):
data_files.append(
                        (origin_prefix_filename, "orig-prefix.txt")
)
# Turn that source code into a node tree structure.
try:
main_module = createNodeTree(
filename = filename
)
except (SyntaxError, IndentationError) as e:
# Syntax or indentation errors, output them to the user and abort.
sys.exit(
SyntaxErrors.formatOutput(e)
)
if Options.shallDumpBuiltTreeXML():
for module in ModuleRegistry.getDoneModules():
dumpTreeXML(module)
elif Options.shallDisplayBuiltTree():
displayTree(main_module)
else:
result, options = compileTree(
main_module = main_module
)
# Exit if compilation failed.
if not result:
sys.exit(1)
if Options.shallNotDoExecCppCall():
sys.exit(0)
# Remove the source directory (now build directory too) if asked to.
if Options.isRemoveBuildDir():
shutil.rmtree(
getSourceDirectoryPath(main_module)
)
if Options.isStandaloneMode():
binary_filename = options["result_name"] + ".exe"
standalone_entry_points.insert(
0,
(binary_filename, None)
)
dist_dir = getStandaloneDirectoryPath(main_module)
for module in ModuleRegistry.getDoneUserModules():
standalone_entry_points.extend(
Plugins.considerExtraDlls(dist_dir, module)
)
if Utils.getOS() == "NetBSD":
warning("Standalone mode on NetBSD is not functional, due to $ORIGIN linkage not being supported.")
copyUsedDLLs(
dist_dir = dist_dir,
standalone_entry_points = standalone_entry_points
)
for source_filename, target_filename in data_files:
shutil.copy2(
source_filename,
Utils.joinpath(
getStandaloneDirectoryPath(main_module),
target_filename
)
)
        # Modules should not be executable, but Scons creates them that way; fix
        # it up here.
if Utils.getOS() != "Windows" and Options.shallMakeModule():
subprocess.call(
(
"chmod",
"-x",
getResultFullpath(main_module)
)
)
# Execute the module immediately if option was given.
if Options.shallExecuteImmediately():
if Options.shallMakeModule():
executeModule(
tree = main_module,
clean_path = Options.shallClearPythonPathEnvironment()
)
else:
executeMain(
binary_filename = getResultFullpath(main_module),
tree = main_module,
clean_path = Options.shallClearPythonPathEnvironment()
)
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
RESCAN_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 6
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
        # Create node 5 to test wallet in prune mode, but do not connect
self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"]))
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
print("Success")
print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
print("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
print("Success")
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
print("Current block height:", height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
print("Invalidating block at height:",invalidheight,badhash)
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
print("New best height", self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
print("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
print("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
print("Verify height on node 2:",self.nodes[2].getblockcount())
print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
try:
self.nodes[2].getblock(self.forkhash)
raise AssertionError("Old block wasn't pruned so can't test redownload")
except JSONRPCException as e:
print("Will need to redownload block",self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0"], timewait=900)
assert_equal(node.getblockcount(), 995)
assert_raises_message(JSONRPCException, "not in prune mode", node.pruneblockchain, 500)
self.stop_node(node_number)
# now re-start in manual pruning mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=1"], timewait=900)
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + RESCAN_WINDOW
else:
return index
def prune(index, expected_ret=None):
ret = node.pruneblockchain(height(index))
# Check the return value. When use_timestamp is True, just check
# that the return value is less than or equal to the expected
# value, because when more than one block is generated per second,
# a timestamp will not be granular enough to uniquely identify an
# individual block.
if expected_ret is None:
expected_ret = index
if use_timestamp:
assert_greater_than(ret, 0)
assert_greater_than(expected_ret + 1, ret)
else:
assert_equal(ret, expected_ret)
def has_block(index):
return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_message(JSONRPCException, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
assert_raises_message(JSONRPCException, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# Does nothing
node.pruneblockchain(height(0))
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file
prune(500)
if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file
prune(650)
if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
if not has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs
self.stop_node(node_number)
self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900)
print("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
print("Stop and start pruning node to trigger wallet rescan")
try:
self.stop_node(2)
start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"])
print("Success")
except Exception as detail:
raise AssertionError("Wallet test: unable to re-start the pruning node")
        # check that the wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
print ("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
try:
self.stop_node(5) #stop and start to trigger rescan
start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"])
print ("Success")
except Exception as detail:
raise AssertionError("Wallet test: unable to re-start node5")
def run_test(self):
print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
print("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
print("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
print("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
print("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, we must also perform this invalidation
        # on N0 and mine a new longest chain to trigger it.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
print("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
print("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
print("Test wallet re-scan")
self.wallet_test()
print("Done")
if __name__ == '__main__':
PruneTest().main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import geo_target_constant
from google.ads.googleads.v8.services.types import geo_target_constant_service
from .base import GeoTargetConstantServiceTransport, DEFAULT_CLIENT_INFO
class GeoTargetConstantServiceGrpcTransport(GeoTargetConstantServiceTransport):
"""gRPC backend transport for GeoTargetConstantService.
Service to fetch geo target constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_geo_target_constant(
self,
) -> Callable[
[geo_target_constant_service.GetGeoTargetConstantRequest],
geo_target_constant.GeoTargetConstant,
]:
r"""Return a callable for the get geo target constant method over gRPC.
Returns the requested geo target constant in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetGeoTargetConstantRequest],
~.GeoTargetConstant]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_geo_target_constant" not in self._stubs:
self._stubs[
"get_geo_target_constant"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.GeoTargetConstantService/GetGeoTargetConstant",
request_serializer=geo_target_constant_service.GetGeoTargetConstantRequest.serialize,
response_deserializer=geo_target_constant.GeoTargetConstant.deserialize,
)
return self._stubs["get_geo_target_constant"]
@property
def suggest_geo_target_constants(
self,
) -> Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
]:
r"""Return a callable for the suggest geo target constants method over gRPC.
Returns GeoTargetConstant suggestions by location name or by
resource name.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__
`GeoTargetConstantSuggestionError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.SuggestGeoTargetConstantsRequest],
~.SuggestGeoTargetConstantsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "suggest_geo_target_constants" not in self._stubs:
self._stubs[
"suggest_geo_target_constants"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.GeoTargetConstantService/SuggestGeoTargetConstants",
request_serializer=geo_target_constant_service.SuggestGeoTargetConstantsRequest.serialize,
response_deserializer=geo_target_constant_service.SuggestGeoTargetConstantsResponse.deserialize,
)
return self._stubs["suggest_geo_target_constants"]
__all__ = ("GeoTargetConstantServiceGrpcTransport",)
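# Illustrative usage sketch (not part of the generated module): passing an explicit
# channel makes the transport skip credential lookup (see __init__ above), and
# reading the RPC property only builds and caches the stub; no request is sent.
# The local address below is a hypothetical placeholder.
if __name__ == "__main__":
    _demo_channel = grpc.insecure_channel("localhost:50051")  # placeholder endpoint
    _demo_transport = GeoTargetConstantServiceGrpcTransport(channel=_demo_channel)
    print(_demo_transport.get_geo_target_constant)  # the cached unary-unary stub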
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from decimal import Decimal
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut
from test_framework.script import CScript, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round
from test_framework.script_util import DUMMY_P2WPKH_SCRIPT
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return tx.serialize().hex()
def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransactionwithwallet(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], 0)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert new_size < mempool_size
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
[
"-acceptnonstdtxn=1",
"-maxorphantx=1000",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101",
],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1*COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("Passed")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# make_utxo may have generated a bunch of blocks, so we need to sync
# before we can spend the coins generated, or else the resulting
# transactions might not be accepted by our peers.
self.sync_all()
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
self.sync_all()
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT + b'a')]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Extra 0.1 BTC fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
mempool = self.nodes[0].getrawmempool()
assert tx1a_txid not in mempool
assert tx1b_txid in mempool
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert doublespent_txid not in mempool
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert len(tx.serialize()) < 100000
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# 1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n - 1 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert tx.hash not in mempool
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1_hex = txToHex(tx1)
self.nodes[0].sendrawtransaction(tx1_hex, 0)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, 0)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value - fee, DUMMY_P2WPKH_SCRIPT)]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, 0)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, 0)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# This transaction isn't shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx1a_txid)['bip125-replaceable'], False)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b_hex = txToHex(tx2b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
tx3a_txid = self.nodes[0].sendrawtransaction(tx3a_hex, 0)
# This transaction is shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx3a_txid)['bip125-replaceable'], True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, 0)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, 0)
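# BIP125 signalling recap (a summary for the reader, not part of the test):
# a transaction signals replaceability explicitly when any of its inputs has
# nSequence < 0xfffffffe, and inherits signalling when it spends an
# unconfirmed ancestor that itself signals.  A minimal explicit-only check
# might look like:
#
#     def signals_rbf(tx):
#         # inherited signalling would additionally need mempool context
#         return any(txin.nSequence < 0xfffffffe for txin in tx.vin)
#
# which is why tx3a above (one input at 0xffffffff, one at 0xfffffffd) is
# reported as 'bip125-replaceable'.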
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
assert tx1b_txid in self.nodes[0].getrawmempool()
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = txToHex(tx2a)
self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, 0)
assert tx2b_txid in self.nodes[0].getrawmempool()
def test_rpc(self):
us0 = self.nodes[0].listunspent()[0]
ins = [us0]
outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
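# For reference, the sequence numbers asserted above, in hex:
#   4294967295 = 0xffffffff  (final; does not signal RBF)
#   4294967294 = 0xfffffffe  (does not signal RBF, but still allows nLockTime)
#   4294967293 = 0xfffffffd  (highest value that signals BIP125 replaceability)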
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
|
"""Command Line Interface to opyd objects"""
from __future__ import print_function
import time
import functools
import sys
from contextlib import contextmanager, closing
from StringIO import StringIO
import collections
from IPython.utils.coloransi import TermColors as tc
from epics import caget, caput
from ..controls.positioner import EpicsMotor, Positioner, PVPositioner
from ..session import get_session_manager
session_mgr = get_session_manager()
try:
logbook = session_mgr['olog_client']
except KeyError:
logbook = None
__all__ = ['mov',
'movr',
'set_pos',
'wh_pos',
'set_lm',
'log_pos',
'log_pos_diff'
]
# Global Defs of certain strings
FMT_LEN = 18
FMT_PREC = 6
def ensure(*ensure_args):
def wrap(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
# First check whether the first arg is iterable;
# if not, wrap all of the args in lists
if len(args) > 0:
if not hasattr(args[0], "__iter__"):
args = tuple([[a] for a in args])
# Now do type checking ignoring None
for n, (arg, t) in enumerate(zip(args, ensure_args)):
if t is not None:
for x in arg:
if not isinstance(x, t):
raise TypeError("Incorect type in parameter list.\n"
"Parameter at position {} "
"is expected to be an instance of "
"{}".format(n, t))
f(*args, **kwargs)
return wrapper
return wrap
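# Minimal sketch of what ``ensure`` does to a call; ``some_positioner``,
# ``p1`` and ``p2`` stand in for real Positioner instances and are not
# defined in this module:
#
#     >>> @ensure(Positioner, None)
#     ... def demo(positioner, position):
#     ...     print(len(positioner), len(position))
#     >>> demo(some_positioner, 10)   # scalars are wrapped into lists
#     1 1
#     >>> demo([p1, p2], [1, 2])      # iterables pass through, types checked
#     2 2
#
# Passing anything that is not a Positioner in the first argument raises
# TypeError before the wrapped function runs.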
@ensure(Positioner, None)
def mov(positioner, position):
"""Move positioners to given positions
Move positioners using the move method of the Positioner class.
Parameters
----------
positioner : Positioner or list
Positioners to move
position : float or list of float
Values to move positioners to.
Examples
--------
Move a single positioner `slt1_xc` to 10::
>>>mov(slt1_xc, 10)
Move positioner `slt1_xg` and `slt1_yg` to 2 and 3 respectively::
>>>mov([slt1_xg, slt1_yg], [2, 3])
"""
print('\n ', end='')
print(tc.Green, end='')
for p in positioner:
print_string(p.name)
print("\n")
# Start Moving all Positioners in context manager to catch
# Keyboard interrupts
# TODO : This should be a utility function
pos_prec = []
for p in positioner:
if hasattr(p, 'precision'):
pos_prec.append(p.precision)
else:
pos_prec.append(FMT_PREC)
with catch_keyboard_interrupt(positioner):
stat = [p.move(v, wait=False) for p, v in
zip(positioner, position)]
# The loop below ensures that at least a couple prints
# will happen
flag = 0
done = False
while not all(s.done for s in stat) or (flag < 2):
print(tc.LightGreen, end='')
print(' ', end='')
for p, prec in zip(positioner, pos_prec):
print_value(p.position, egu=p.egu, prec=prec)
print('\n')
print('\033[2A', end='')
time.sleep(0.01)
done = all(s.done for s in stat)
if done:
flag += 1
print(tc.Normal + '\n')
@ensure(Positioner, None)
def movr(positioner, position):
"""Move positioners relative to their current positon.
See Also
--------
mov : move positioners to an absolute position.
"""
_start_val = [p.position for p in positioner]
for v in _start_val:
if v is None:
raise IOError("Unable to read motor position for relative move")
_new_val = [a + b for a, b in zip(_start_val, position)]
mov(positioner, _new_val)
@ensure(Positioner, None)
def set_lm(positioner, limits):
"""Set the limits of the positioner
Sets the limits of the positioner or list of positioners. For EpicsMotors
the fields .HLM and .LLM are set to the high and low limits respectively.
For PVPositioners the .DRVH and .DRVL fields are set on the setpoint
record. If neither method works then an IOError is raised.
Parameters
----------
positioner : positioner or list of positioners
limits : single or list of tuple of form (+ve, -ve) limits
Raises
------
IOError
If the caput (EPICS put) fails then an IOError is raised.
Examples
--------
Set the limits of motor `m1` to (10, -10)::
>>>set_lm(slt1_xc, (10, -10))
Set the limits of motors `m1` and `m2` to (2, -2) and (3, -3)
respectively::
>>>set_lm([m1, m2], [[2,-2], [3, -3]])
"""
print('')
msg = ''
high_fields = []
low_fields = []
for p in positioner:
if isinstance(p, EpicsMotor):
high_fields.append(p._record + '.HLM')
low_fields.append(p._record + '.LLM')
elif isinstance(p, PVPositioner):
high_fields.append(p.setpoint_pvname[0] + '.DRVH')
low_fields.append(p.setpoint_pvname[0] + '.DRVL')
else:
raise TypeError("Positioners must be EpicsMotors or PVPositioners"
"to set the limits")
for p, lim, high_field, low_field in zip(positioner,
limits,
high_fields, low_fields):
lim1 = max(lim)
lim2 = min(lim)
if not caput(high_field, lim1):
raise IOError("Unable to set high limit for {}"
" writing to PV {}.".format(p.name, high_field))
msg += "Upper limit set to {:.{prec}g} for positioner {}\n".format(
lim1, p.name, prec=FMT_PREC)
if not caput(low_field, lim2):
raise IOError("Unable to set low limit for {}"
" writing to PV {}.".format(p.name, low_field))
msg += "Lower limit set to {:.{prec}g} for positioner {}\n".format(
lim2, p.name, prec=FMT_PREC)
print(msg)
if logbook:
logbook.log(msg)
@ensure(Positioner, (float, int))
def set_pos(positioner, position):
"""Set the position of a positioner
Set the position of a positioner or positioners to the value position.
This function only works for EpicsMotors (Based on the EPICS Motor Record)
and uses the .OFF field to set the current position to the value passed to
the function.
Parameters
----------
positioner : Positioner or list of positioners.
position : float or list of floats.
New position of positioners
Raises
------
TypeError
If positioner is not an instance of an EpicsMotor.
Examples
--------
Set the position of motor m1 to 4::
>>>set_pos(m1, 4)
Set the position of motors m1 and m2 to 1 and 2 respectively::
>>>set_pos([m1, m2], [1, 2])
"""
for p in positioner:
if not isinstance(p, EpicsMotor):
raise TypeError("Positioner {} must be an EpicsMotor"
"to set position.".format(p.name))
# Get the current offset
offset_pvs = [p._record + ".OFF" for p in positioner]
dial_pvs = [p._record + ".DRBV" for p in positioner]
old_offsets = [caget(p) for p in offset_pvs]
dial = [caget(p) for p in dial_pvs]
for v in old_offsets + dial:
if v is None:
raise ValueError("Could not read or invalid value for current"
"position of positioners")
new_offsets = [a - b for a, b in zip(position, dial)]
msg = ''
for o, old_o, p in zip(new_offsets, old_offsets, positioner):
if caput(p._record + '.OFF', o):
msg += 'Motor {0} set to position {1} (Offset = {2} was {3})\n'\
.format(p.name, p.position, o, old_o)
else:
print('Unable to set position of positioner {0}'.format(p.name))
print(msg)
lmsg = logbook_add_objects(positioner, dial_pvs + offset_pvs)
logbook.log(msg + '\n' + lmsg)
@ensure(Positioner)
def wh_pos(positioners=None):
"""Get the current position of Positioners and print to screen.
Print to the screen the position of the positioners in a formatted table.
If positioners is None then get all registered positioners from the
session manager.
Parameters
----------
positioners : Positioner, list of Positioners or None
See Also
--------
log_pos : Log positioner values to logbook
Examples
--------
List all positioners::
>>>wh_pos()
List positioners `m1`, `m2` and `m3`::
>>>wh_pos([m1, m2, m3])
"""
if positioners is None:
pos = session_mgr.get_positioners()
positioners = [pos[k] for k in sorted(pos)]
_print_pos(positioners, file=sys.stdout)
@ensure(Positioner)
def log_pos(positioners=None):
"""Get the current position of Positioners and make a logbook entry.
Print to the screen the position of the positioners and make a logbook
text entry. If positioners is None then get all registered positioners
from the session manager. This routine also creates session information
in the logbook so positions can be recovered.
Parameters
----------
positioners : Positioner, list of Positioners or None
Returns
-------
int
The ID of the logbook entry returned by the logbook.log method.
"""
if positioners is None:
pos = session_mgr.get_positioners()
positioners = [pos[k] for k in sorted(pos)]
msg = ''
with closing(StringIO()) as sio:
_print_pos(positioners, file=sio)
msg += sio.getvalue()
print(msg)
# Add the text representation of the positioners
msg += logbook_add_objects(positioners)
# Create the property for storing motor positions
pdict = {}
pdict['objects'] = repr(positioners)
pdict['values'] = repr({p.name: p.position for p in positioners})
# make the logbook entry
id = logbook.log(msg, properties={'OphydPositioners': pdict},
ensure=True)
print('Logbook positions added as Logbook ID {}'.format(id))
return id
def log_pos_mov(id=None, dry_run=False, positioners=None, **kwargs):
"""Move to positions located in logboook
This function moves to positions recorded in the experimental logbook using
the :py:func:`log_pos` function.
Parameters
----------
id : integer, optional
ID of logbook entry to search for and move positions to.
dry_run : bool, optional
If True, do not move motors, but execute a dry_run
positioners : list, optional
List of string names of positioners to compare and move. Other
positioners in the log entry will be ignored.
"""
logpos, objects = logbook_to_objects(id, **kwargs)
objects = collections.OrderedDict(sorted(objects.items()))
if positioners is not None:
keys = set(positioners).intersection(set(objects.keys()))
objects = {x: objects[x] for x in keys}
print('')
stat = []
for key, value in objects.iteritems():
newpos = logpos[key]
oldpos = value.position
try:
if not dry_run:
stat.append(value.move(newpos, wait=False))
except:
print('{}[!!] Unable to move positioner {}'
.format(tc.Red, tc.Normal))
else:
print('{}[**] Moving positioner {} to {}'
' from current position of {}{}'
.format(tc.Green, value.name, newpos,
oldpos, tc.Normal))
print('\n{}Waiting for positioners to complete .....'
.format(tc.LightGreen), end='')
sys.stdout.flush()
if len(stat) > 0:
while not all(s.done for s in stat):
time.sleep(0.01)
print(' Done{}\n'.format(tc.Normal))
def log_pos_diff(id=None, positioners=None, **kwargs):
"""Move to positions located in logboook
This function compares positions recorded in the experimental logbook
using the :py:func:`log_pos` function.
Parameters
----------
id : integer
ID of logbook entry to search for and compare positions against.
positioners : list
List of string names of positioners to compare. Other positioners
in the log entry will be ignored.
"""
oldpos, objects = logbook_to_objects(id, **kwargs)
objects = collections.OrderedDict(sorted(objects.items()))
# Cycle through positioners and compare position with old value
# If we have an error, print a warning
diff = []
pos = []
values = []
if positioners is not None:
keys = set(positioners).intersection(set(objects.keys()))
objects = {x: objects[x] for x in keys}
print('')
for key, value in objects.iteritems():
try:
diff.append(value.position - oldpos[key])
pos.append(value)
values.append(value.position)
except:
print('{}[!!] Unable to compare positioner {}{}'
.format(tc.Red, key, tc.Normal))
print_header(len=3*(FMT_LEN+3)+1)
print_string('Positioner', pre='| ', post=' | ')
print_string('Value', post=' | ')
print_string('Difference', post=' |\n')
print_header(len=3*(FMT_LEN+3)+1)
for p, v, d in zip(pos, values, diff):
print_string(p.name, pre='| ', post=' | ')
print_value(v, egu=p.egu, post=' | ')
print_value(d, egu=p.egu, post=' |\n')
print_header(len=3*(FMT_LEN+3)+1)
print('')
def logbook_to_objects(id=None, **kwargs):
"""Search the logbook and return positioners"""
if logbook is None:
raise NotImplemented("No logbook is avaliable")
entry = logbook.find(id=id, **kwargs)
if len(entry) != 1:
raise ValueError("Search of logbook was not unique, please refine"
"search")
try:
prop = entry[0]['properties']['OphydPositioners']
except KeyError:
raise KeyError('No property in log entry with positioner information')
try:
obj = eval(prop['objects'])
val = eval(prop['values'])
except:
raise RuntimeError('Unable to create objects from log entry')
objects = {o.name: o for o in obj}
return val, objects
def logbook_add_objects(objects, extra_pvs=None):
"""Add to the logbook aditional information on ophyd objects.
This routine takes objects and possible extra pvs and adds to the log entry
information which is not printed to stdout/stderr.
Parameters
----------
objects : Ophyd objects
Objects to add to log entry.
extra_pvs : List of strings
Extra PVs to include in report
"""
msg = ''
msg += '{:^43}|{:^22}|{:^50}\n'.format('PV Name', 'Name', 'Value')
msg += '{:-^120}\n'.format('')
# Make a list of all PVs and positioners
pvs = [o.report['pv'] for o in objects]
names = [o.name for o in objects]
values = [str(o.value) for o in objects]
if extra_pvs is not None:
pvs += extra_pvs
names += ['None' for e in extra_pvs]
values += [caget(e) for e in extra_pvs]
for a, b, c in zip(pvs, names, values):
msg += 'PV:{:<40} {:<22} {:<50}\n'.format(a, b, c)
return msg
def print_header(title='', char='-', len=80, file=sys.stdout):
print('{:{char}^{len}}'.format(title, char=char, len=len), file=file)
def print_string(val, size=FMT_LEN, pre='', post=' ', file=sys.stdout):
print('{}{:<{size}}{}'.format(pre, val, post, size=size), end='', file=file)
def print_value(val, prec=FMT_PREC, egu='', **kwargs):
if val is not None:
print_string('{: .{fmt}f} {}'.format(val, egu, fmt=prec), **kwargs)
else:
print_string('', **kwargs)
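# Formatting sketch (illustrative values): with the defaults FMT_LEN=18 and
# FMT_PREC=6 a call such as
#
#     >>> print_value(1.2345678, egu='mm')
#      1.234568 mm
#
# emits the value left-justified in an 18-character column followed by a
# space, which is what keeps the wh_pos/log_pos_diff tables aligned.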
def blink(on=True, file=sys.stdout):
if on:
print("\x1b[?25h", end='', file=file)
else:
print("\x1b[?25l", end='', file=file)
@contextmanager
def catch_keyboard_interrupt(positioners):
"""Context manager to capture Keyboard Interrupt and stop motors
This context manager should be used when moving positioners via the cli
to capture the KeyboardInterrupt and ensure that motors are stopped and
clean up the output to the screen.
"""
blink(False)
try:
yield
except KeyboardInterrupt:
print(tc.Red + "[!!] ABORTED "
": Commanding all positioners to stop.")
for p in positioners:
p.stop()
print("{}[--] Stopping {}{}".format(tc.Red, tc.LightRed, p.name))
print(tc.Normal, end='')
blink(True)
def _print_pos(positioners, file=sys.stdout):
"""Pretty Print the positioners to file"""
print('', file=file)
pos = [p.position for p in positioners]
# Print out header
print_header(len=4*(FMT_LEN+3)+1, file=file)
print_string('Positioner', pre='| ', post=' | ', file=file)
print_string('Value', post=' | ', file=file)
print_string('Low Limit', post=' | ', file=file)
print_string('High Limit', post=' |\n', file=file)
print_header(len=4*(FMT_LEN+3)+1, file=file)
for p, v in zip(positioners, pos):
print_string(p.name, pre='| ', post=' | ', file=file)
if v is not None:
if hasattr(p, 'precision'):
prec = p.precision
else:
prec = FMT_PREC
print_value(v, egu=p.egu, prec=prec,
post=' | ', file=file)
else:
print_string('INVALID', post=' | ', file=file)
print_value(p.low_limit, egu=p.egu, post=' | ', file=file)
print_value(p.high_limit, egu=p.egu, post=' |\n', file=file)
print_header(len=4*(FMT_LEN+3)+1, file=file)
print('', file=file)
|
|
"""Profile the memory usage of a Python program"""
# .. we'll use this to pass it to the child script ..
_clean_globals = globals().copy()
__version__ = '0.32'
_CMD_USAGE = "python -m memory_profiler script_file.py"
import time
import sys
import os
import pdb
import warnings
import linecache
import inspect
import subprocess
from copy import copy
import logging
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Process, Pipe
except ImportError:
from multiprocessing.dummy import Process, Pipe
_TWO_20 = float(2 ** 20)
has_psutil = False
# .. get available packages ..
try:
import psutil
has_psutil = True
except ImportError:
pass
def _get_memory(pid, timestamps=False, include_children=False):
# .. only for current process and only on unix..
if pid == -1:
pid = os.getpid()
# .. cross-platform but requires psutil ..
if has_psutil:
process = psutil.Process(pid)
try:
# avoid using get_memory_info since it does not exist
# in psutil > 2.0 and accessing it will raise an exception.
meminfo_attr = 'memory_info' if hasattr(process, 'memory_info') else 'get_memory_info'
mem = getattr(process, meminfo_attr)()[0] / _TWO_20
if include_children:
for p in process.get_children(recursive=True):
mem += getattr(p, meminfo_attr)()[0] / _TWO_20
if timestamps:
return (mem, time.time())
else:
return mem
except psutil.AccessDenied:
pass
# continue and try to get this from ps
# .. scary stuff ..
if os.name == 'posix':
if include_children:
raise NotImplementedError('The psutil module is required to'
' monitor memory usage of child'
' processes')
warnings.warn("psutil module not found. memory_profiler will be slow")
# ..
# .. memory usage in MiB ..
# .. this should work on both Mac and Linux ..
# .. subprocess.check_output appeared in 2.7, using Popen ..
# .. for backwards compatibility ..
out = subprocess.Popen(['ps', 'v', '-p', str(pid)],
stdout=subprocess.PIPE
).communicate()[0].split(b'\n')
try:
vsz_index = out[0].split().index(b'RSS')
mem = float(out[1].split()[vsz_index]) / 1024
if timestamps:
return(mem, time.time())
else:
return mem
except:
if timestamps:
return (-1, time.time())
else:
return -1
else:
raise NotImplementedError('The psutil module is required for non-unix '
'platforms')
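# Usage sketch (values are invented; only the return shapes matter): the
# division by _TWO_20 = 2**20 converts psutil's byte counts to MiB.
#
#     >>> _get_memory(-1)                  # RSS of the current process, in MiB
#     23.71484375
#     >>> _get_memory(-1, timestamps=True)
#     (23.71484375, 1419868228.63)         # (MiB, unix timestamp)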
class MemTimer(Process):
"""
Fetch memory consumption over a time interval
"""
def __init__(self, monitor_pid, interval, pipe, max_usage=False,
*args, **kw):
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.cont = True
self.max_usage = max_usage
self.n_measurements = 1
if "timestamps" in kw:
self.timestamps = kw["timestamps"]
del kw["timestamps"]
else:
self.timestamps = False
if "include_children" in kw:
self.include_children = kw["include_children"]
del kw["include_children"]
else:
self.include_children = False
# get baseline memory usage
self.mem_usage = [
_get_memory(self.monitor_pid, timestamps=self.timestamps,
include_children=self.include_children)]
super(MemTimer, self).__init__(*args, **kw)
def run(self):
self.pipe.send(0) # we're ready
stop = False
while True:
cur_mem = _get_memory(self.monitor_pid, timestamps=self.timestamps,
include_children=self.include_children)
if not self.max_usage:
self.mem_usage.append(cur_mem)
else:
self.mem_usage[0] = max(cur_mem, self.mem_usage[0])
self.n_measurements += 1
if stop:
break
stop = self.pipe.poll(self.interval)
# do one more iteration
self.pipe.send(self.mem_usage)
self.pipe.send(self.n_measurements)
def memory_usage(proc=-1, interval=.1, timeout=None, timestamps=False,
include_children=False, max_usage=False, retval=False,
stream=None):
"""
Return the memory usage of a process or piece of code
Parameters
----------
proc : {int, string, tuple, subprocess.Popen}, optional
The process to monitor. Can be given by an integer/string
representing a PID, by a Popen object or by a tuple
representing a Python function. The tuple contains three
values (f, args, kw) and specifies to run the function
f(*args, **kw).
Set to -1 (default) for current process.
interval : float, optional
Interval at which measurements are collected.
timeout : float, optional
Maximum amount of time (in seconds) to wait before returning.
max_usage : bool, optional
Only return the maximum memory usage (default False)
retval : bool, optional
For profiling python functions. Save the return value of the profiled
function. Return value of memory_usage becomes a tuple:
(mem_usage, retval)
timestamps : bool, optional
if True, timestamps of memory usage measurement are collected as well.
stream : File
if stream is a File opened with write access, then results are written
to this file instead of stored in memory and returned at the end of
the subprocess. Useful for long-running processes.
Implies timestamps=True.
Returns
-------
mem_usage : list of floating-point values
memory usage, in MiB. Its length is always < timeout / interval
if max_usage is given, returns the maximum memory usage and the
number of measurements taken
ret : return value of the profiled function
Only returned if retval is set to True
"""
if stream is not None:
timestamps = True
if not max_usage:
ret = []
else:
ret = -1
if timeout is not None:
max_iter = int(timeout / interval)
elif isinstance(proc, int):
# external process and no timeout
max_iter = 1
else:
# for a Python function wait until it finishes
max_iter = float('inf')
if hasattr(proc, '__call__'):
proc = (proc, (), {})
if isinstance(proc, (list, tuple)):
if len(proc) == 1:
f, args, kw = (proc[0], (), {})
elif len(proc) == 2:
f, args, kw = (proc[0], proc[1], {})
elif len(proc) == 3:
f, args, kw = (proc[0], proc[1], proc[2])
else:
raise ValueError
while True:
child_conn, parent_conn = Pipe() # this will store MemTimer's results
p = MemTimer(os.getpid(), interval, child_conn, timestamps=timestamps,
max_usage=max_usage, include_children=include_children)
p.start()
parent_conn.recv() # wait until we start getting memory
returned = f(*args, **kw)
parent_conn.send(0) # finish timing
ret = parent_conn.recv()
n_measurements = parent_conn.recv()
if retval:
ret = ret, returned
p.join(5 * interval)
if n_measurements > 4 or interval < 1e-6:
break
interval /= 10.
elif isinstance(proc, subprocess.Popen):
# external process, launched from Python
line_count = 0
while True:
if not max_usage:
mem_usage = _get_memory(proc.pid, timestamps=timestamps,
include_children=include_children)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))
else:
ret.append(mem_usage)
else:
ret = max([ret,
_get_memory(proc.pid,
include_children=include_children)])
time.sleep(interval)
line_count += 1
# flush every 50 lines. Make 'tail -f' usable on profile file
if line_count > 50:
line_count = 0
if stream is not None:
stream.flush()
if timeout is not None:
max_iter -= 1
if max_iter == 0:
break
if proc.poll() is not None:
break
else:
# external process
if max_iter == -1:
max_iter = 1
counter = 0
while counter < max_iter:
counter += 1
if not max_usage:
mem_usage = _get_memory(proc, timestamps=timestamps,
include_children=include_children)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))
else:
ret.append(mem_usage)
else:
ret = max([ret,
_get_memory(proc, include_children=include_children)
])
time.sleep(interval)
# Flush every 50 lines.
if counter % 50 == 0 and stream is not None:
stream.flush()
if stream:
return None
return ret
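# Usage sketch for memory_usage (the function ``f`` and all sample values
# below are invented):
#
#     >>> def f(n):
#     ...     return [0] * n
#     >>> memory_usage((f, (10**6,), {}), interval=0.2)
#     [23.7, 31.3, 31.3]                   # MiB samples taken while f ran
#     >>> memory_usage(-1, interval=0.2, timeout=1)
#     [23.7, 23.7, 23.7, 23.7, 23.7]       # current process, timeout/interval samples
#
# See the docstring above for the max_usage, retval and stream variants.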
# ..
# .. utility functions for line-by-line ..
def _find_script(script_name):
""" Find the script.
If the input is not a file, then $PATH will be searched.
"""
if os.path.isfile(script_name):
return script_name
path = os.getenv('PATH', os.defpath).split(os.pathsep)
for folder in path:
if not folder:
continue
fn = os.path.join(folder, script_name)
if os.path.isfile(fn):
return fn
sys.stderr.write('Could not find script {0}\n'.format(script_name))
raise SystemExit(1)
class _TimeStamperCM(object):
"""Time-stamping context manager."""
def __init__(self, timestamps):
self._timestamps = timestamps
def __enter__(self):
self._timestamps.append(_get_memory(os.getpid(), timestamps=True))
def __exit__(self, *args):
self._timestamps.append(_get_memory(os.getpid(), timestamps=True))
class TimeStamper:
""" A profiler that just records start and end execution times for
any decorated function.
"""
def __init__(self):
self.functions = {}
def __call__(self, func=None, precision=None):
if func is not None:
if not hasattr(func, "__call__"):
raise ValueError("Value must be callable")
self.add_function(func)
f = self.wrap_function(func)
f.__module__ = func.__module__
f.__name__ = func.__name__
f.__doc__ = func.__doc__
f.__dict__.update(getattr(func, '__dict__', {}))
return f
else:
def inner_partial(f):
return self.__call__(f, precision=precision)
return inner_partial
def timestamp(self, name="<block>"):
"""Returns a context manager for timestamping a block of code."""
# Make a fake function
func = lambda x: x
func.__module__ = ""
func.__name__ = name
self.add_function(func)
timestamps = []
self.functions[func].append(timestamps)
# A new object is required each time, since there can be several
# nested context managers.
return _TimeStamperCM(timestamps)
def add_function(self, func):
if not func in self.functions:
self.functions[func] = []
def wrap_function(self, func):
""" Wrap a function to timestamp it.
"""
def f(*args, **kwds):
# Start time
timestamps = [_get_memory(os.getpid(), timestamps=True)]
self.functions[func].append(timestamps)
try:
result = func(*args, **kwds)
finally:
# end time
timestamps.append(_get_memory(os.getpid(), timestamps=True))
return result
return f
def show_results(self, stream=None):
if stream is None:
stream = sys.stdout
for func, timestamps in self.functions.items():
function_name = "%s.%s" % (func.__module__, func.__name__)
for ts in timestamps:
stream.write("FUNC %s %.4f %.4f %.4f %.4f\n" % (
(function_name,) + ts[0] + ts[1]))
class LineProfiler(object):
""" A profiler that records the amount of memory for each line """
def __init__(self, **kw):
self.code_map = {}
self.enable_count = 0
self.max_mem = kw.get('max_mem', None)
self.prevline = None
self.include_children = kw.get('include_children', False)
def __call__(self, func=None, precision=1):
if func is not None:
self.add_function(func)
f = self.wrap_function(func)
f.__module__ = func.__module__
f.__name__ = func.__name__
f.__doc__ = func.__doc__
f.__dict__.update(getattr(func, '__dict__', {}))
return f
else:
def inner_partial(f):
return self.__call__(f, precision=precision)
return inner_partial
def add_code(self, code, toplevel_code=None):
if code not in self.code_map:
self.code_map[code] = {}
for subcode in filter(inspect.iscode, code.co_consts):
self.add_code(subcode)
def add_function(self, func):
""" Record line profiling information for the given Python function.
"""
try:
# func_code does not exist in Python3
code = func.__code__
except AttributeError:
warnings.warn("Could not extract a code object for the object %r"
% func)
else:
self.add_code(code)
def wrap_function(self, func):
""" Wrap a function to profile it.
"""
def f(*args, **kwds):
self.enable_by_count()
try:
result = func(*args, **kwds)
finally:
self.disable_by_count()
return result
return f
def run(self, cmd):
""" Profile a single executable statement in the main namespace.
"""
# TODO: can this be removed ?
import __main__
main_dict = __main__.__dict__
return self.runctx(cmd, main_dict, main_dict)
def runctx(self, cmd, globals, locals):
""" Profile a single executable statement in the given namespaces.
"""
self.enable_by_count()
try:
exec(cmd, globals, locals)
finally:
self.disable_by_count()
return self
def enable_by_count(self):
""" Enable the profiler if it hasn't been enabled before.
"""
if self.enable_count == 0:
self.enable()
self.enable_count += 1
def disable_by_count(self):
""" Disable the profiler if the number of disable requests matches the
number of enable requests.
"""
if self.enable_count > 0:
self.enable_count -= 1
if self.enable_count == 0:
self.disable()
def trace_memory_usage(self, frame, event, arg):
"""Callback for sys.settrace"""
if (event in ('call', 'line', 'return')
and frame.f_code in self.code_map):
if event != 'call':
# "call" event just saves the lineno but not the memory
mem = _get_memory(-1, include_children=self.include_children)
# if there is already a measurement for that line get the max
old_mem = self.code_map[frame.f_code].get(self.prevline, 0)
self.code_map[frame.f_code][self.prevline] = max(mem, old_mem)
self.prevline = frame.f_lineno
if self._original_trace_function is not None:
(self._original_trace_function)(frame, event, arg)
return self.trace_memory_usage
def trace_max_mem(self, frame, event, arg):
# run into PDB as soon as memory is higher than MAX_MEM
if event in ('line', 'return') and frame.f_code in self.code_map:
c = _get_memory(-1)
if c >= self.max_mem:
t = ('Current memory {0:.2f} MiB exceeded the maximum '
'of {1:.2f} MiB\n'.format(c, self.max_mem))
sys.stdout.write(t)
sys.stdout.write('Stepping into the debugger \n')
frame.f_lineno -= 2
p = pdb.Pdb()
p.quitting = False
p.stopframe = frame
p.returnframe = None
p.stoplineno = frame.f_lineno - 3
p.botframe = None
return p.trace_dispatch
if self._original_trace_function is not None:
(self._original_trace_function)(frame, event, arg)
return self.trace_max_mem
def __enter__(self):
self.enable_by_count()
def __exit__(self, exc_type, exc_val, exc_tb):
self.disable_by_count()
def enable(self):
self._original_trace_function = sys.gettrace()
if self.max_mem is not None:
sys.settrace(self.trace_max_mem)
else:
sys.settrace(self.trace_memory_usage)
def disable(self):
sys.settrace(self._original_trace_function)
def show_results(prof, stream=None, precision=1):
if stream is None:
stream = sys.stdout
template = '{0:>6} {1:>12} {2:>12} {3:<}'
for code in prof.code_map:
lines = prof.code_map[code]
if not lines:
# .. measurements are empty ..
continue
filename = code.co_filename
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
stream.write('Filename: ' + filename + '\n\n')
if not os.path.exists(filename):
stream.write('ERROR: Could not find file ' + filename + '\n')
if any([filename.startswith(k) for k in
("ipython-input", "<ipython-input")]):
print("NOTE: %mprun can only be used on functions defined in "
"physical files, and not in the IPython environment.")
continue
all_lines = linecache.getlines(filename)
sub_lines = inspect.getblock(all_lines[code.co_firstlineno - 1:])
linenos = range(code.co_firstlineno,
code.co_firstlineno + len(sub_lines))
header = template.format('Line #', 'Mem usage', 'Increment',
'Line Contents')
stream.write(header + '\n')
stream.write('=' * len(header) + '\n')
mem_old = lines[min(lines.keys())]
float_format = '{0}.{1}f'.format(precision + 4, precision)
template_mem = '{0:' + float_format + '} MiB'
for line in linenos:
mem = ''
inc = ''
if line in lines:
mem = lines[line]
inc = mem - mem_old
mem_old = mem
mem = template_mem.format(mem)
inc = template_mem.format(inc)
stream.write(template.format(line, mem, inc, all_lines[line - 1]))
stream.write('\n\n')
# A lprun-style %mprun magic for IPython.
def magic_mprun(self, parameter_s=''):
""" Execute a statement under the line-by-line memory profiler from the
memory_profiler module.
Usage:
%mprun -f func1 -f func2 <statement>
The given statement (which doesn't require quote marks) is run via the
LineProfiler. Profiling is enabled for the functions specified by the -f
options. The statistics will be shown side-by-side with the code through
the pager once the statement has completed.
Options:
-f <function>: LineProfiler only profiles functions and methods it is told
to profile. This option tells the profiler about these functions. Multiple
-f options may be used. The argument may be any expression that gives
a Python function or method object. However, one must be careful to avoid
spaces that may confuse the option parser. Additionally, functions defined
in the interpreter at the In[] prompt or via %run currently cannot be
displayed. Write these functions out to a separate file and import them.
One or more -f options are required to get any useful results.
-T <filename>: dump the text-formatted statistics with the code
side-by-side out to a text file.
-r: return the LineProfiler object after it has completed profiling.
-c: If present, add the memory usage of any children process to the report.
"""
try:
from StringIO import StringIO
except ImportError: # Python 3.x
from io import StringIO
# Local imports to avoid hard dependency.
from distutils.version import LooseVersion
import IPython
ipython_version = LooseVersion(IPython.__version__)
if ipython_version < '0.11':
from IPython.genutils import page
from IPython.ipstruct import Struct
from IPython.ipapi import UsageError
else:
from IPython.core.page import page
from IPython.utils.ipstruct import Struct
from IPython.core.error import UsageError
# Escape quote markers.
opts_def = Struct(T=[''], f=[])
parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
opts, arg_str = self.parse_options(parameter_s, 'rf:T:c', list_all=True)
opts.merge(opts_def)
global_ns = self.shell.user_global_ns
local_ns = self.shell.user_ns
# Get the requested functions.
funcs = []
for name in opts.f:
try:
funcs.append(eval(name, global_ns, local_ns))
except Exception as e:
raise UsageError('Could not find function %r.\n%s: %s' % (name,
e.__class__.__name__, e))
include_children = 'c' in opts
profile = LineProfiler(include_children=include_children)
for func in funcs:
profile(func)
# Add the profiler to the builtins for @profile.
try:
import builtins
except ImportError: # Python 2.x
import __builtin__ as builtins
if 'profile' in builtins.__dict__:
had_profile = True
old_profile = builtins.__dict__['profile']
else:
had_profile = False
old_profile = None
builtins.__dict__['profile'] = profile
try:
try:
profile.runctx(arg_str, global_ns, local_ns)
message = ''
except SystemExit:
message = "*** SystemExit exception caught in code being profiled."
except KeyboardInterrupt:
message = ("*** KeyboardInterrupt exception caught in code being "
"profiled.")
finally:
if had_profile:
builtins.__dict__['profile'] = old_profile
# Trap text output.
stdout_trap = StringIO()
show_results(profile, stdout_trap)
output = stdout_trap.getvalue()
output = output.rstrip()
if ipython_version < '0.11':
page(output, screen_lines=self.shell.rc.screen_length)
else:
page(output)
print(message,)
text_file = opts.T[0]
if text_file:
with open(text_file, 'w') as pfile:
pfile.write(output)
print('\n*** Profile printout saved to text file %s. %s' % (text_file,
message))
return_value = None
if 'r' in opts:
return_value = profile
return return_value
def _func_exec(stmt, ns):
# helper for magic_memit, just a function proxy for the exec
# statement
exec(stmt, ns)
# a timeit-style %memit magic for IPython
def magic_memit(self, line=''):
"""Measure memory usage of a Python statement
Usage, in line mode:
%memit [-r<R>t<T>i<I>] statement
Options:
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 1
-t<T>: timeout after <T> seconds. Default: None
-i<I>: Get time information at an interval of I times per second.
Defaults to 0.1 so that there are ten measurements per second.
-c: If present, add the memory usage of any children process to the report.
Examples
--------
::
In [1]: import numpy as np
In [2]: %memit np.zeros(1e7)
maximum of 1: 76.402344 MiB per loop
In [3]: %memit np.ones(1e6)
maximum of 1: 7.820312 MiB per loop
In [4]: %memit -r 10 np.empty(1e8)
maximum of 10: 0.101562 MiB per loop
"""
opts, stmt = self.parse_options(line, 'r:t:i:c', posix=False, strict=False)
repeat = int(getattr(opts, 'r', 1))
if repeat < 1:
repeat = 1
timeout = int(getattr(opts, 't', 0))
if timeout <= 0:
timeout = None
interval = float(getattr(opts, 'i', 0.1))
include_children = 'c' in opts
# I've noticed we get less noisy measurements if we run
# a garbage collection first
import gc
gc.collect()
mem_usage = 0
counter = 0
baseline = memory_usage()[0]
while counter < repeat:
counter += 1
tmp = memory_usage((_func_exec, (stmt, self.shell.user_ns)),
timeout=timeout, interval=interval, max_usage=True,
include_children=include_children)
mem_usage = max(mem_usage, tmp[0])
if mem_usage:
print('peak memory: %.02f MiB, increment: %.02f MiB' %
(mem_usage, mem_usage - baseline))
else:
print('ERROR: could not read memory usage, try with a lower interval '
'or more iterations')
def load_ipython_extension(ip):
"""This is called to load the module as an IPython extension."""
ip.define_magic('mprun', magic_mprun)
ip.define_magic('memit', magic_memit)
def profile(func=None, stream=None, precision=1):
"""
Decorator that will run the function and print a line-by-line profile
"""
if func is not None:
def wrapper(*args, **kwargs):
prof = LineProfiler()
val = prof(func)(*args, **kwargs)
show_results(prof, stream=stream, precision=precision)
return val
return wrapper
else:
def inner_wrapper(f):
return profile(f, stream=stream, precision=precision)
return inner_wrapper
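# Typical usage sketch for the decorator above (assuming this module is
# importable as ``memory_profiler``; the function body is just an example):
#
#     from memory_profiler import profile
#
#     @profile
#     def my_func():
#         a = [1] * (10 ** 6)
#         b = [2] * (2 * 10 ** 7)
#         del b
#         return a
#
# Each call to my_func() then prints a line-by-line 'Mem usage' / 'Increment'
# table via show_results(); ``python -m memory_profiler script.py`` (see
# _CMD_USAGE above) injects a ``profile`` builtin for the same purpose.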
class LogFile(object):
"""File-like object to log text using the `logging` module and the log report can be customised."""
def __init__(self, name=None, reportIncrementFlag=False):
"""
:param name: name of the logger module
:param reportIncrementFlag: set to True if only the steps with memory increments are to be reported
:type name: string
:type reportIncrementFlag: bool
"""
self.logger = logging.getLogger(name)
self.reportIncrementFlag = reportIncrementFlag
def write(self, msg, level=logging.INFO):
if self.reportIncrementFlag:
if "MiB" in msg and float(msg.split("MiB")[1].strip())>0:
self.logger.log(level, msg)
elif msg.__contains__("Filename:") or msg.__contains__("Line Contents"):
self.logger.log(level, msg)
else:
self.logger.log(level, msg)
def flush(self):
for handler in self.logger.handlers:
handler.flush()
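# Sketch of routing the report through the logging module via LogFile
# (logger name and logging configuration below are arbitrary examples):
#
#     import logging
#     logging.basicConfig(filename='memory_profile.log', level=logging.DEBUG)
#
#     @profile(stream=LogFile('memory_profile_log', reportIncrementFlag=True))
#     def my_func():
#         return [0] * (10 ** 6)
#
# With reportIncrementFlag=True only lines whose 'Increment' column is
# positive (plus the 'Filename:' / 'Line Contents' header lines) are logged.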
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(usage=_CMD_USAGE, version=__version__)
parser.disable_interspersed_args()
parser.add_option(
"--pdb-mmem", dest="max_mem", metavar="MAXMEM",
type="float", action="store",
help="step into the debugger when memory exceeds MAXMEM")
parser.add_option(
'--precision', dest="precision", type="int",
action="store", default=3,
help="precision of memory output in number of significant digits")
parser.add_option("-o", dest="out_filename", type="str",
action="store", default=None,
help="path to a file where results will be written")
parser.add_option("--timestamp", dest="timestamp", default=False,
action="store_true",
help="""print timestamp instead of memory measurement for
decorated functions""")
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args # Remove every memory_profiler arguments
if options.timestamp:
prof = TimeStamper()
else:
prof = LineProfiler(max_mem=options.max_mem)
__file__ = _find_script(args[0])
try:
if sys.version_info[0] < 3:
# we need to overwrite the builtins to have profile
# globally defined (global variables are not enough
# for all cases, e.g. a script that imports another
# script where @profile is used)
import __builtin__
__builtin__.__dict__['profile'] = prof
ns = copy(_clean_globals)
ns['profile'] = prof # shadow the profile decorator defined above
execfile(__file__, ns, ns)
else:
import builtins
builtins.__dict__['profile'] = prof
ns = copy(_clean_globals)
ns['profile'] = prof # shadow the profile decorator defined above
exec(compile(open(__file__).read(), __file__, 'exec'), ns, ns)
finally:
if options.out_filename is not None:
out_file = open(options.out_filename, "a")
else:
out_file = sys.stdout
if options.timestamp:
prof.show_results(stream=out_file)
else:
show_results(prof, precision=options.precision, stream=out_file)
|
|
import re
from JumpScale import j
import JumpScale.sal.disklayout.mount as mount
import JumpScale.sal.disklayout.lsblk as lsblk
_formatters = {
# specific format command per filesystem.
'ntfs': lambda name, fstype: 'mkfs.ntfs -f {name}'.format(name=name)
}
isValidFS = lambda v: v.startswith('ext') or v in ('btrfs', 'ntfs')
_hrd_validators = {
'filesystem': isValidFS
}
def _default_formatter(name, fstype):
return 'mkfs.{fstype} {name}'.format(
fstype=fstype,
name=name
)
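# Formatter dispatch sketch (device name is just an example):
#
#     >>> _formatters.get('ntfs', _default_formatter)('/dev/sdb1', 'ntfs')
#     'mkfs.ntfs -f /dev/sdb1'
#     >>> _formatters.get('ext4', _default_formatter)('/dev/sdb1', 'ext4')
#     'mkfs.ext4 /dev/sdb1'
#
# Filesystems with an entry in _formatters get their own command line;
# everything else falls back to plain ``mkfs.<fstype>``.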
class PartitionError(Exception):
pass
class FormatError(Exception):
pass
class DiskError(Exception):
pass
class BlkInfo:
def __init__(self, name, type, size, executor):
self.name = name
self.type = type
self.size = int(size)
self.hrd = None
if executor is None:
self._executor = j.tools.executor.getLocal()
else:
self._executor = executor
self._executor = j.sal.disklayout._executor
def __str__(self):
return '%s %s' % (self.name, self.size)
def __repr__(self):
return str(self)
def mount(self):
"""
Mount partition to `mountpath` defined in HRD
"""
if self.invalid:
raise PartitionError('Partition is invalid')
if self.hrd is None:
raise PartitionError('No HRD attached to disk')
path = self.hrd.get('mountpath')
mnt = mount.Mount(self.name, path)
mnt.mount()
self.refresh()
def umount(self):
"""
Unmount partition
"""
if self.invalid:
raise PartitionError('Partition is invalid')
if self.hrd is None:
raise PartitionError('No HRD attached to disk')
path = self.hrd.get('mountpath')
mnt = mount.Mount(self.name, path)
mnt.umount()
self.refresh()
def unsetAutoMount(self):
"""
Remove the partition entry from fstab
"""
fstabpath = j.tools.path.get('/etc/fstab')
fstab = fstabpath.text().splitlines()
dirty = False
for i in range(len(fstab) - 1, -1, -1):
line = fstab[i]
if line.startswith('UUID=%s' % self.uuid):
del fstab[i]
dirty = True
if not dirty:
return
fstabpath.write_text('\n'.join(fstab))
fstabpath.chmod(644)
def setAutoMount(self, options='defaults', _dump=0, _pass=0):
"""
Configure partition auto mount `fstab` on `mountpath` defined in HRD
"""
if self.hrd is None:
path = self.mountpoint
if path == "":
raise RuntimeError("path cannot be empty")
path = j.tools.path.get(path)
else:
path = j.tools.path.get(self.hrd.get('mountpath'))
path.makedirs_p()
fstabpath = j.tools.path.get('/etc/fstab')
fstab = fstabpath.text().splitlines()
for i in range(len(fstab) - 1, -1, -1):
line = fstab[i]
if line.startswith('UUID=%s' % self.uuid):
del fstab[i]
dirty = True
break
if path is None:
return
entry = ('UUID={uuid}\t{path}\t{fstype}' +
'\t{options}\t{_dump}\t{_pass}\n').format(
uuid=self.uuid,
path=path,
fstype=self.fstype,
options=options,
_dump=_dump,
_pass=_pass
)
fstab.append(entry)
fstabpath.write_text('\n'.join(fstab))
fstabpath.chmod(644)
def _validateHRD(self, hrd):
for field in ['filesystem', 'mountpath', 'protected', 'type']:
if not hrd.exists(field):
raise PartitionError(
'Invalid hrd, missing mandatory field "%s"' % field
)
if field in _hrd_validators:
validator = _hrd_validators[field]
value = hrd.get(field)
if not validator(value):
raise PartitionError('Invalid value for %s: %s' % (
field, value
))
class DiskInfo(BlkInfo):
"""
Represents a disk
"""
def __init__(self, name, size, mountpoint="", fstype="", uuid="", executor=None):
super(DiskInfo, self).__init__(name, 'disk', size, executor=executor)
self.mountpoint = mountpoint
self.fstype = fstype
self.uuid = uuid
self.partitions = list()
# self.mirrors=[]
self.mirror_devices = []
if self.fstype == "btrfs":
devsfound = []
out = self._executor.execute(
"btrfs filesystem show %s" % self.name)[1]
for line in out.split("\n"):
line = line.strip()
if line.startswith("devid "):
dev = line.split("/dev/")[-1]
dev = dev.strip(" /")
devsfound.append(dev)
if len(devsfound) > 1:
# found mirror
self.mirror_devices = [
"/dev/%s" % item for item in devsfound if "/dev/%s" % item != name]
# def _getpart(self):
# rc, ptable = self._executor.execute(
# 'parted -sm {name} unit B print'.format(name=self.name)
# )
# read_disk_next = False
# disk = {}
# partitions = []
# for line in ptable.splitlines():
# line = line.strip()
# if line == 'BYT;':
# read_disk_next = True
# continue
# parts = line.split(':')
# if read_disk_next:
# # /dev/sdb:8589934592B:scsi:512:512:gpt:ATA VBOX HARDDISK;
# size = int(parts[1][:-1])
# table = parts[5]
# disk.update(
# size=size,
# table=table,
# )
# read_disk_next = False
# continue
# # 1:1048576B:2097151B:1048576B:btrfs:primary:;
# partition = {
# 'number': int(parts[0]),
# 'start': int(parts[1][:-1]),
# 'end': int(parts[2][:-1]),
# }
# partitions.append(partition)
# disk['partitions'] = partitions
# return disk
def _findFreeSpot(self, parts, size):
if size > parts['size']:
return
start = 20 * 1024 # start from 20k offset.
for partition in parts['partitions']:
if partition['start'] - start > size:
return start, start + size
start = partition['end'] + 1
if start + size > parts['size']:
return
return start, start + size
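# Worked example for _findFreeSpot (all numbers invented; sizes are in
# bytes, matching the 'unit B' parted output parsed by the commented-out
# _getpart above):
#
#     >>> parts = {'size': 1000000,
#     ...          'partitions': [{'number': 1, 'start': 500000, 'end': 800000}]}
#     >>> self._findFreeSpot(parts, 100000)
#     (20480, 120480)     # fits in the gap between the 20 KiB offset and part 1
#     >>> self._findFreeSpot(parts, 600000) is None   # neither gap nor tail fits
#     True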
def format(self, size, hrd):
"""
Create new partition and format it as configured in hrd file
:size: in bytes
:hrd: the disk hrd info
Note:
hrd file must contain the following
filesystem = '<fs-type>'
mountpath = '<mount-path>'
protected = 0 or 1
type = data or root or tmp
"""
self._validateHRD(hrd)
if not self.partitions:
# if no partitions, make sure to clear mbr to convert to gpt
self._clearMBR()
parts = self._getpart()
spot = self._findFreeSpot(parts, size)
if not spot:
raise Exception('Not enough space on disk to allocate')
start, end = spot
try:
self._executor.execute(
('parted -s {name} unit B ' +
'mkpart primary {start} {end}').format(name=self.name,
start=start,
end=end)
)
except Exception as e:
raise FormatError(e)
numbers = [p['number'] for p in parts['partitions']]
newparts = self._getpart()
newnumbers = [p['number'] for p in newparts['partitions']]
number = list(set(newnumbers).difference(numbers))[0]
partition = PartitionInfo(
name='%s%d' % (self.name, number),
size=size,
uuid='',
fstype='',
mount='',
device=self
)
partition.hrd = hrd
partition.format()
self.partitions.append(partition)
return partition
def _clearMBR(self):
try:
self._executor.execute(
'parted -s {name} mktable gpt'.format(name=self.name)
)
except Exception as e:
raise DiskError(e)
def erase(self, force=False):
"""
Clean up disk by deleting all non protected partitions
if force=True, delete ALL partitions including protected ones
:force: delete protected partitions, default=False
"""
if force:
self._clearMBR()
return
for partition in self.partitions:
if not partition.protected:
partition.delete()
class PartitionInfo(BlkInfo):
def __init__(self, name, size, uuid, fstype, mount, device, executor=None):
super(PartitionInfo, self).__init__(name, 'part', size, executor=executor)
self.uuid = uuid
self.fstype = fstype
self.mountpoint = mount
self.hrd = None
self.device = device
self._invalid = False
self.mount = device.mount
@property
def invalid(self):
return self._invalid
@property
def protected(self):
if self.hrd is None:
# that's an unmanaged partition, assume protected
return True
return bool(self.hrd.get('protected', True))
def _formatter(self, name, fstype):
fmtr = _formatters.get(fstype, _default_formatter)
return fmtr(name, fstype)
def refresh(self):
"""
Reload partition status to match current real state
"""
try:
info = lsblk.lsblk(self.name, executor=self._executor)[0]
except lsblk.LsblkError:
self._invalid = True
info = {
'SIZE': 0,
'UUID': '',
'FSTYPE': '',
'MOUNTPOINT': ''
}
for key, val in info.items():
setattr(self, key.lower(), val)
def _dumpHRD(self):
with mount.Mount(self.name) as mnt:
filepath = j.tools.path.get(mnt.path).joinpath('.disk.hrd')
filepath.write_text(str(self.hrd))
filepath.chmod(400)
def format(self):
"""
Reformat the partition according to hrd
"""
if self.invalid:
raise PartitionError('Partition is invalid')
if self.mountpoint:
raise PartitionError(
'Partition is mounted on %s' % self.mountpoint
)
if self.hrd is None:
raise PartitionError('No HRD attached to disk')
fstype = self.hrd.get('filesystem')
command = self._formatter(self.name, fstype)
try:
self._executor.execute(command)
self._dumpHRD()
except Exception as e:
raise FormatError(e)
self.refresh()
def delete(self, force=False):
"""
Delete partition
:force: Force delete protected partitions, default False
"""
if self.invalid:
raise PartitionError('Partition is invalid')
if self.mountpoint:
raise PartitionError(
'Partition is mounted on %s' % self.mountpoint
)
if self.protected and not force:
raise PartitionError('Partition is protected')
m = re.match(r'^(.+)(\d+)$', self.name)
number = int(m.group(2))
device = m.group(1)
command = 'parted -s {device} rm {number}'.format(
device=device,
number=number
)
try:
self._executor.execute(command)
except Exception as e:
raise PartitionError(e)
self.unsetAutoMount()
self._invalid = True
|
|
import glob
import os
from . import BOTOCORE_ROOT
from .compat import json
from .compat import OrderedDict
from .exceptions import ApiVersionNotFoundError
from .exceptions import DataNotFoundError
def cachable(func):
"""
A convenient decorator for getting the data (either from the cache or
populating the cache).
For use on instances (not plain functions) that have a ``self._cache``
instance variable.
Usage::
class Loader(object):
@cachable
def load_service_model(self, service):
data = self.load_file(self, 'aws/{0}'.format(service))
return data
"""
def _wrapper(self, orig_key, **kwargs):
key = orig_key
# Make the full key, including all kwargs.
# Sort them to prevent hash randomization from creating accidental
# cache misses.
for name in sorted(kwargs.keys()):
key += '/{0}/{1}'.format(
name,
kwargs[name]
)
if key in self._cache:
return self._cache[key]
data = func(self, orig_key, **kwargs)
self._cache[key] = data
return data
return _wrapper
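# Cache-key sketch for the decorator above (service name and API version are
# made-up examples): a call such as
#
#     loader.load_service_model('aws/ec2', api_version='2013-02-01')
#
# is cached under the key 'aws/ec2/api_version/2013-02-01', i.e. the original
# key plus every keyword argument appended in sorted order, so repeated
# lookups with the same arguments hit self._cache directly.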
class JSONFileLoader(object):
"""
Handles actually loading the files themselves.
Split off as a separate class to allow for swapping with more complex
implementations.
"""
def load_file(self, file_path):
"""
Loads a regular data file (format-specific to subclass).
This load is done uncached, so that you can always get the latest data
as needed.
Usage::
>>> loader = JSONFileLoader()
>>> loader.load_file('/path/to/some/thing.json')
{
# ...JSON data...
}
"""
try:
with open(file_path) as fp:
return json.load(fp, object_pairs_hook=OrderedDict)
except ValueError:
# For backward-compatibility with the previous implementation,
# if the JSON is bad, we'll raise a ``DataNotFoundError`` exception
# instead of letting it propagate.
raise DataNotFoundError(data_path=file_path)
class Loader(object):
"""
Intelligently loads the data botocore needs.
Handles listing the available services, loading service data & loading
arbitrary data.
    Default implementation uses JSON files (the ``JSONFileLoader``) & a plain
    ``dict`` as the cache.
"""
file_loader_class = JSONFileLoader
extension = '.json'
service_extension = 'api.json'
def __init__(self, data_path='', file_loader_class=None, extension=None,
cache=None):
"""
Sets up the Loader.
        Requires a ``data_path`` argument, which should be a unix-style PATH
        variable (multiple file paths, ``os.pathsep``-delimited).
Optionally accepts a ``file_loader_class`` parameter, which should be a
class to use for loading files. Default is ``JSONFileLoader``.
Optionally accepts an ``extension`` parameter, which should be a
string of the file extension to use. Default is ``.json``.
        Optionally accepts a ``cache`` parameter, which should be an instance
        with the same interface as the built-in ``dict``. Default is ``None``
        (a plain ``dict`` is used).
"""
super(Loader, self).__init__()
self._data_path = data_path
self._cache = {}
if file_loader_class is not None:
self.file_loader_class = file_loader_class
if extension is not None:
self.extension = extension
if cache is not None:
self._cache = cache
self.file_loader = self.file_loader_class()
@property
def data_path(self):
return self._data_path
@data_path.setter
def data_path(self, value):
self._data_path = value
def get_search_paths(self):
"""
        Return all the paths that data could be found on when searching for
files.
Usage::
# Default:
>>> loader = Loader('/path/to/botocore/data')
>>> loader.get_search_paths()
[
'/path/to/botocore/data',
]
# User-added paths
>>> loader = Loader('~/.botocore/my_overrides:/path/to/botocore/data')
>>> loader.get_search_paths()
[
'/home/somebody/.botocore/my_overrides',
'/path/to/botocore/data',
]
"""
paths = []
# Now look for optional user-configured paths.
# We keep the order in a familiar manner of traditional UNIX paths
# (overrides first).
search_path = self.data_path
if search_path is not None:
extra_paths = search_path.split(os.pathsep)
for path in extra_paths:
path = os.path.expandvars(path)
path = os.path.expanduser(path)
paths.append(path)
# Automatically add ./botocore/data to the end of the
# data search path.
paths.append(os.path.join(BOTOCORE_ROOT, 'data'))
return paths
@cachable
def load_data(self, data_path):
"""
Either loads a regular data file (format-specific to subclass) or
returns previously loaded data from the cache.
Returns a dictionary of data from the file.
Usage::
>>> loader = Loader('/path/to/botocore/data')
>>> loader.load_data('aws/ec2/2013-02-01')
{
# ...EC2 service data...
}
>>> loader.load_data('_endpoints')
{
# ...Endpoint data...
}
"""
# Here, we'll cache it.
return self._load_data(data_path)
def _load_data(self, data_path):
# This is the uncached version for use with ``load_service_model``.
# We go in-order, returning the first matching path we find
# based on the search paths.
for possible_path in self.get_search_paths():
full_path = os.path.join(
possible_path,
data_path + self.extension
)
try:
return self.file_loader.load_file(full_path)
except IOError:
continue
# We didn't find anything that matched on any path.
raise DataNotFoundError(data_path=data_path)
@cachable
def load_service_model(self, data_path, api_version=None):
"""
Loads a given service's model data.
Requires a ``data_path`` parameter, which should be a string. This
        indicates the desired path to load, separated by slashes. It should
        **NOT** include absolute path information nor file extensions. (e.g.
``aws/ec2``, not ``/botocore/data/aws/ec2/2010-01-01.json``)
Optionally accepts an ``api_version`` parameter, which should be a
string of the desired API version. This is used when you want to pin to
a given API version rather than picking up the latest version.
An example looks like ``2013-08-27``. Default is ``None``, which means
pick the latest.
Returns a dictionary of service model data.
Usage::
>>> loader = Loader('/path/to/botocore/data')
>>> loader.load_service_model('aws/ec2')
{
# The latest EC2 service data...
'api_version': '2013-08-27',
# ...many more keys & values...
}
>>> loader.load_service_model('aws/ec2', api_version='2013-02-01')
{
# The EC2 service data for version 2013-02-01...
'api_version': '2013-02-01',
# ...many more keys & values...
}
"""
actual_data_path = self.determine_latest(
data_path,
api_version=api_version
)
# Use the private method, so that we don't double-cache.
return self._load_data(actual_data_path)
@cachable
def list_available_services(self, data_path):
"""
Loads all the service options available.
Requires a ``data_path`` parameter, which should be a string. This
        indicates the desired path to load, separated by slashes if needed.
Returns a list of service names.
Usage::
>>> loader = Loader('/path/to/botocore/data')
>>> loader.list_available_services('aws')
[
'autoscaling',
'cloudformation',
# ...
]
"""
options = []
for possible_path in self.get_search_paths():
option_glob = os.path.join(possible_path, data_path, '*')
for possible_option in glob.glob(option_glob):
if os.path.isdir(possible_option):
options.append(os.path.basename(possible_option))
return sorted(options)
def determine_latest(self, data_path, api_version=None):
"""
For given desired data_path, searches all possible locations for the
version of the data file that best matches.
This is used primarily for the service models themselves, which
typically have an API version attached to them.
Requires a ``data_path`` parameter, which should be a string. This
        indicates the desired path to load, separated by slashes. It should
        **NOT** include absolute path information nor file extensions. (e.g.
``aws/ec2``, not ``/botocore/data/aws/ec2/2010-01-01.json``)
Optionally accepts an ``api_version`` parameter, which should be a
string of the desired API version. This is used when you want to pin to
a given API version rather than picking up the latest version.
An example looks like ``2013-08-27``. Default is ``None``, which means
pick the latest.
        If the desired ``api_version`` cannot be found, the loader will pick
        the next best match that is backward-compatible with the provided
        version. If no compatible version can be found, an
``ApiVersionNotFoundError`` exception will be thrown.
Usage::
>>> loader = Loader('~/.botocore/my_overrides:/path/to/botocore/data')
# Just grabs the latest.
>>> loader.determine_latest('aws/rds')
'aws/rds/2013-05-15'
# Grabs the matching version.
>>> loader.determine_latest('aws/rds', api_version='2013-02-12')
'aws/rds/2013-02-12'
# Finds the best match.
>>> loader.determine_latest('aws/rds', api_version='2013-01-31')
'aws/rds/2013-01-10'
# Couldn't find a match.
>>> loader.determine_latest('aws/rds', api_version='2010-05-16')
# Traceback, then...
ApiVersionNotFoundError: Unable to load data aws/rds for: 2010-05-16
"""
all_options = []
best_match = None
# Hunt down the options.
for base_path in self.get_search_paths():
path = os.path.join(base_path, data_path)
# If it doesn't exist, skip it (might be in a later path).
if not os.path.exists(path):
continue
# If it's not a directory, we're not going to find versions.
# Carry on.
if not os.path.isdir(path):
continue
# If it's a directory, look inside for the right version.
glob_exp = os.path.join(path, '*' + self.service_extension)
options = glob.glob(glob_exp)
# No options == no dice. Move along.
            if not options:
continue
for raw_opt in options:
# Rip off the extension.
opt = os.path.splitext(raw_opt)[0]
# Cut off the path.
opt = opt.replace(path, '')
# If the left-most character is a path separator,
# remove that too.
if opt[0] == os.path.sep:
opt = opt[1:]
# One last check. Ensure it looks roughly like a versioned file.
                if opt.count('-') != 2:
continue
all_options.append(opt)
        if not all_options:
# We don't have any matches. Error out.
raise ApiVersionNotFoundError(
data_path=data_path,
api_version=api_version
)
# Reverse the list, so we can find the most correct/recent
# lexicographically.
all_options = sorted(all_options, reverse=True)
if api_version is None:
# We just care about the latest. Since they're in the proper order,
# simply use the first one.
best_match = all_options[0]
else:
# We need to look for an API version that either matches or is
# the first to come before that (and hence, backward-compatible).
for opt in all_options:
# ``opt`` will be something like "2014-01-01.api" so we need
# to strip off the ".api" part.
if opt.split('.')[0] == api_version:
best_match = opt
break
elif opt < api_version:
# Since it's in reverse sorted order & nothing previously
# matched, we know this is the closest API version that's
# backward-compatible.
best_match = opt
break
if not best_match:
# We didn't find anything. Error out.
raise ApiVersionNotFoundError(
data_path=data_path,
api_version=api_version
)
# We've got the best match. Make a real path out of it & return that
# for use elsewhere.
return os.path.join(data_path, best_match)
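# A standalone sketch (not botocore API) of the version-selection rule that
# ``determine_latest`` implements: reverse-sort the available versions and
# return the first one lexicographically <= the requested ``api_version``,
# e.g. _pick_api_version(['2013-02-12', '2013-01-10'], '2013-01-31')
# returns '2013-01-10'.
def _pick_api_version(available, api_version=None):
    options = sorted(available, reverse=True)
    if api_version is None:
        return options[0] if options else None
    for opt in options:
        if opt <= api_version:
            return opt
    return None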
|
|
import os
import tempfile
import uuid
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageField, ImageFieldFile
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.utils import six
try:
from PIL import Image
except ImportError:
Image = None
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
return Foo.objects.get(id=1)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, default=get_foo, related_name=b'bars')
class Whiz(models.Model):
CHOICES = (
('Group 1', (
(1, 'First'),
(2, 'Second'),
)
),
('Group 2', (
(3, 'Third'),
(4, 'Fourth'),
)
),
(0, 'Other'),
)
c = models.IntegerField(choices=CHOICES, null=True)
class Counter(six.Iterator):
def __init__(self):
self.n = 1
def __iter__(self):
return self
def __next__(self):
if self.n > 5:
raise StopIteration
else:
self.n += 1
return (self.n, 'val-' + str(self.n))
class WhizIter(models.Model):
c = models.IntegerField(choices=Counter(), null=True)
class WhizIterEmpty(models.Model):
c = models.CharField(choices=(x for x in []), blank=True, max_length=1)
class BigD(models.Model):
d = models.DecimalField(max_digits=38, decimal_places=30)
class FloatModel(models.Model):
size = models.FloatField()
class BigS(models.Model):
s = models.SlugField(max_length=255)
class SmallIntegerModel(models.Model):
value = models.SmallIntegerField()
class IntegerModel(models.Model):
value = models.IntegerField()
class BigIntegerModel(models.Model):
value = models.BigIntegerField()
null_value = models.BigIntegerField(null=True, blank=True)
class PositiveSmallIntegerModel(models.Model):
value = models.PositiveSmallIntegerField()
class PositiveIntegerModel(models.Model):
value = models.PositiveIntegerField()
class Post(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
class NullBooleanModel(models.Model):
nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
bfield = models.BooleanField(default=None)
string = models.CharField(max_length=10, default='abc')
class DateTimeModel(models.Model):
d = models.DateField()
dt = models.DateTimeField()
t = models.TimeField()
class DurationModel(models.Model):
field = models.DurationField()
class NullDurationModel(models.Model):
field = models.DurationField(null=True)
class PrimaryKeyCharModel(models.Model):
string = models.CharField(max_length=10, primary_key=True)
class FksToBooleans(models.Model):
"""Model with FKs to models with {Null,}BooleanField's, #15040"""
bf = models.ForeignKey(BooleanModel)
nbf = models.ForeignKey(NullBooleanModel)
class FkToChar(models.Model):
"""Model with FK to a model with a CharField primary key, #19299"""
out = models.ForeignKey(PrimaryKeyCharModel)
class RenamedField(models.Model):
modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))
class VerboseNameField(models.Model):
id = models.AutoField("verbose pk", primary_key=True)
field1 = models.BigIntegerField("verbose field1")
field2 = models.BooleanField("verbose field2", default=False)
field3 = models.CharField("verbose field3", max_length=10)
field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
field5 = models.DateField("verbose field5")
field6 = models.DateTimeField("verbose field6")
field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
field8 = models.EmailField("verbose field8")
field9 = models.FileField("verbose field9", upload_to="unused")
field10 = models.FilePathField("verbose field10")
field11 = models.FloatField("verbose field11")
# Don't want to depend on Pillow in this test
# field_image = models.ImageField("verbose field")
field12 = models.IntegerField("verbose field12")
field13 = models.GenericIPAddressField("verbose field13", protocol="ipv4")
field14 = models.NullBooleanField("verbose field14")
field15 = models.PositiveIntegerField("verbose field15")
field16 = models.PositiveSmallIntegerField("verbose field16")
field17 = models.SlugField("verbose field17")
field18 = models.SmallIntegerField("verbose field18")
field19 = models.TextField("verbose field19")
field20 = models.TimeField("verbose field20")
field21 = models.URLField("verbose field21")
field22 = models.UUIDField("verbose field22")
field23 = models.DurationField("verbose field23")
class GenericIPAddress(models.Model):
ip = models.GenericIPAddressField(null=True, protocol='ipv4')
###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
d = models.DecimalField(max_digits=3, decimal_places=3)
# See ticket #18389.
class FieldClassAttributeModel(models.Model):
field_class = models.CharField
###############################################################################
class DataModel(models.Model):
short_data = models.BinaryField(max_length=10, default=b'\x08')
data = models.BinaryField()
###############################################################################
# FileField
class Document(models.Model):
myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If Pillow available, do these tests.
if Image:
class TestImageFieldFile(ImageFieldFile):
"""
Custom Field File class that records whether or not the underlying file
was opened.
"""
def __init__(self, *args, **kwargs):
self.was_opened = False
super(TestImageFieldFile, self).__init__(*args, **kwargs)
def open(self):
self.was_opened = True
super(TestImageFieldFile, self).open()
class TestImageField(ImageField):
attr_class = TestImageFieldFile
# Set up a temp directory for file storage.
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
class Person(models.Model):
"""
Model that defines an ImageField with no dimension fields.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests')
class AbsctractPersonWithHeight(models.Model):
"""
Abstract model that defines an ImageField with only one dimension field
to make sure the dimension update is correctly run on concrete subclass
instance post-initialization.
"""
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height')
mugshot_height = models.PositiveSmallIntegerField()
class Meta:
abstract = True
class PersonWithHeight(AbsctractPersonWithHeight):
"""
    Concrete model that subclasses an abstract one with only one
    dimension field.
"""
name = models.CharField(max_length=50)
class PersonWithHeightAndWidth(models.Model):
"""
Model that defines height and width fields after the ImageField.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
class PersonDimensionsFirst(models.Model):
"""
Model that defines height and width fields before the ImageField.
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
class PersonTwoImages(models.Model):
"""
Model that:
* Defines two ImageFields
* Defines the height/width fields before the ImageFields
    * Has a nullable ImageField
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
headshot_height = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot_width = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot = TestImageField(blank=True, null=True,
storage=temp_storage, upload_to='tests',
height_field='headshot_height',
width_field='headshot_width')
class AllFieldsModel(models.Model):
big_integer = models.BigIntegerField()
binary = models.BinaryField()
boolean = models.BooleanField(default=False)
char = models.CharField(max_length=10)
csv = models.CommaSeparatedIntegerField(max_length=10)
date = models.DateField()
datetime = models.DateTimeField()
decimal = models.DecimalField(decimal_places=2, max_digits=2)
duration = models.DurationField()
email = models.EmailField()
file_path = models.FilePathField()
floatf = models.FloatField()
integer = models.IntegerField()
generic_ip = models.GenericIPAddressField()
null_boolean = models.NullBooleanField()
positive_integer = models.PositiveIntegerField()
positive_small_integer = models.PositiveSmallIntegerField()
slug = models.SlugField()
small_integer = models.SmallIntegerField()
text = models.TextField()
time = models.TimeField()
url = models.URLField()
uuid = models.UUIDField()
fo = ForeignObject(
'self',
from_fields=['abstract_non_concrete_id'],
to_fields=['id'],
related_name='reverse'
)
fk = ForeignKey(
'self',
related_name='reverse2'
)
m2m = ManyToManyField('self')
oto = OneToOneField('self')
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
gfk = GenericForeignKey()
gr = GenericRelation(DataModel)
###############################################################################
class UUIDModel(models.Model):
field = models.UUIDField()
class NullableUUIDModel(models.Model):
field = models.UUIDField(blank=True, null=True)
class PrimaryKeyUUIDModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
class RelatedToUUIDModel(models.Model):
uuid_fk = models.ForeignKey('PrimaryKeyUUIDModel')
class UUIDChild(PrimaryKeyUUIDModel):
pass
class UUIDGrandchild(UUIDChild):
pass
|
|
"""This module defines a base Exporter class. For Jinja template-based export,
see templateexporter.py.
"""
from __future__ import print_function, absolute_import
import io
import os
import copy
import collections
import datetime
from traitlets.config.configurable import LoggingConfigurable
from traitlets.config import Config
import nbformat
from traitlets import HasTraits, Unicode, List, TraitError
from ipython_genutils.importstring import import_item
from ipython_genutils import text, py3compat
class ResourcesDict(collections.defaultdict):
def __missing__(self, key):
return ''
class FilenameExtension(Unicode):
"""A trait for filename extensions."""
default_value = u''
info_text = 'a filename extension, beginning with a dot'
def validate(self, obj, value):
# cast to proper unicode
value = super(FilenameExtension, self).validate(obj, value)
# check that it starts with a dot
if value and not value.startswith('.'):
msg = "FileExtension trait '{}' does not begin with a dot: {!r}"
raise TraitError(msg.format(self.name, value))
return value
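# Example of the validation above: assigning '.html' to a FilenameExtension
# trait passes, while 'html' raises a TraitError because it lacks the leading
# dot.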
class Exporter(LoggingConfigurable):
"""
Class containing methods that sequentially run a list of preprocessors on a
NotebookNode object and then return the modified NotebookNode object and
accompanying resources dict.
"""
file_extension = FilenameExtension(
'.txt', config=True,
help="Extension of the file that should be written to disk"
)
# MIME type of the result file, for HTTP response headers.
# This is *not* a traitlet, because we want to be able to access it from
# the class, not just on instances.
output_mimetype = ''
    # Configurability: allows the user to easily add filters and preprocessors.
preprocessors = List(config=True,
help="""List of preprocessors, by name or namespace, to enable.""")
_preprocessors = List()
default_preprocessors = List([
'nbconvert.preprocessors.ClearOutputPreprocessor',
'nbconvert.preprocessors.ExecutePreprocessor',
'nbconvert.preprocessors.coalesce_streams',
'nbconvert.preprocessors.SVG2PDFPreprocessor',
'nbconvert.preprocessors.CSSHTMLHeaderPreprocessor',
'nbconvert.preprocessors.LatexPreprocessor',
'nbconvert.preprocessors.HighlightMagicsPreprocessor',
'nbconvert.preprocessors.ExtractOutputPreprocessor',
],
config=True,
help="""List of preprocessors available by default, by name, namespace,
instance, or type.""")
def __init__(self, config=None, **kw):
"""
Public constructor
Parameters
----------
config : :class:`~traitlets.config.Config`
User configuration instance.
`**kw`
Additional keyword arguments passed to parent __init__
"""
with_default_config = self.default_config
if config:
with_default_config.merge(config)
super(Exporter, self).__init__(config=with_default_config, **kw)
self._init_preprocessors()
@property
def default_config(self):
return Config()
def from_notebook_node(self, nb, resources=None, **kw):
"""
Convert a notebook from a notebook node instance.
Parameters
----------
nb : :class:`~nbformat.NotebookNode`
Notebook node (dict-like with attr-access)
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
`**kw`
Ignored
"""
nb_copy = copy.deepcopy(nb)
resources = self._init_resources(resources)
if 'language' in nb['metadata']:
resources['language'] = nb['metadata']['language'].lower()
# Preprocess
nb_copy, resources = self._preprocess(nb_copy, resources)
return nb_copy, resources
def from_filename(self, filename, resources=None, **kw):
"""
Convert a notebook from a notebook file.
Parameters
----------
filename : str
Full filename of the notebook file to open and convert.
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
`**kw`
Ignored
"""
# Pull the metadata from the filesystem.
if resources is None:
resources = ResourcesDict()
        if 'metadata' not in resources or resources['metadata'] == '':
resources['metadata'] = ResourcesDict()
path, basename = os.path.split(filename)
notebook_name = basename[:basename.rfind('.')]
resources['metadata']['name'] = notebook_name
resources['metadata']['path'] = path
modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
resources['metadata']['modified_date'] = modified_date.strftime(text.date_format)
with io.open(filename, encoding='utf-8') as f:
return self.from_file(f, resources=resources, **kw)
def from_file(self, file_stream, resources=None, **kw):
"""
Convert a notebook from a notebook file.
Parameters
----------
file_stream : file-like object
Notebook file-like object to convert.
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
`**kw`
Ignored
"""
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
def register_preprocessor(self, preprocessor, enabled=False):
"""
Register a preprocessor.
Preprocessors are classes that act upon the notebook before it is
passed into the Jinja templating engine. preprocessors are also
capable of passing additional information to the Jinja
templating engine.
Parameters
----------
preprocessor : :class:`~nbconvert.preprocessors.Preprocessor`
A dotted module name, a type, or an instance
enabled : bool
Mark the preprocessor as enabled
"""
if preprocessor is None:
raise TypeError('preprocessor')
isclass = isinstance(preprocessor, type)
constructed = not isclass
        # Handle the preprocessor's registration based on its type
if constructed and isinstance(preprocessor, py3compat.string_types):
# Preprocessor is a string, import the namespace and recursively call
# this register_preprocessor method
preprocessor_cls = import_item(preprocessor)
return self.register_preprocessor(preprocessor_cls, enabled)
if constructed and hasattr(preprocessor, '__call__'):
# Preprocessor is a function, no need to construct it.
# Register and return the preprocessor.
if enabled:
preprocessor.enabled = True
self._preprocessors.append(preprocessor)
return preprocessor
elif isclass and issubclass(preprocessor, HasTraits):
# Preprocessor is configurable. Make sure to pass in new default for
# the enabled flag if one was specified.
self.register_preprocessor(preprocessor(parent=self), enabled)
elif isclass:
# Preprocessor is not configurable, construct it
self.register_preprocessor(preprocessor(), enabled)
else:
# Preprocessor is an instance of something without a __call__
# attribute.
raise TypeError('preprocessor')
def _init_preprocessors(self):
"""
Register all of the preprocessors needed for this exporter, disabled
unless specified explicitly.
"""
self._preprocessors = []
        # Load default preprocessors (not necessarily enabled by default).
for preprocessor in self.default_preprocessors:
self.register_preprocessor(preprocessor)
# Load user-specified preprocessors. Enable by default.
for preprocessor in self.preprocessors:
self.register_preprocessor(preprocessor, enabled=True)
def _init_resources(self, resources):
        # Make sure the resources dict is of ResourcesDict type.
if resources is None:
resources = ResourcesDict()
if not isinstance(resources, ResourcesDict):
new_resources = ResourcesDict()
new_resources.update(resources)
resources = new_resources
        # Make sure the metadata extension exists in resources
if 'metadata' in resources:
if not isinstance(resources['metadata'], ResourcesDict):
new_metadata = ResourcesDict()
new_metadata.update(resources['metadata'])
resources['metadata'] = new_metadata
else:
resources['metadata'] = ResourcesDict()
if not resources['metadata']['name']:
resources['metadata']['name'] = 'Notebook'
        # Set the output extension
resources['output_extension'] = self.file_extension
return resources
def _preprocess(self, nb, resources):
"""
Preprocess the notebook before passing it into the Jinja engine.
To preprocess the notebook is to successively apply all the
enabled preprocessors. Output from each preprocessor is passed
along to the next one.
Parameters
----------
nb : notebook node
notebook that is being exported.
resources : a dict of additional resources that
can be accessed read/write by preprocessors
"""
        # Do a copy.deepcopy first; we can never be too careful about what
        # the preprocessors might do.
nbc = copy.deepcopy(nb)
resc = copy.deepcopy(resources)
        # Run each preprocessor on the notebook. Carry the output along
        # to each preprocessor.
for preprocessor in self._preprocessors:
nbc, resc = preprocessor(nbc, resc)
return nbc, resc
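# A minimal usage sketch of the base ``Exporter``: read a notebook from disk
# and run the enabled preprocessors over it, returning the processed
# NotebookNode plus the resources dict. The filename 'example.ipynb' is an
# assumption made purely for illustration.
def _demo_export(filename='example.ipynb'):
    exporter = Exporter()
    nb, resources = exporter.from_filename(filename)
    return nb, resources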
|
|
import csv
from datetime import datetime, date
import json
import os
import re
import six
import tempfile
from urlparse import urlparse
from zipfile import ZipFile
from bson import json_util
from django.conf import settings
from django.core.files.base import File
from django.core.files.temp import NamedTemporaryFile
from django.core.files.storage import get_storage_class
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.utils.text import slugify
from openpyxl.date_time import SharedDate
from openpyxl.workbook import Workbook
from pyxform.question import Question
from pyxform.section import Section, RepeatingSection
from savReaderWriter import SavWriter
from json2xlsclient.client import Client
from onadata.apps.fieldsight.models import Site
from onadata.apps.logger.models import Attachment, Instance, XForm
from onadata.apps.main.models.meta_data import MetaData
from onadata.apps.viewer.models.export import Export
from onadata.apps.viewer.models.parsed_instance import\
_is_invalid_for_mongo, _encode_for_mongo, dict_for_mongo,\
_decode_from_mongo
from onadata.libs.utils.viewer_tools import create_attachments_zipfile
from onadata.libs.utils.common_tags import (
ID, XFORM_ID_STRING, STATUS, ATTACHMENTS, GEOLOCATION, BAMBOO_DATASET_ID,
DELETEDAT, USERFORM_ID, INDEX, PARENT_INDEX, PARENT_TABLE_NAME,
SUBMISSION_TIME, UUID, TAGS, NOTES, SITE, FS_STATUS, FS_UUID, FS_PROJECT_UUID, FS_SITE_IDENTIFIER, FS_SITE_NAME,
FS_SITE_ADDRESS, FS_SITE_PHONE, FS_SITE_SUPERVISOR)
from onadata.libs.exceptions import J2XException
from .analyser_export import generate_analyser
from onadata.apps.fsforms.XFormMediaAttributes import get_questions_and_media_attributes
from onadata.apps.fsforms.models import FInstance
# this is the Mongo collection where the parsed submissions are stored
xform_instances = settings.MONGO_DB.instances
QUESTION_TYPES_TO_EXCLUDE = [
u'note',
]
# the bind type of select multiples that we use to compare
MULTIPLE_SELECT_BIND_TYPE = u"select"
GEOPOINT_BIND_TYPE = u"geopoint"
def encode_if_str(row, key, encode_dates=False):
val = row.get(key)
if isinstance(val, six.string_types):
return val.encode('utf-8')
if encode_dates and isinstance(val, datetime):
return val.strftime('%Y-%m-%dT%H:%M:%S%z').encode('utf-8')
if encode_dates and isinstance(val, date):
return val.strftime('%Y-%m-%d').encode('utf-8')
return val
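# Example behaviour of ``encode_if_str`` (illustrative values):
#   encode_if_str({'name': u'Kathmandu'}, 'name')             -> 'Kathmandu' (UTF-8 bytes)
#   encode_if_str({'when': date(2018, 1, 31)}, 'when', True)  -> '2018-01-31'
#   encode_if_str({'n': 7}, 'n')                              -> 7 (non-strings pass through)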
def question_types_to_exclude(_type):
return _type in QUESTION_TYPES_TO_EXCLUDE
class DictOrganizer(object):
def set_dict_iterator(self, dict_iterator):
self._dict_iterator = dict_iterator
# Every section will get its own table
# I need to think of an easy way to flatten out a dictionary
# parent name, index, table name, data
def _build_obs_from_dict(self, d, obs, table_name,
parent_table_name, parent_index):
if table_name not in obs:
obs[table_name] = []
this_index = len(obs[table_name])
obs[table_name].append({
u"_parent_table_name": parent_table_name,
u"_parent_index": parent_index,
})
for k, v in d.items():
if type(v) != dict and type(v) != list:
assert k not in obs[table_name][-1]
obs[table_name][-1][k] = v
obs[table_name][-1][u"_index"] = this_index
for k, v in d.items():
if type(v) == dict:
kwargs = {
"d": v,
"obs": obs,
"table_name": k,
"parent_table_name": table_name,
"parent_index": this_index
}
self._build_obs_from_dict(**kwargs)
if type(v) == list:
for i, item in enumerate(v):
kwargs = {
"d": item,
"obs": obs,
"table_name": k,
"parent_table_name": table_name,
"parent_index": this_index,
}
self._build_obs_from_dict(**kwargs)
return obs
def get_observation_from_dict(self, d):
result = {}
assert len(d.keys()) == 1
root_name = d.keys()[0]
kwargs = {
"d": d[root_name],
"obs": result,
"table_name": root_name,
"parent_table_name": u"",
"parent_index": -1,
}
self._build_obs_from_dict(**kwargs)
return result
def dict_to_joined_export(data, index, indices, name):
"""
Converts a dict into one or more tabular datasets
"""
output = {}
# TODO: test for _geolocation and attachment lists
if isinstance(data, dict):
for key, val in data.iteritems():
if isinstance(val, list) and key not in [NOTES, TAGS]:
output[key] = []
for child in val:
if key not in indices:
indices[key] = 0
indices[key] += 1
child_index = indices[key]
new_output = dict_to_joined_export(
child, child_index, indices, key)
d = {INDEX: child_index, PARENT_INDEX: index,
PARENT_TABLE_NAME: name}
# iterate over keys within new_output and append to
# main output
for out_key, out_val in new_output.iteritems():
if isinstance(out_val, list):
if out_key not in output:
output[out_key] = []
output[out_key].extend(out_val)
else:
d.update(out_val)
output[key].append(d)
else:
if name not in output:
output[name] = {}
if key in [TAGS]:
output[name][key] = ",".join(val)
elif key in [NOTES]:
output[name][key] = "\r\n".join(
[v['note'] for v in val])
else:
output[name][key] = val
return output
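# Hedged illustration of ``dict_to_joined_export``: a submission containing a
# repeat group is flattened into one table per section, with repeat rows
# carrying ``_index``/``_parent_index``/``_parent_table_name`` back-links.
# The field names below are invented for the example.
def _demo_joined_export():
    submission = {
        'household/name': 'Site A',
        'children': [{'children/age': 3}, {'children/age': 7}],
    }
    # Returns a dict keyed by table name: 'survey' holds the top-level values,
    # 'children' holds one row per repeat instance.
    return dict_to_joined_export(submission, 1, {}, 'survey')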
class ExportBuilder(object):
IGNORED_COLUMNS = [XFORM_ID_STRING, STATUS, ATTACHMENTS, GEOLOCATION,
BAMBOO_DATASET_ID, DELETEDAT]
# fields we export but are not within the form's structure
EXTRA_FIELDS = [ID, UUID, SUBMISSION_TIME, INDEX, PARENT_TABLE_NAME,
PARENT_INDEX, TAGS, NOTES, SITE, FS_PROJECT_UUID, FS_UUID,
FS_STATUS, FS_SITE_IDENTIFIER, FS_SITE_NAME, FS_SITE_ADDRESS, FS_SITE_PHONE, FS_SITE_SUPERVISOR]
SPLIT_SELECT_MULTIPLES = True
BINARY_SELECT_MULTIPLES = False
# column group delimiters
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
TYPES_TO_CONVERT = ['int', 'decimal', 'date'] # , 'dateTime']
CONVERT_FUNCS = {
'int': lambda x: int(x),
'decimal': lambda x: float(x),
'date': lambda x: ExportBuilder.string_to_date_with_xls_validation(x),
'dateTime': lambda x: datetime.strptime(x[:19], '%Y-%m-%dT%H:%M:%S')
}
XLS_SHEET_NAME_MAX_CHARS = 31
@classmethod
def string_to_date_with_xls_validation(cls, date_str):
date_obj = datetime.strptime(date_str, '%Y-%m-%d').date()
try:
SharedDate().datetime_to_julian(date_obj)
except ValueError:
return date_str
else:
return date_obj
@classmethod
def format_field_title(cls, abbreviated_xpath, field_delimiter):
if field_delimiter != '/':
return field_delimiter.join(abbreviated_xpath.split('/'))
return abbreviated_xpath
def set_survey(self, survey):
# TODO resolve circular import
from onadata.apps.viewer.models.data_dictionary import\
DataDictionary
def build_sections(
current_section, survey_element, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter='/'):
for child in survey_element.children:
current_section_name = current_section['name']
                # if it's a section, recurse
if isinstance(child, Section):
                    # if it's repeating, build a new section
if isinstance(child, RepeatingSection):
# section_name in recursive call changes
section = {
'name': child.get_abbreviated_xpath(),
'elements': []}
self.sections.append(section)
build_sections(
section, child, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter)
else:
                    # it's a group, recurse using the same section
build_sections(
current_section, child, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter)
elif isinstance(child, Question) and child.bind.get(u"type")\
not in QUESTION_TYPES_TO_EXCLUDE:
# add to survey_sections
if isinstance(child, Question):
child_xpath = child.get_abbreviated_xpath()
current_section['elements'].append({
'title': ExportBuilder.format_field_title(
child.get_abbreviated_xpath(),
field_delimiter),
'xpath': child_xpath,
'type': child.bind.get(u"type")
})
if _is_invalid_for_mongo(child_xpath):
if current_section_name not in encoded_fields:
encoded_fields[current_section_name] = {}
encoded_fields[current_section_name].update(
{child_xpath: _encode_for_mongo(child_xpath)})
                        # if it's a select multiple, make columns out of its choices
if child.bind.get(u"type") == MULTIPLE_SELECT_BIND_TYPE\
and self.SPLIT_SELECT_MULTIPLES:
for c in child.children:
_xpath = c.get_abbreviated_xpath()
_title = ExportBuilder.format_field_title(
_xpath, field_delimiter)
choice = {
'title': _title,
'xpath': _xpath,
'type': 'string'
}
if choice not in current_section['elements']:
current_section['elements'].append(choice)
_append_xpaths_to_section(
current_section_name, select_multiples,
child.get_abbreviated_xpath(),
[c.get_abbreviated_xpath()
for c in child.children])
# split gps fields within this section
if child.bind.get(u"type") == GEOPOINT_BIND_TYPE:
# add columns for geopoint components
xpaths = DataDictionary.get_additional_geopoint_xpaths(
child.get_abbreviated_xpath())
current_section['elements'].extend(
[
{
'title': ExportBuilder.format_field_title(
xpath, field_delimiter),
'xpath': xpath,
'type': 'decimal'
}
for xpath in xpaths
])
_append_xpaths_to_section(
current_section_name, gps_fields,
child.get_abbreviated_xpath(), xpaths)
def _append_xpaths_to_section(current_section_name, field_list, xpath,
xpaths):
if current_section_name not in field_list:
field_list[current_section_name] = {}
field_list[
current_section_name][xpath] = xpaths
self.survey = survey
self.select_multiples = {}
self.gps_fields = {}
self.encoded_fields = {}
main_section = {'name': survey.name, 'elements': []}
self.sections = [main_section]
build_sections(
main_section, self.survey, self.sections,
self.select_multiples, self.gps_fields, self.encoded_fields,
self.GROUP_DELIMITER)
def section_by_name(self, name):
matches = filter(lambda s: s['name'] == name, self.sections)
assert(len(matches) == 1)
return matches[0]
@classmethod
def split_select_multiples(cls, row, select_multiples):
# for each select_multiple, get the associated data and split it
for xpath, choices in select_multiples.iteritems():
# get the data matching this xpath
data = row.get(xpath)
selections = []
if data:
selections = [
u'{0}/{1}'.format(
xpath, selection) for selection in data.split()]
if not cls.BINARY_SELECT_MULTIPLES:
row.update(dict(
[(choice, choice in selections if selections else None)
for choice in choices]))
else:
YES = 1
NO = 0
row.update(dict(
[(choice, YES if choice in selections else NO)
for choice in choices]))
return row
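    # Example of the split described above (choice names are invented): a
    # stored answer 'apple mango' for xpath 'fruits' with choices
    # ['fruits/apple', 'fruits/mango', 'fruits/pear'] becomes
    # {'fruits/apple': True, 'fruits/mango': True, 'fruits/pear': False},
    # or 1/0 per choice when BINARY_SELECT_MULTIPLES is True.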
@classmethod
def split_gps_components(cls, row, gps_fields):
# for each gps_field, get associated data and split it
for xpath, gps_components in gps_fields.iteritems():
data = row.get(xpath)
if data:
gps_parts = data.split()
if len(gps_parts) > 0:
row.update(zip(gps_components, gps_parts))
return row
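    # Example: a geopoint value '27.70 85.32 1300.0 5.0' is zipped onto the
    # component column names registered for that xpath in set_survey (the
    # additional latitude/longitude/altitude/precision xpaths), so each
    # component lands in its own column.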
@classmethod
def decode_mongo_encoded_fields(cls, row, encoded_fields):
for xpath, encoded_xpath in encoded_fields.iteritems():
if row.get(encoded_xpath):
val = row.pop(encoded_xpath)
row.update({xpath: val})
return row
@classmethod
def decode_mongo_encoded_section_names(cls, data):
return dict([(_decode_from_mongo(k), v) for k, v in data.iteritems()])
@classmethod
def convert_type(cls, value, data_type):
"""
Convert data to its native type e.g. string '1' to int 1
@param value: the string value to convert
@param data_type: the native data type to convert to
@return: the converted value
"""
func = ExportBuilder.CONVERT_FUNCS.get(data_type, lambda x: x)
try:
return func(value)
except ValueError:
return value
def pre_process_row(self, row, section):
# print(type(row),"Row##################################")
"""
Split select multiples, gps and decode . and $
"""
section_name = section['name']
# first decode fields so that subsequent lookups
# have decoded field names
if section_name in self.encoded_fields:
row = ExportBuilder.decode_mongo_encoded_fields(
row, self.encoded_fields[section_name])
if self.SPLIT_SELECT_MULTIPLES and\
section_name in self.select_multiples:
row = ExportBuilder.split_select_multiples(
row, self.select_multiples[section_name])
if section_name in self.gps_fields:
row = ExportBuilder.split_gps_components(
row, self.gps_fields[section_name])
# convert to native types
# print row
for elm in section['elements']:
            # only convert if it's in our list and it's not empty, just to
            # optimize
value = row.get(elm['xpath'])
if elm['type'] in ExportBuilder.TYPES_TO_CONVERT\
and value is not None and value != '':
row[elm['xpath']] = ExportBuilder.convert_type(
value, elm['type'])
try:
site_id = row['fs_site']
site = Site.objects.get(pk=site_id)
except Exception as e:
print(str(e)," **************** NO Site")
# print(str(row))
row['site_name'] = ""
row['address'] = ""
row['phone'] = ""
row['identifier'] = ""
else:
row['site_name'] = site.name
row['address'] = site.address
row['phone'] = site.phone
row['identifier'] = site.identifier
return row
def to_zipped_csv(self, path, data, *args):
def write_row(row, csv_writer, fields):
csv_writer.writerow(
[encode_if_str(row, field) for field in fields])
csv_defs = {}
for section in self.sections:
csv_file = NamedTemporaryFile(suffix=".csv")
csv_writer = csv.writer(csv_file)
csv_defs[section['name']] = {
'csv_file': csv_file, 'csv_writer': csv_writer}
# write headers
for section in self.sections:
fields = [element['title'] for element in section['elements']]\
+ self.EXTRA_FIELDS
csv_defs[section['name']]['csv_writer'].writerow(
[f.encode('utf-8') for f in fields])
index = 1
indices = {}
survey_name = self.survey.name
for d in data:
# decode mongo section names
joined_export = dict_to_joined_export(d, index, indices,
survey_name)
output = ExportBuilder.decode_mongo_encoded_section_names(
joined_export)
# attach meta fields (index, parent_index, parent_table)
# output has keys for every section
if survey_name not in output:
output[survey_name] = {}
output[survey_name][INDEX] = index
output[survey_name][PARENT_INDEX] = -1
for section in self.sections:
# get data for this section and write to csv
section_name = section['name']
csv_def = csv_defs[section_name]
fields = [
element['xpath'] for element in
section['elements']] + self.EXTRA_FIELDS
csv_writer = csv_def['csv_writer']
# section name might not exist within the output, e.g. data was
# not provided for said repeat - write test to check this
row = output.get(section_name, None)
if type(row) == dict:
write_row(
self.pre_process_row(row, section),
csv_writer, fields)
elif type(row) == list:
for child_row in row:
write_row(
self.pre_process_row(child_row, section),
csv_writer, fields)
index += 1
# write zipfile
with ZipFile(path, 'w') as zip_file:
for section_name, csv_def in csv_defs.iteritems():
csv_file = csv_def['csv_file']
csv_file.seek(0)
zip_file.write(
csv_file.name, "_".join(section_name.split("/")) + ".csv")
# close files when we are done
for section_name, csv_def in csv_defs.iteritems():
csv_def['csv_file'].close()
@classmethod
def get_valid_sheet_name(cls, desired_name, existing_names):
        # a sheet name has to be <= 31 characters and not a duplicate of an
        # existing sheet
        # truncate desired_name to XLS_SHEET_NAME_MAX_CHARS
new_sheet_name = \
desired_name[:cls.XLS_SHEET_NAME_MAX_CHARS]
        # make sure it's unique within the list
i = 1
generated_name = new_sheet_name
while generated_name in existing_names:
digit_length = len(str(i))
allowed_name_len = cls.XLS_SHEET_NAME_MAX_CHARS - \
digit_length
# make name the required len
if len(generated_name) > allowed_name_len:
generated_name = generated_name[:allowed_name_len]
generated_name = "{0}{1}".format(generated_name, i)
i += 1
return generated_name
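    # Example: get_valid_sheet_name('household_members', ['household_members'])
    # returns 'household_members1'; names longer than 31 characters are
    # truncated before the numeric suffix is appended.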
def to_xls_export(self, path, data, username, id_string, *args):
xform = XForm.objects.get(
user__username__iexact=username, id_string__exact=id_string)
json_question = json.loads(xform.json)
parsedQuestions = get_questions_and_media_attributes(json_question['children'])
from django.contrib.sites.models import Site as DjangoSite
domain = DjangoSite.objects.get_current().domain
def write_row(data, work_sheet, fields, work_sheet_titles):
# work_sheet_titles = work_sheet_titles.append("fs_site")
# update parent_table with the generated sheet's title
data[PARENT_TABLE_NAME] = work_sheet_titles.get(
data.get(PARENT_TABLE_NAME))
data_new = []
for f in fields:
if f in data and f in parsedQuestions.get('media_attributes'):
data_new.append('=HYPERLINK("http://'+domain+'/attachment/medium?media_file='+xform.user.username+'/attachments/'+data.get(f)+'", "Attachment")')
else:
if f == "fs_status":
try:
status=FInstance.objects.get(instance_id=data.get('_id')).get_form_status_display()
except:
status="No Status"
data_new.append(status)
else:
data_new.append(data.get(f,''))
work_sheet.append(data_new)
wb = Workbook(optimized_write=True)
work_sheets = {}
# map of section_names to generated_names
work_sheet_titles = {}
for section in self.sections:
section_name = section['name']
work_sheet_title = ExportBuilder.get_valid_sheet_name(
"_".join(section_name.split("/")), work_sheet_titles.values())
work_sheet_titles[section_name] = work_sheet_title
work_sheets[section_name] = wb.create_sheet(
title=work_sheet_title)
# write the headers
for section in self.sections:
section_name = section['name']
headers = [
element['title'] for element in
section['elements']] + self.EXTRA_FIELDS
# get the worksheet
ws = work_sheets[section_name]
ws.append(headers)
index = 1
indices = {}
survey_name = self.survey.name
for d in data:
joined_export = dict_to_joined_export(d, index, indices,
survey_name)
output = ExportBuilder.decode_mongo_encoded_section_names(
joined_export)
# attach meta fields (index, parent_index, parent_table)
# output has keys for every section
if survey_name not in output:
output[survey_name] = {}
output[survey_name][INDEX] = index
output[survey_name][PARENT_INDEX] = -1
for section in self.sections:
# get data for this section and write to xls
section_name = section['name']
fields = [
element['xpath'] for element in
section['elements']] + self.EXTRA_FIELDS
ws = work_sheets[section_name]
# section might not exist within the output, e.g. data was
# not provided for said repeat - write test to check this
row = output.get(section_name, None)
if type(row) == dict:
write_row(
self.pre_process_row(row, section),
ws, fields, work_sheet_titles)
elif type(row) == list:
for child_row in row:
write_row(
self.pre_process_row(child_row, section),
ws, fields, work_sheet_titles)
index += 1
wb.save(filename=path)
def to_analyser_export(self, path, data, username, xform_id_string, *args):
# Get the XLSForm.
xform = XForm.objects.get(user__username__iexact=username, id_string__exact=xform_id_string)
xlsform_io= xform.to_xlsform()
if xlsform_io is None:
raise RuntimeError('XLSForm `{}` for user `{}` could not be retrieved from storage.'.
format(xform_id_string, username))
prefix = slugify('analyser_data__{}__{}'.format(username, xform_id_string))
with tempfile.NamedTemporaryFile('w+b', prefix=prefix, suffix='.xlsx',) as xls_data:
# Generate a new XLS export to work from.
            self.to_xls_export(xls_data.name, data, username, xform_id_string)
xls_data.file.seek(0)
# Generate the analyser file.
analyser_io= generate_analyser(xlsform_io, xls_data)
# Write the generated analyser file to the specified path
# ...which itself points to a temp file.
with open(path, 'wb') as analyser_file:
analyser_file.write(analyser_io.read())
def to_flat_csv_export(
self, path, data, username, id_string, filter_query):
# TODO resolve circular import
from onadata.apps.viewer.pandas_mongo_bridge import\
CSVDataFrameBuilder
csv_builder = CSVDataFrameBuilder(
username, id_string, filter_query, self.GROUP_DELIMITER,
self.SPLIT_SELECT_MULTIPLES, self.BINARY_SELECT_MULTIPLES)
csv_builder.export_to(path)
def to_zipped_sav(self, path, data, *args):
def write_row(row, csv_writer, fields):
sav_writer.writerow(
[encode_if_str(row, field, True) for field in fields])
sav_defs = {}
# write headers
for section in self.sections:
fields = [element['title'] for element in section['elements']]\
+ self.EXTRA_FIELDS
c = 0
var_labels = {}
var_names = []
tmp_k = {}
for field in fields:
c += 1
var_name = 'var%d' % c
var_labels[var_name] = field
var_names.append(var_name)
tmp_k[field] = var_name
var_types = dict(
[(tmp_k[element['title']],
0 if element['type'] in ['decimal', 'int'] else 255)
for element in section['elements']]
+ [(tmp_k[item],
0 if item in ['_id', '_index', '_parent_index'] else 255)
for item in self.EXTRA_FIELDS]
)
sav_file = NamedTemporaryFile(suffix=".sav")
sav_writer = SavWriter(sav_file.name, varNames=var_names,
varTypes=var_types,
varLabels=var_labels, ioUtf8=True)
sav_defs[section['name']] = {
'sav_file': sav_file, 'sav_writer': sav_writer}
index = 1
indices = {}
survey_name = self.survey.name
for d in data:
# decode mongo section names
joined_export = dict_to_joined_export(d, index, indices,
survey_name)
output = ExportBuilder.decode_mongo_encoded_section_names(
joined_export)
# attach meta fields (index, parent_index, parent_table)
# output has keys for every section
if survey_name not in output:
output[survey_name] = {}
output[survey_name][INDEX] = index
output[survey_name][PARENT_INDEX] = -1
for section in self.sections:
# get data for this section and write to csv
section_name = section['name']
sav_def = sav_defs[section_name]
fields = [
element['xpath'] for element in
section['elements']] + self.EXTRA_FIELDS
sav_writer = sav_def['sav_writer']
row = output.get(section_name, None)
if type(row) == dict:
write_row(
self.pre_process_row(row, section),
sav_writer, fields)
elif type(row) == list:
for child_row in row:
write_row(
self.pre_process_row(child_row, section),
sav_writer, fields)
index += 1
for section_name, sav_def in sav_defs.iteritems():
sav_def['sav_writer'].closeSavFile(
sav_def['sav_writer'].fh, mode='wb')
# write zipfile
with ZipFile(path, 'w') as zip_file:
for section_name, sav_def in sav_defs.iteritems():
sav_file = sav_def['sav_file']
sav_file.seek(0)
zip_file.write(
sav_file.name, "_".join(section_name.split("/")) + ".sav")
# close files when we are done
for section_name, sav_def in sav_defs.iteritems():
sav_def['sav_file'].close()
def dict_to_flat_export(d, parent_index=0):
pass
def generate_export(export_type, extension, username, id_string,
export_id=None, filter_query=None, group_delimiter='/',
split_select_multiples=True,
binary_select_multiples=False):
"""
Create appropriate export object given the export type
"""
export_type_func_map = {
Export.XLS_EXPORT: 'to_xls_export',
Export.CSV_EXPORT: 'to_flat_csv_export',
Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
Export.ANALYSER_EXPORT: 'to_analyser_export'
}
xform = XForm.objects.get(
user__username__iexact=username, id_string__exact=id_string)
# query mongo for the cursor
records = query_mongo(username, id_string, filter_query)
export_builder = ExportBuilder()
export_builder.GROUP_DELIMITER = group_delimiter
export_builder.SPLIT_SELECT_MULTIPLES = split_select_multiples
export_builder.BINARY_SELECT_MULTIPLES = binary_select_multiples
export_builder.set_survey(xform.data_dictionary().survey)
prefix = slugify('{}_export__{}__{}'.format(export_type, username, id_string))
temp_file = NamedTemporaryFile(prefix=prefix, suffix=("." + extension))
# get the export function by export type
func = getattr(export_builder, export_type_func_map[export_type])
    func(temp_file.name, records, username, id_string, None)
# generate filename
basename = "%s_%s" % (
id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
if export_type == Export.ANALYSER_EXPORT:
# Analyser exports should be distinguished by more than just their file extension.
basename= '{}_ANALYSER_{}'.format(id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
# check filename is unique
while not Export.is_filename_unique(xform, filename):
filename = increment_index_in_filename(filename)
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
# TODO: if s3 storage, make private - how will we protect local storage??
storage = get_storage_class()()
# seek to the beginning as required by storage classes
temp_file.seek(0)
export_filename = storage.save(
file_path,
File(temp_file, file_path))
temp_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
if export_id:
export = Export.objects.get(id=export_id)
else:
fsxf = filter_query.values()[0]
# print("fsxf", fsxf)
export = Export(xform=xform, export_type=export_type, fsxf_id=fsxf)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
    # don't persist exports that have a filter
if filter_query is None:
export.save()
export.save()
return export
def query_mongo(username, id_string, query=None, hide_deleted=True):
# print("incoming query", query)
qry = query
qry["_deleted_at"] = {'$exists': False}
# query = None
# query = json.loads(query, object_hook=json_util.object_hook)\
# if query else {}
# query = dict_for_mongo(query)
# query[USERFORM_ID] = u'{0}_{1}'.format(username, id_string)
# if hide_deleted:
# query = {"$and": [query, {"_deleted_at": None}]}
# query = {"$and": [query, qry]}
# print(query)
print("cpount", xform_instances.find(qry).count())
print("qry", qry)
return xform_instances.find(qry)
def should_create_new_export(xform, export_type, fsxf=None, site_id=0, version="0"):
if site_id is None:
site_id = 0
if Export.objects.filter(
xform=xform, export_type=export_type, fsxf=fsxf, site=site_id, version=version).exists():
return False
return True
def newset_export_for(xform, export_type):
"""
    Make sure you check that an export exists before calling this;
    it will raise a DoesNotExist exception otherwise.
"""
return Export.objects.filter(xform=xform, export_type=export_type)\
.latest('created_on')
def increment_index_in_filename(filename):
"""
filename should be in the form file.ext or file-2.ext - we check for the
dash and index and increment appropriately
"""
# check for an index i.e. dash then number then dot extension
regex = re.compile(r"(.+?)\-(\d+)(\..+)")
match = regex.match(filename)
if match:
basename = match.groups()[0]
index = int(match.groups()[1]) + 1
ext = match.groups()[2]
else:
index = 1
# split filename from ext
basename, ext = os.path.splitext(filename)
new_filename = "%s-%d%s" % (basename, index, ext)
return new_filename
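# Examples of the increment rule described above:
#   increment_index_in_filename('export.xlsx')    -> 'export-1.xlsx'
#   increment_index_in_filename('export-2.xlsx')  -> 'export-3.xlsx'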
def generate_attachments_zip_export(
export_type, extension, username, id_string, export_id=None,
filter_query=None):
xform = XForm.objects.get(user__username=username, id_string=id_string)
attachments = Attachment.objects.filter(instance__xform=xform)
basename = "%s_%s" % (id_string,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
with NamedTemporaryFile('wb+', prefix='media_zip_export_', suffix='.zip') as temporary_file:
create_attachments_zipfile(attachments, temporary_file=temporary_file)
export_filename = get_storage_class()().save(
file_path,
File(temporary_file, file_path))
dir_name, basename = os.path.split(export_filename)
# get or create export object
if(export_id):
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
export.save()
return export
def generate_kml_export(
export_type, extension, username, id_string, export_id=None,
filter_query=None):
user = User.objects.get(username=username)
xform = XForm.objects.get(user__username=username, id_string=id_string)
response = render_to_response(
'survey.kml', {'data': kml_export_data(id_string, user)})
basename = "%s_%s" % (id_string,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
storage = get_storage_class()()
temp_file = NamedTemporaryFile(suffix=extension)
temp_file.write(response.content)
temp_file.seek(0)
export_filename = storage.save(
file_path,
File(temp_file, file_path))
temp_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
if(export_id):
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
export.save()
return export
def kml_export_data(id_string, user):
# TODO resolve circular import
from onadata.apps.viewer.models.data_dictionary import DataDictionary
dd = DataDictionary.objects.get(id_string=id_string, user=user)
instances = Instance.objects.filter(
xform__user=user,
xform__id_string=id_string,
deleted_at=None,
geom__isnull=False
).order_by('id')
data_for_template = []
for instance in instances:
point = instance.point
if point:
data_for_template.append({
'name': id_string,
'id': instance.uuid,
'lat': point.y,
'lng': point.x,
})
return data_for_template
def _get_records(instances):
records = []
for instance in instances:
record = instance.get_dict()
# Get the keys
for key in record:
if '/' in key:
# replace with _
record[key.replace('/', '_')]\
= record.pop(key)
records.append(record)
return records
def _get_server_from_metadata(xform, meta, token):
report_templates = MetaData.external_export(xform)
if meta:
try:
int(meta)
except ValueError:
raise Exception(u"Invalid metadata pk {0}".format(meta))
# Get the external server from the metadata
result = report_templates.get(pk=meta)
server = result.external_export_url
name = result.external_export_name
elif token:
server = token
name = None
else:
# Take the latest value in the metadata
if not report_templates:
raise Exception(
u"Could not find the template token: Please upload template.")
server = report_templates[0].external_export_url
name = report_templates[0].external_export_name
return server, name
def generate_external_export(
export_type, username, id_string, export_id=None, token=None,
filter_query=None, meta=None):
xform = XForm.objects.get(
user__username__iexact=username, id_string__exact=id_string)
user = User.objects.get(username=username)
server, name = _get_server_from_metadata(xform, meta, token)
# dissect the url
parsed_url = urlparse(server)
token = parsed_url.path[5:]
ser = parsed_url.scheme + '://' + parsed_url.netloc
records = _get_records(Instance.objects.filter(
xform__user=user, xform__id_string=id_string))
status_code = 0
if records and server:
try:
client = Client(ser)
response = client.xls.create(token, json.dumps(records))
if hasattr(client.xls.conn, 'last_response'):
status_code = client.xls.conn.last_response.status_code
except Exception as e:
raise J2XException(
u"J2X client could not generate report. Server -> {0},"
u" Error-> {1}".format(server, e)
)
else:
if not server:
raise J2XException(u"External server not set")
elif not records:
raise J2XException(
u"No record to export. Form -> {0}".format(id_string)
)
# get or create export object
if export_id:
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.export_url = response
if status_code == 201:
export.internal_status = Export.SUCCESSFUL
export.filename = name + '-' + response[5:] if name else response[5:]
export.export_url = ser + response
else:
export.internal_status = Export.FAILED
export.save()
return export
def upload_template_for_external_export(server, file_obj):
try:
client = Client(server)
response = client.template.create(template_file=file_obj)
if hasattr(client.template.conn, 'last_response'):
status_code = client.template.conn.last_response.status_code
except Exception as e:
response = str(e)
status_code = 500
return str(status_code) + '|' + response
|
|
# coding=utf-8
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import netaddr
from operator import itemgetter
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_service import \
LbaasServiceObject
from f5_openstack_agent.lbaasv2.drivers.bigip import utils
LOG = logging.getLogger(__name__)
class UnsupportedProtocolException(Exception):
pass
class ServiceModelAdapter(object):
"""Class to translate LBaaS service objects to BIG-IP model objects.
Creates BIG-IP model objects (dictionary of resource attributes) given
    an LBaaS service object.
"""
def __init__(self, conf):
"""Initialize the service model adapter with config."""
self.conf = conf
if self.conf.environment_prefix:
self.prefix = self.conf.environment_prefix + '_'
else:
self.prefix = utils.OBJ_PREFIX + '_'
self.esd = None
def init_esd(self, esd):
self.esd = esd
def _get_pool_monitor(self, pool, service):
"""Return a reference to the pool monitor definition."""
pool_monitor_id = pool.get('healthmonitor_id', "")
if not pool_monitor_id:
return None
monitors = service.get("healthmonitors", list())
for monitor in monitors:
if monitor.get('id', "") == pool_monitor_id:
return monitor
return None
def get_pool(self, service):
pool = service["pool"]
members = service.get('members', list())
loadbalancer = service["loadbalancer"]
healthmonitor = self._get_pool_monitor(pool, service)
return self._map_pool(loadbalancer, pool, healthmonitor, members)
def snat_mode(self):
return self.conf.f5_snat_mode
def snat_count(self):
return self.conf.f5_snat_addresses_per_subnet
def vip_on_common_network(self, service):
loadbalancer = service.get('loadbalancer', {})
network_id = loadbalancer.get('network_id', "")
return (network_id in self.conf.common_network_ids)
def init_pool_name(self, loadbalancer, pool):
"""Return a barebones pool object with name and partition."""
partition = self.get_folder_name(loadbalancer['tenant_id'])
name = self.prefix + pool["id"] if pool else ''
return {"name": name,
"partition": partition}
def get_resource_description(self, resource):
if not isinstance(resource, dict):
raise ValueError
full_description = resource.get('name', "")
description = resource.get('description', "")
if full_description:
full_description += ":"
if description:
full_description += (" %s" % (description))
elif description:
full_description = description
else:
full_description = ""
return full_description
def get_virtual(self, service):
listener = service["listener"]
loadbalancer = service["loadbalancer"]
listener["use_snat"] = self.snat_mode()
if listener["use_snat"] and self.snat_count() > 0:
listener["snat_pool_name"] = self.get_folder_name(
loadbalancer["tenant_id"])
pool = self.get_vip_default_pool(service)
if pool and "session_persistence" in pool:
listener["session_persistence"] = pool["session_persistence"]
listener_policies = self.get_listener_policies(service)
vip = self._map_virtual(loadbalancer, listener, pool=pool,
policies=listener_policies)
return vip
def get_listener_policies(self, service):
"""Return a map of listener L7 policy ids to a list of L7 rules."""
lbaas_service = LbaasServiceObject(service)
listener_policies = list()
listener = service.get('listener', None)
if not listener:
return listener_policies
listener_l7policy_ids = listener.get('l7_policies', list())
LOG.debug("L7 debug: listener policies: %s", listener_l7policy_ids)
for policy in listener_l7policy_ids:
if self.is_esd(policy.get('name')):
continue
listener_policy = lbaas_service.get_l7policy(policy['id'])
LOG.debug("L7 debug: listener policy: %s", listener_policy)
if not listener_policy:
LOG.warning("Referenced L7 policy %s for listener %s not "
"found in service.", policy['id'], listener['id'])
continue
listener_l7policy_rules = list()
rules = listener_policy.get('rules', list())
for rule in rules:
l7policy_rule = lbaas_service.get_l7rule(rule['id'])
if not l7policy_rule:
LOG.warning("Referenced L7 rule %s for policy %s not "
"found in service.", rule['id'], policy['id'])
continue
if l7policy_rule['provisioning_status'] != "PENDING_DELETE":
listener_l7policy_rules.append(l7policy_rule)
listener_policy['l7policy_rules'] = listener_l7policy_rules
listener_policies.append(listener_policy)
return listener_policies
def get_virtual_name(self, service):
vs_name = None
if "listener" in service:
listener = service["listener"]
loadbalancer = service["loadbalancer"]
vs_name = self._init_virtual_name(loadbalancer, listener)
return vs_name
def _init_virtual_name(self, loadbalancer, listener):
name = self.prefix + listener["id"]
partition = self.get_folder_name(loadbalancer['tenant_id'])
return dict(name=name, partition=partition)
def get_traffic_group(self, service):
tg = "traffic-group-local-only"
loadbalancer = service["loadbalancer"]
if "traffic_group" in loadbalancer:
tg = loadbalancer["traffic_group"]
return tg
@staticmethod
def _pending_delete(resource):
return (
resource.get('provisioning_status', "") == "PENDING_DELETE"
)
def get_vip_default_pool(self, service):
listener = service["listener"]
pools = service.get("pools", list())
default_pool = None
if "default_pool_id" in listener:
for pool in pools:
if listener['default_pool_id'] == pool['id']:
if not self._pending_delete(pool):
default_pool = pool
break
return default_pool
def get_member(self, service):
loadbalancer = service["loadbalancer"]
member = service["member"]
return self._map_member(loadbalancer, member)
def get_member_node(self, service):
loadbalancer = service["loadbalancer"]
member = service["member"]
return self._map_node(loadbalancer, member)
def get_healthmonitor(self, service):
healthmonitor = service["healthmonitor"]
loadbalancer = service["loadbalancer"]
return self._map_healthmonitor(loadbalancer,
healthmonitor)
def get_folder(self, service):
loadbalancer = service["loadbalancer"]
# XXX maybe ServiceModelAdapter should get the data it needs on
# __init__?
folder = None
if "tenant_id" in loadbalancer:
tenant_id = loadbalancer["tenant_id"]
folder_name = self.get_folder_name(tenant_id)
folder = {"name": folder_name,
"subPath": "/",
"fullPath": "/" + folder_name,
"hidden": False,
"inheritedDevicegroup": True}
if "traffic_group" in loadbalancer:
folder['trafficGroup'] = loadbalancer["traffic_group"]
folder['inheritedTrafficGroup'] = False
else:
folder['inheritedTrafficGroup'] = True
return folder
def get_folder_name(self, tenant_id):
# XXX Use of getter questionable move to @property?
if tenant_id is not None:
name = self.prefix + \
tenant_id.replace('/', '')
else:
name = "Common"
return name
def tenant_to_traffic_group(self, tenant_id, traffic_groups):
# Hash tenant id to index of traffic group
hexhash = hashlib.md5(tenant_id).hexdigest()
tg_index = int(hexhash, 16) % len(traffic_groups)
return traffic_groups[tg_index]
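    # Hedged illustration (not part of the original adapter): the tenant id is
    # hashed with MD5 and the digest, taken as an integer modulo the number of
    # traffic groups, picks a stable group for that tenant. The group names
    # below are invented for demonstration.
    def _tenant_to_traffic_group_sketch(self):
        traffic_groups = ['/Common/traffic-group-1', '/Common/traffic-group-2']
        group = self.tenant_to_traffic_group('example-tenant-id',
                                             traffic_groups)
        # The same tenant id always maps to the same entry of traffic_groups.
        return group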
def _map_healthmonitor(self, loadbalancer, lbaas_healthmonitor):
healthmonitor = self.init_monitor_name(loadbalancer,
lbaas_healthmonitor)
healthmonitor["description"] = self.get_resource_description(
lbaas_healthmonitor)
# type
if "type" in lbaas_healthmonitor:
# healthmonitor["type"] = lbaas_healthmonitor["type"].lower()
if (lbaas_healthmonitor["type"] == "HTTP" or
lbaas_healthmonitor["type"] == "HTTPS"):
# url path
if "url_path" in lbaas_healthmonitor:
healthmonitor["send"] = ("GET " +
lbaas_healthmonitor["url_path"] +
" HTTP/1.0\\r\\n\\r\\n")
else:
healthmonitor["send"] = "GET / HTTP/1.0\\r\\n\\r\\n"
# expected codes
healthmonitor["recv"] = self._get_recv_text(
lbaas_healthmonitor)
# interval - delay
if "delay" in lbaas_healthmonitor:
healthmonitor["interval"] = lbaas_healthmonitor["delay"]
# timeout
if "timeout" in lbaas_healthmonitor:
if "max_retries" in lbaas_healthmonitor:
timeout = (int(lbaas_healthmonitor["max_retries"]) *
int(lbaas_healthmonitor["timeout"]))
healthmonitor["timeout"] = timeout
return healthmonitor
def init_monitor_name(self, loadbalancer, monitor):
name = self.prefix + monitor["id"]
return {"name": name,
"partition": self.get_folder_name(loadbalancer['tenant_id'])}
def _get_recv_text(self, lbaas_healthmonitor):
if "expected_codes" in lbaas_healthmonitor:
try:
if lbaas_healthmonitor['expected_codes'].find(",") > 0:
status_codes = (
lbaas_healthmonitor['expected_codes'].split(','))
recv_text = "HTTP/1.(0|1) ("
for status in status_codes:
int(status)
recv_text += status + "|"
recv_text = recv_text[:-1]
recv_text += ")"
elif lbaas_healthmonitor['expected_codes'].find("-") > 0:
status_range = (
lbaas_healthmonitor['expected_codes'].split('-'))
start_range = status_range[0]
int(start_range)
stop_range = status_range[1]
int(stop_range)
recv_text = (
"HTTP/1.(0|1) [" +
start_range + "-" +
stop_range + "]"
)
else:
int(lbaas_healthmonitor['expected_codes'])
recv_text = "HTTP/1.(0|1) " +\
lbaas_healthmonitor['expected_codes']
except Exception as exc:
LOG.error(
"invalid monitor: %s, expected_codes %s, setting to 200"
% (exc, lbaas_healthmonitor['expected_codes']))
recv_text = "HTTP/1.(0|1) 200"
else:
recv_text = "HTTP/1.(0|1) 200"
return recv_text
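    # Hedged illustration (not part of the original adapter): expected recv
    # strings produced by _get_recv_text for the three supported forms of
    # ``expected_codes``. The code values are examples only.
    def _recv_text_sketch(self):
        assert self._get_recv_text(
            {'expected_codes': '200,201'}) == "HTTP/1.(0|1) (200|201)"
        assert self._get_recv_text(
            {'expected_codes': '200-204'}) == "HTTP/1.(0|1) [200-204]"
        assert self._get_recv_text(
            {'expected_codes': '200'}) == "HTTP/1.(0|1) 200"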
def get_monitor_type(self, service):
monitor_type = None
lbaas_healthmonitor = service["healthmonitor"]
if "type" in lbaas_healthmonitor:
monitor_type = lbaas_healthmonitor["type"]
return monitor_type
def _map_pool(self, loadbalancer, lbaas_pool, lbaas_hm, lbaas_members):
pool = self.init_pool_name(loadbalancer, lbaas_pool)
pool["description"] = self.get_resource_description(pool)
if "lb_algorithm" in lbaas_pool:
lbaas_lb_method = lbaas_pool['lb_algorithm'].upper()
pool['loadBalancingMode'] = \
self._set_lb_method(lbaas_lb_method, lbaas_members)
# If source_ip lb method, add SOURCE_IP persistence to ensure
# source IP loadbalancing. See issue #344 for details.
if lbaas_pool['lb_algorithm'].upper() == 'SOURCE_IP':
persist = lbaas_pool.get('session_persistence', None)
if not persist:
lbaas_pool['session_persistence'] = {'type': 'SOURCE_IP'}
if lbaas_hm:
hm = self.init_monitor_name(loadbalancer, lbaas_hm)
pool["monitor"] = hm["name"]
else:
pool["monitor"] = ""
members = list()
for member in lbaas_members:
provisioning_status = member.get('provisioning_status', "")
if provisioning_status != "PENDING_DELETE":
members.append(self._map_member(loadbalancer, member))
pool["members"] = members
return pool
def _set_lb_method(self, lbaas_lb_method, lbaas_members):
"""Set pool lb method depending on member attributes."""
lb_method = self._get_lb_method(lbaas_lb_method)
if lbaas_lb_method == 'SOURCE_IP':
return lb_method
member_has_weight = False
for member in lbaas_members:
if 'weight' in member and member['weight'] > 1 and \
member['provisioning_status'] != 'PENDING_DELETE':
member_has_weight = True
break
if member_has_weight:
if lbaas_lb_method == 'LEAST_CONNECTIONS':
return self._get_lb_method('RATIO_LEAST_CONNECTIONS')
return self._get_lb_method('RATIO')
return lb_method
def _get_lb_method(self, method):
lb_method = method.upper()
if lb_method == 'LEAST_CONNECTIONS':
return 'least-connections-member'
elif lb_method == 'RATIO_LEAST_CONNECTIONS':
return 'ratio-least-connections-member'
elif lb_method == 'SOURCE_IP':
return 'least-connections-node'
elif lb_method == 'OBSERVED_MEMBER':
return 'observed-member'
elif lb_method == 'PREDICTIVE_MEMBER':
return 'predictive-member'
elif lb_method == 'RATIO':
return 'ratio-member'
else:
return 'round-robin'
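    # Hedged illustration (not part of the original adapter): when any active
    # member has weight > 1, _set_lb_method promotes LEAST_CONNECTIONS to its
    # ratio variant; with all weights equal it keeps the plain mapping from
    # _get_lb_method. The member dicts below are invented for demonstration.
    def _lb_method_sketch(self):
        weighted = [{'weight': 5, 'provisioning_status': 'ACTIVE'}]
        unweighted = [{'weight': 1, 'provisioning_status': 'ACTIVE'}]
        assert self._set_lb_method('LEAST_CONNECTIONS', weighted) == \
            'ratio-least-connections-member'
        assert self._set_lb_method('LEAST_CONNECTIONS', unweighted) == \
            'least-connections-member'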
def _map_virtual(self, loadbalancer, listener, pool=None, policies=None):
if policies:
LOG.debug("L7_debug: policies: %s", policies)
vip = self._init_virtual_name(loadbalancer, listener)
vip["description"] = self.get_resource_description(listener)
if pool:
pool_name = self.init_pool_name(loadbalancer, pool)
vip['pool'] = pool_name.get('name', "")
else:
vip['pool'] = ""
vip["connectionLimit"] = listener.get("connection_limit", 0)
if vip["connectionLimit"] < 0:
vip["connectionLimit"] = 0
port = listener.get("protocol_port", None)
ip_address = loadbalancer.get("vip_address", None)
if ip_address and port:
if str(ip_address).endswith('%0'):
ip_address = ip_address[:-2]
if ':' in ip_address:
vip['destination'] = ip_address + "." + str(port)
else:
vip['destination'] = ip_address + ":" + str(port)
else:
LOG.error("No VIP address or port specified")
        # differentiate the IPv4 and IPv6 cases
pure_ip_address = ip_address.split("%")[0]
ip_version = netaddr.IPAddress(pure_ip_address)
if ip_version.version == 4:
vip["mask"] = '255.255.255.255'
elif ip_version.version == 6:
vip["mask"] = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
else:
LOG.error("Not a validate ip address")
if "admin_state_up" in listener:
if listener["admin_state_up"]:
vip["enabled"] = True
else:
vip["disabled"] = True
self._add_vlan_and_snat(listener, vip)
self._add_profiles_session_persistence(listener, pool, vip)
vip['rules'] = list()
vip['policies'] = list()
if policies:
self._apply_l7_and_esd_policies(listener, policies, vip)
return vip
def _apply_l7_and_esd_policies(self, listener, policies, vip):
if not policies:
return
partition = self.get_folder_name(listener['tenant_id'])
policy_name = "wrapper_policy_" + str(listener['id'])
bigip_policy = listener.get('f5_policy', {})
if bigip_policy.get('rules', list()):
vip['policies'] = [{'name': policy_name,
'partition': partition}]
esd_composite = dict()
for policy in sorted(
policies, key=itemgetter('position'), reverse=True):
if policy['provisioning_status'] == "PENDING_DELETE":
continue
policy_name = policy.get('name', None)
esd = self.esd.get_esd(policy_name)
if esd:
esd_composite.update(esd)
if listener['protocol'] == 'TCP':
self._apply_fastl4_esd(vip, esd_composite)
else:
self._apply_esd(vip, esd_composite)
def get_esd(self, name):
if self.esd:
return self.esd.get_esd(name)
return None
def is_esd(self, name):
return self.esd.get_esd(name) is not None
def _add_profiles_session_persistence(self, listener, pool, vip):
protocol = listener.get('protocol', "")
if protocol not in ["HTTP", "HTTPS", "TCP", "TERMINATED_HTTPS"]:
LOG.warning("Listener protocol unrecognized: %s",
listener["protocol"])
vip["ipProtocol"] = "tcp"
# if protocol is HTTPS, also use fastl4
if protocol == 'TCP' or protocol == 'HTTPS':
virtual_type = 'fastl4'
else:
virtual_type = 'standard'
if virtual_type == 'fastl4':
vip['profiles'] = ['/Common/fastL4']
else:
# add profiles for HTTP, HTTPS, TERMINATED_HTTPS protocols
vip['profiles'] = ['/Common/http', '/Common/oneconnect']
vip['fallbackPersistence'] = ''
vip['persist'] = []
persistence = None
if pool:
persistence = pool.get('session_persistence', None)
lb_algorithm = pool.get('lb_algorithm', 'ROUND_ROBIN')
valid_persist_types = ['SOURCE_IP', 'APP_COOKIE', 'HTTP_COOKIE']
if persistence:
persistence_type = persistence.get('type', "")
if persistence_type not in valid_persist_types:
LOG.warning("Invalid peristence type: %s",
persistence_type)
return
if persistence_type == 'APP_COOKIE':
vip['persist'] = [{'name': 'app_cookie_' + vip['name']}]
elif persistence_type == 'SOURCE_IP':
vip['persist'] = [{'name': '/Common/source_addr'}]
elif persistence_type == 'HTTP_COOKIE':
vip['persist'] = [{'name': '/Common/cookie'}]
if persistence_type != 'SOURCE_IP':
if lb_algorithm == 'SOURCE_IP':
vip['fallbackPersistence'] = '/Common/source_addr'
if persistence_type in ['HTTP_COOKIE', 'APP_COOKIE']:
vip['profiles'] = ['/Common/http', '/Common/oneconnect']
def get_vlan(self, vip, bigip, network_id):
if network_id in bigip.assured_networks:
vip['vlans'].append(
bigip.assured_networks[network_id])
vip['vlansEnabled'] = True
vip.pop('vlansDisabled', None)
elif network_id in self.conf.common_network_ids:
vip['vlans'].append(
self.conf.common_network_ids[network_id])
vip['vlansEnabled'] = True
vip.pop('vlansDisabled', None)
def _add_vlan_and_snat(self, listener, vip):
# snat
if "use_snat" in listener and listener["use_snat"]:
vip['sourceAddressTranslation'] = {}
if "snat_pool_name" in listener:
vip['sourceAddressTranslation']['type'] = 'snat'
vip['sourceAddressTranslation']['pool'] = \
listener["snat_pool_name"]
else:
vip['sourceAddressTranslation']['type'] = 'automap'
# default values for pinning the VS to a specific VLAN set
vip['vlansDisabled'] = True
vip['vlans'] = []
def _map_member(self, loadbalancer, lbaas_member):
member = {}
port = lbaas_member["protocol_port"]
ip_address = lbaas_member["address"]
if lbaas_member["admin_state_up"]:
member["session"] = "user-enabled"
else:
member["session"] = "user-disabled"
if lbaas_member["weight"] == 0:
member["ratio"] = 1
member["session"] = "user-disabled"
else:
member["ratio"] = lbaas_member["weight"]
if ':' in ip_address:
member['name'] = ip_address + '.' + str(port)
else:
member['name'] = ip_address + ':' + str(port)
member["partition"] = self.get_folder_name(loadbalancer["tenant_id"])
member["address"] = ip_address
return member
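    # Hedged illustration (not part of the original adapter): _map_member names
    # IPv4 members "address:port" and IPv6 members "address.port", since ':'
    # already appears inside IPv6 literals. Addresses and ids below are
    # invented for demonstration.
    def _member_name_sketch(self):
        lb = {'tenant_id': 'example-tenant'}
        v4 = {'address': '10.1.1.5', 'protocol_port': 80,
              'admin_state_up': True, 'weight': 1}
        v6 = {'address': 'fd00::5', 'protocol_port': 80,
              'admin_state_up': True, 'weight': 1}
        assert self._map_member(lb, v4)['name'] == '10.1.1.5:80'
        assert self._map_member(lb, v6)['name'] == 'fd00::5.80'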
def _map_node(self, loadbalancer, lbaas_member):
member = {}
member["name"] = lbaas_member["address"]
member["partition"] = self.get_folder_name(loadbalancer["tenant_id"])
return member
def get_network_from_service(self, service, network_id):
if 'networks' in service:
return service['networks'][network_id]
def get_subnet_from_service(self, service, subnet_id):
if 'subnets' in service:
return service['subnets'][subnet_id]
def get_tls(self, service):
tls = {}
listener = service['listener']
if listener['protocol'] == 'TERMINATED_HTTPS':
if 'default_tls_container_id' in listener and \
listener['default_tls_container_id']:
tls['default_tls_container_id'] = \
listener['default_tls_container_id']
if 'sni_containers' in listener and listener['sni_containers']:
tls['sni_containers'] = listener['sni_containers']
return tls
def get_name(self, uuid):
return self.prefix + str(uuid)
def _apply_fastl4_esd(self, vip, esd):
if not esd:
return
# Application of ESD implies some type of L7 traffic routing. Add
# an HTTP profile.
if 'lbaas_http_profile' in esd:
vip['profiles'] = ["/Common/" + esd['lbaas_http_profile'],
"/Common/fastL4"]
else:
vip['profiles'] = ["/Common/http", "/Common/fastL4"]
# persistence
if 'lbaas_persist' in esd:
if vip.get('persist'):
LOG.warning("Overwriting the existing VIP persist profile: %s",
vip['persist'])
vip['persist'] = [{'name': esd['lbaas_persist']}]
if 'lbaas_fallback_persist' in esd and vip.get('persist'):
if vip.get('fallbackPersistence'):
LOG.warning(
"Overwriting the existing VIP fallback persist "
"profile: %s", vip['fallbackPersistence'])
vip['fallbackPersistence'] = esd['lbaas_fallback_persist']
# iRules
vip['rules'] = list()
if 'lbaas_irule' in esd:
irules = []
for irule in esd['lbaas_irule']:
irules.append('/Common/' + irule)
vip['rules'] = irules
# L7 policies
if 'lbaas_policy' in esd:
if vip.get('policies'):
LOG.warning(
"LBaaS L7 policies and rules will be overridden "
"by ESD policies")
vip['policies'] = list()
policies = list()
for policy in esd['lbaas_policy']:
policies.append({'name': policy, 'partition': 'Common'})
vip['policies'] = policies
def _apply_esd(self, vip, esd):
if not esd:
return
profiles = vip['profiles']
# start with server tcp profile
if 'lbaas_stcp' in esd:
# set serverside tcp profile
profiles.append({'name': esd['lbaas_stcp'],
'partition': 'Common',
'context': 'serverside'})
# restrict client profile
ctcp_context = 'clientside'
else:
# no serverside profile; use client profile for both
ctcp_context = 'all'
# must define client profile; default to tcp if not in ESD
if 'lbaas_ctcp' in esd:
ctcp_profile = esd['lbaas_ctcp']
else:
ctcp_profile = 'tcp'
profiles.append({'name': ctcp_profile,
'partition': 'Common',
'context': ctcp_context})
if 'lbaas_oneconnect_profile' in esd:
profiles.remove('/Common/oneconnect')
profiles.append('/Common/' + esd['lbaas_oneconnect_profile'])
# SSL profiles
if 'lbaas_cssl_profile' in esd:
profiles.append({'name': esd['lbaas_cssl_profile'],
'partition': 'Common',
'context': 'clientside'})
if 'lbaas_sssl_profile' in esd:
profiles.append({'name': esd['lbaas_sssl_profile'],
'partition': 'Common',
'context': 'serverside'})
if 'lbaas_http_profile' in esd:
profiles.remove('/Common/http')
profiles.append('/Common/' + esd['lbaas_http_profile'])
# persistence
if 'lbaas_persist' in esd:
if vip.get('persist', None):
LOG.warning("Overwriting the existing VIP persist profile: %s",
vip['persist'])
vip['persist'] = [{'name': esd['lbaas_persist']}]
if 'lbaas_fallback_persist' in esd and vip.get('persist'):
if vip.get('fallbackPersistence', None):
LOG.warning(
"Overwriting the existing VIP fallback persist "
"profile: %s", vip['fallbackPersistence'])
vip['fallbackPersistence'] = esd['lbaas_fallback_persist']
# iRules
vip['rules'] = list()
if 'lbaas_irule' in esd:
irules = []
for irule in esd['lbaas_irule']:
irules.append('/Common/' + irule)
vip['rules'] = irules
# L7 policies
if 'lbaas_policy' in esd:
if vip.get('policies'):
LOG.warning(
"LBaaS L7 policies and rules will be overridden "
"by ESD policies")
vip['policies'] = list()
policies = list()
for policy in esd['lbaas_policy']:
policies.append({'name': policy, 'partition': 'Common'})
vip['policies'] = policies
|
|
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
from django.db.models import Model
from django.template import Context, Node, Template, TemplateSyntaxError
from django.template.base import (TOKEN_BLOCK, TOKEN_COMMENT,
TOKEN_TEXT, TOKEN_VAR, TextNode)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
if cache_installed():
@register.tag
def nevercache(parser, token):
"""
Tag for two phased rendering. Converts enclosed template
code and content into text, which gets rendered separately
in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
This is to bypass caching for the enclosed code and content.
"""
text = []
end_tag = "endnevercache"
tag_mapping = {
TOKEN_TEXT: ("", ""),
TOKEN_VAR: ("{{", "}}"),
TOKEN_BLOCK: ("{%", "%}"),
TOKEN_COMMENT: ("{#", "#}"),
}
delimiter = nevercache_token()
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
return TextNode(delimiter + "".join(text) + delimiter)
start, end = tag_mapping[token.token_type]
text.append("%s%s%s" % (start, token.contents, end))
parser.unclosed_block_tag(end_tag)
else:
@register.to_end_tag
def nevercache(parsed, context, token):
"""
Dummy fallback ``nevercache`` for when caching is not
configured.
"""
return parsed
@register.simple_tag(takes_context=True)
def fields_for(context, form, template="includes/form_fields.html"):
"""
Renders fields for a form with an optional template choice.
"""
context["form_for_fields"] = form
return get_template(template).render(Context(context))
@register.inclusion_tag("includes/form_errors.html", takes_context=True)
def errors_for(context, form):
"""
Renders an alert if the form has any errors.
"""
context["form"] = form
return context
@register.filter
def sort_by(items, attr):
"""
General sort filter - sorts by either attribute or key.
"""
def key_func(item):
try:
return getattr(item, attr)
except AttributeError:
try:
return item[attr]
except TypeError:
getattr(item, attr) # Reraise AttributeError
return sorted(items, key=key_func)
@register.filter
def is_installed(app_name):
"""
Returns ``True`` if the given app name is in the
``INSTALLED_APPS`` setting.
"""
from warnings import warn
warn("The is_installed filter is deprecated. Please use the tag "
"{% ifinstalled appname %}{% endifinstalled %}")
return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try and find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
unmatched_end_tag = 1
if app.strip("\"'") not in settings.INSTALLED_APPS:
while unmatched_end_tag:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK:
block_name = token.contents.split()[0]
if block_name == tag:
unmatched_end_tag += 1
if block_name == end_tag:
unmatched_end_tag -= 1
parser.tokens.insert(0, token)
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
"""
Sets the ``short_url`` attribute of the given model for share
links in the template.
"""
obj = context[token.split_contents()[1]]
obj.set_short_url()
return ""
@register.simple_tag
def gravatar_url(email, size=32):
"""
Return the full URL for a Gravatar given an email hash.
"""
bits = (md5(email.lower().encode("utf-8")).hexdigest(), size)
return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % bits
@register.to_end_tag
def metablock(parsed):
"""
Remove HTML tags, entities and superfluous characters from
meta blocks.
"""
parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
return escape(strip_tags(decode_entities(parsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and data for persisting querystring
in pagination links. Can also contain a comma separated string of
var names in the current querystring to exclude from the pagination
links, via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
"""
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
"""
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = apps.get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
context["search_model_choices"] = sorted(search_model_choices)
return context
@register.simple_tag
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5,
top=.5, padding=False, padding_color="#fff"):
"""
Given the URL to an image, resizes the image using the given width
and height on the first time it is requested, and returns the URL
to the new resized image. If width or height are zero then original
ratio is maintained. When ``upscale`` is False, images smaller than
the given size will not be grown to fill that size. The given width
and height thus act as maximum dimensions.
"""
if not image_url:
return ""
try:
from PIL import Image, ImageFile, ImageOps
except ImportError:
return ""
image_url = unquote(str(image_url)).split("?")[0]
if image_url.startswith(settings.MEDIA_URL):
image_url = image_url.replace(settings.MEDIA_URL, "", 1)
image_dir, image_name = os.path.split(image_url)
image_prefix, image_ext = os.path.splitext(image_name)
filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
thumb_name = "%s-%sx%s" % (image_prefix, width, height)
if not upscale:
thumb_name += "-no-upscale"
if left != .5 or top != .5:
left = min(1, max(0, left))
top = min(1, max(0, top))
thumb_name = "%s-%sx%s" % (thumb_name, left, top)
thumb_name += "-padded-%s" % padding_color if padding else ""
thumb_name = "%s%s" % (thumb_name, image_ext)
# `image_name` is used here for the directory path, as each image
# requires its own sub-directory using its own name - this is so
# we can consistently delete all thumbnails for an individual
# image, which is something we do in filebrowser when a new image
# is written, allowing us to purge any previously generated
# thumbnails that may match a new image name.
thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
settings.THUMBNAILS_DIR_NAME, image_name)
if not os.path.exists(thumb_dir):
try:
os.makedirs(thumb_dir)
except OSError:
pass
thumb_path = os.path.join(thumb_dir, thumb_name)
thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
quote(image_name.encode("utf-8")),
quote(thumb_name.encode("utf-8")))
image_url_path = os.path.dirname(image_url)
if image_url_path:
thumb_url = "%s/%s" % (image_url_path, thumb_url)
try:
thumb_exists = os.path.exists(thumb_path)
except UnicodeEncodeError:
        # The image was saved on a filesystem with utf-8 support, but the
        # locale has since changed and the filesystem no longer supports
        # utf-8 paths.
from mezzanine.core.exceptions import FileSystemEncodingChanged
raise FileSystemEncodingChanged()
if thumb_exists:
# Thumbnail exists, don't generate it.
return thumb_url
elif not default_storage.exists(image_url):
# Requested image does not exist, just return its URL.
return image_url
f = default_storage.open(image_url)
try:
image = Image.open(f)
except:
# Invalid image format.
return image_url
image_info = image.info
to_width = int(width)
to_height = int(height)
from_width = image.size[0]
from_height = image.size[1]
if not upscale:
to_width = min(to_width, from_width)
to_height = min(to_height, from_height)
# Set dimensions.
if to_width == 0:
to_width = from_width * to_height // from_height
elif to_height == 0:
to_height = from_height * to_width // from_width
if image.mode not in ("P", "L", "RGBA"):
try:
image = image.convert("RGBA")
except:
return image_url
# Required for progressive jpgs.
ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)
# Padding.
if padding and to_width and to_height:
from_ratio = float(from_width) / from_height
to_ratio = float(to_width) / to_height
pad_size = None
if to_ratio < from_ratio:
pad_height = int(to_height * (float(from_width) / to_width))
pad_size = (from_width, pad_height)
pad_top = (pad_height - from_height) // 2
pad_left = 0
elif to_ratio > from_ratio:
pad_width = int(to_width * (float(from_height) / to_height))
pad_size = (pad_width, from_height)
pad_top = 0
pad_left = (pad_width - from_width) // 2
if pad_size is not None:
pad_container = Image.new("RGBA", pad_size, padding_color)
pad_container.paste(image, (pad_left, pad_top))
image = pad_container
# Create the thumbnail.
to_size = (to_width, to_height)
to_pos = (left, top)
try:
image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
image = image.save(thumb_path, filetype, quality=quality, **image_info)
# Push a remote copy of the thumbnail if MEDIA_URL is
# absolute.
if "://" in settings.MEDIA_URL:
with open(thumb_path, "rb") as f:
default_storage.save(thumb_url, File(f))
except Exception:
# If an error occurred, a corrupted image may have been saved,
# so remove it, otherwise the check for it existing will just
# return the corrupted image next time it's requested.
try:
os.remove(thumb_path)
except Exception:
pass
return image_url
return thumb_url
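# Hedged usage sketch (not part of the original tags): for a call like
# {% thumbnail "photos/cat.jpg" 100 80 %} the generated file would live at
# MEDIA_ROOT/photos/<THUMBNAILS_DIR_NAME>/cat.jpg/cat-100x80.jpg, i.e. each
# source image gets its own thumbnail sub-directory so stale thumbnails can be
# purged together when the image is replaced.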
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
context["has_site_permission"] = has_site_permission(user)
if settings.INLINE_EDITING_ENABLED and context["has_site_permission"]:
t = get_template("includes/editable_toolbar.html")
context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
try:
context["editable_obj"]
except KeyError:
context["editable_obj"] = context.get("page", None)
context["toolbar"] = t.render(Context(context))
context["richtext_media"] = RichTextField().formfield().widget.media
return context
@register.filter
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
filter_names = settings.RICHTEXT_FILTERS
if not filter_names:
try:
filter_names = [settings.RICHTEXT_FILTER]
except AttributeError:
pass
else:
from warnings import warn
warn("The `RICHTEXT_FILTER` setting is deprecated in favor of "
"the new plural setting `RICHTEXT_FILTERS`.")
for filter_name in filter_names:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
return content
@register.filter
def richtext_filter(content):
"""
Deprecated version of richtext_filters above.
"""
from warnings import warn
warn("The `richtext_filter` template tag is deprecated in favor of "
"the new plural tag `richtext_filters`.")
return richtext_filters(content)
@register.to_end_tag
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
"""
def parse_field(field):
field = field.split(".")
obj = context.get(field.pop(0), None)
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
if callable(obj):
# Allows {% editable page.get_content_model.content %}
obj = obj()
return obj, attr
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
if not parsed.strip():
try:
parsed = "".join([str(getattr(*field)) for field in fields])
except AttributeError:
pass
if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context["request"]):
field_names = ",".join([f[1] for f in fields])
context["editable_form"] = get_edit_form(obj, field_names)
context["original"] = parsed
t = get_template("includes/editable_form.html")
return t.render(Context(context))
return parsed
@register.simple_tag
def try_url(url_name):
"""
Mimics Django's ``url`` template tag but fails silently. Used for
url names in admin templates as these won't resolve when admin
tests are running.
"""
from warnings import warn
warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
try:
url = reverse(url_name)
except NoReverseMatch:
return ""
return url
def admin_app_list(request):
"""
    Adapted from ``django.contrib.admin.sites.AdminSite.index``.
Returns a list of lists of models grouped and ordered according to
``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
"""
app_dict = {}
# Model or view --> (group index, group title, item index, item title).
menu_order = {}
for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
group_title, items = group
group_title = group_title.title()
for (item_index, item) in enumerate(items):
if isinstance(item, (tuple, list)):
item_title, item = item
else:
item_title = None
menu_order[item] = (group_index, group_title,
item_index, item_title)
# Add all registered models, using group and title from menu order.
for (model, model_admin) in admin.site._registry.items():
opts = model._meta
in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
if in_menu and request.user.has_module_perms(opts.app_label):
perms = model_admin.get_model_perms(request)
admin_url_name = ""
if perms["change"]:
admin_url_name = "changelist"
change_url = admin_url(model, admin_url_name)
else:
change_url = None
if perms["add"]:
admin_url_name = "add"
add_url = admin_url(model, admin_url_name)
else:
add_url = None
if admin_url_name:
model_label = "%s.%s" % (opts.app_label, opts.object_name)
try:
app_index, app_title, model_index, model_title = \
menu_order[model_label]
except KeyError:
app_index = None
app_title = opts.app_label.title()
model_index = None
model_title = None
else:
del menu_order[model_label]
if not model_title:
model_title = capfirst(model._meta.verbose_name_plural)
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": model_index,
"perms": model_admin.get_model_perms(request),
"name": model_title,
"admin_url": change_url,
"add_url": add_url
})
# Menu may also contain view or url pattern names given as (title, name).
for (item_url, item) in menu_order.items():
app_index, app_title, item_index, item_title = item
try:
item_url = reverse(item_url)
except NoReverseMatch:
continue
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": item_index,
"perms": {"custom": True},
"name": item_title,
"admin_url": item_url,
})
app_list = list(app_dict.values())
sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
for app in app_list:
app["models"].sort(key=sort)
app_list.sort(key=sort)
return app_list
@register.inclusion_tag("admin/includes/dropdown_menu.html",
takes_context=True)
def admin_dropdown_menu(context):
"""
Renders the app list for the admin dropdown menu navigation.
"""
user = context["request"].user
if user.is_staff:
context["dropdown_menu_app_list"] = admin_app_list(context["request"])
if user.is_superuser:
sites = Site.objects.all()
else:
sites = user.sitepermissions.sites.all()
context["dropdown_menu_sites"] = list(sites)
context["dropdown_menu_selected_site_id"] = current_site_id()
return context
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context
@register.inclusion_tag("admin/includes/recent_actions.html",
takes_context=True)
def recent_actions(context):
"""
Renders the recent actions list for the admin dashboard widget.
"""
return context
@register.render_tag
def dashboard_column(context, token):
"""
Takes an index for retrieving the sequence of template tags from
``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin
dashboard.
"""
column_index = int(token.split_contents()[1])
output = []
for tag in settings.DASHBOARD_TAGS[column_index]:
t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
output.append(t.render(Context(context)))
return "".join(output)
@register.simple_tag(takes_context=True)
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url de %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
if context['request'].META["QUERY_STRING"]:
url += "?" + context['request'].META["QUERY_STRING"]
return url
|
|
"""Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
import sys
import os
import shutil
import subprocess
from copy import copy, deepcopy
from test.test_support import run_unittest, TESTFN, unlink, get_attribute
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var)
class TestSysConfig(unittest.TestCase):
def setUp(self):
"""Make a copy of sys.path"""
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
self.makefile = None
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._uname = None
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = copy(sysconfig._CONFIG_VARS)
self.old_environ = deepcopy(os.environ)
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
if self.makefile is not None:
os.unlink(self.makefile)
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = copy(self._config_vars)
for key, value in self.old_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
for key in os.environ.keys():
if key not in self.old_environ:
del os.environ[key]
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = uname
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEquals(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = wanted.items()
wanted.sort()
scheme = scheme.items()
scheme.sort()
self.assertEquals(scheme, wanted)
def test_get_path(self):
# xxx make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEquals(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEquals(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEquals(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxint
try:
sys.maxint = 2147483647
self.assertEquals(get_platform(), 'macosx-10.3-ppc')
sys.maxint = 9223372036854775807
self.assertEquals(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxint = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxint
try:
sys.maxint = 2147483647
self.assertEquals(get_platform(), 'macosx-10.3-i386')
sys.maxint = 9223372036854775807
self.assertEquals(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxint = maxint
# macbook with fat binaries (fat, universal or fat64)
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEquals(get_platform(), 'macosx-10.4-fat')
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEquals(get_platform(), 'macosx-10.4-intel')
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEquals(get_platform(), 'macosx-10.4-fat3')
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEquals(get_platform(), 'macosx-10.4-universal')
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEquals(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3'%(arch,))
self.assertEquals(get_platform(), 'macosx-10.4-%s'%(arch,))
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEquals(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user')
self.assertEquals(get_scheme_names(), wanted)
def test_symlink(self):
# Issue 7880
symlink = get_attribute(os, "symlink")
def get(python):
cmd = [python, '-c',
'import sysconfig; print sysconfig.get_platform()']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue 8759 : make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
user_path = get_path(name, 'posix_user')
self.assertEquals(user_path, global_path.replace(base, user))
def test_main():
run_unittest(TestSysConfig)
if __name__ == "__main__":
test_main()
|
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for retrying functions with exponential back-off.
The :class:`Retry` decorator can be used to retry functions that raise
exceptions using exponential backoff. Because an exponential sleep algorithm is
used, the retry is limited by a `deadline`. The deadline is the maximum amount
of time a method can block. This is used instead of total number of retries
because it is difficult to ascertain the amount of time a function can block
when using total number of retries and exponential backoff.
By default, this decorator will retry transient
API errors (see :func:`if_transient_error`). For example:
.. code-block:: python
@retry.Retry()
def call_flaky_rpc():
return client.flaky_rpc()
# Will retry flaky_rpc() if it raises transient API errors.
result = call_flaky_rpc()
You can pass a custom predicate to retry on different exceptions, such as
waiting for an eventually consistent item to be available:
.. code-block:: python
@retry.Retry(predicate=if_exception_type(exceptions.NotFound))
def check_if_exists():
return client.does_thing_exist()
is_available = check_if_exists()
Some client library methods apply retry automatically. These methods can accept
a ``retry`` parameter that allows you to configure the behavior:
.. code-block:: python
my_retry = retry.Retry(deadline=60)
result = client.some_method(retry=my_retry)
"""
from __future__ import unicode_literals
import datetime
import functools
import logging
import random
import time
import six
from google.api_core import datetime_helpers
from google.api_core import exceptions
from google.api_core import general_helpers
_LOGGER = logging.getLogger(__name__)
_DEFAULT_INITIAL_DELAY = 1.0 # seconds
_DEFAULT_MAXIMUM_DELAY = 60.0 # seconds
_DEFAULT_DELAY_MULTIPLIER = 2.0
_DEFAULT_DEADLINE = 60.0 * 2.0 # seconds
def if_exception_type(*exception_types):
"""Creates a predicate to check if the exception is of a given type.
Args:
exception_types (Sequence[:func:`type`]): The exception types to check
for.
Returns:
Callable[Exception]: A predicate that returns True if the provided
exception is of the given type(s).
"""
def if_exception_type_predicate(exception):
"""Bound predicate for checking an exception type."""
return isinstance(exception, exception_types)
return if_exception_type_predicate
# pylint: disable=invalid-name
# Pylint sees this as a constant, but it is also an alias that should be
# considered a function.
if_transient_error = if_exception_type(
(exceptions.InternalServerError, exceptions.TooManyRequests)
)
"""A predicate that checks if an exception is a transient API error.
The following server errors are considered transient:
- :class:`google.api_core.exceptions.InternalServerError` - HTTP 500, gRPC
``INTERNAL(13)`` and its subclasses.
- :class:`google.api_core.exceptions.TooManyRequests` - HTTP 429
- :class:`google.api_core.exceptions.ResourceExhausted` - gRPC
``RESOURCE_EXHAUSTED(8)``
"""
# pylint: enable=invalid-name
def exponential_sleep_generator(initial, maximum, multiplier=_DEFAULT_DELAY_MULTIPLIER):
"""Generates sleep intervals based on the exponential back-off algorithm.
This implements the `Truncated Exponential Back-off`_ algorithm.
.. _Truncated Exponential Back-off:
https://cloud.google.com/storage/docs/exponential-backoff
Args:
        initial (float): The minimum amount of time to delay. This must
            be greater than 0.
        maximum (float): The maximum amount of time to delay.
multiplier (float): The multiplier applied to the delay.
Yields:
float: successive sleep intervals.
"""
delay = initial
while True:
# Introduce jitter by yielding a delay that is uniformly distributed
# to average out to the delay time.
yield min(random.uniform(0.0, delay * 2.0), maximum)
delay = delay * multiplier
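# Hedged usage sketch (not part of the original module): the generator yields
# jittered delays whose underlying value grows by ``multiplier`` each step,
# with every draw capped at ``maximum``. Concrete values vary because of the
# random jitter.
def _exponential_sleep_sketch():
    import itertools
    delays = exponential_sleep_generator(initial=1.0, maximum=10.0)
    # e.g. [0.8, 2.3, 1.9, 7.4, 10.0] -- each draw is uniform in [0, delay * 2]
    # and clamped to ``maximum`` while the underlying delay keeps doubling.
    return list(itertools.islice(delays, 5))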
def retry_target(target, predicate, sleep_generator, deadline, on_error=None):
"""Call a function and retry if it fails.
This is the lowest-level retry helper. Generally, you'll use the
higher-level retry helper :class:`Retry`.
Args:
target(Callable): The function to call and retry. This must be a
nullary function - apply arguments with `functools.partial`.
predicate (Callable[Exception]): A callable used to determine if an
exception raised by the target should be considered retryable.
It should return True to retry or False otherwise.
sleep_generator (Iterable[float]): An infinite iterator that determines
how long to sleep between retries.
deadline (float): How long to keep retrying the target.
on_error (Callable): A function to call while processing a retryable
exception. Any error raised by this function will *not* be
caught.
Returns:
Any: the return value of the target function.
Raises:
google.api_core.RetryError: If the deadline is exceeded while retrying.
ValueError: If the sleep generator stops yielding values.
        Exception: If the target raises an exception that isn't retryable.
"""
if deadline is not None:
deadline_datetime = datetime_helpers.utcnow() + datetime.timedelta(
seconds=deadline
)
else:
deadline_datetime = None
last_exc = None
for sleep in sleep_generator:
try:
return target()
# pylint: disable=broad-except
# This function explicitly must deal with broad exceptions.
except Exception as exc:
if not predicate(exc):
raise
last_exc = exc
if on_error is not None:
on_error(exc)
now = datetime_helpers.utcnow()
if deadline_datetime is not None and deadline_datetime < now:
six.raise_from(
exceptions.RetryError(
"Deadline of {:.1f}s exceeded while calling {}".format(
deadline, target
),
last_exc,
),
last_exc,
)
_LOGGER.debug(
"Retrying due to {}, sleeping {:.1f}s ...".format(last_exc, sleep)
)
time.sleep(sleep)
raise ValueError("Sleep generator stopped yielding sleep values.")
@six.python_2_unicode_compatible
class Retry(object):
"""Exponential retry decorator.
This class is a decorator used to add exponential back-off retry behavior
to an RPC call.
Although the default behavior is to retry transient API errors, a
different predicate can be provided to retry other exceptions.
Args:
predicate (Callable[Exception]): A callable that should return ``True``
if the given exception is retryable.
        initial (float): The minimum amount of time to delay in seconds. This
must be greater than 0.
        maximum (float): The maximum amount of time to delay in seconds.
multiplier (float): The multiplier applied to the delay.
deadline (float): How long to keep retrying in seconds.
"""
def __init__(
self,
predicate=if_transient_error,
initial=_DEFAULT_INITIAL_DELAY,
maximum=_DEFAULT_MAXIMUM_DELAY,
multiplier=_DEFAULT_DELAY_MULTIPLIER,
deadline=_DEFAULT_DEADLINE,
):
self._predicate = predicate
self._initial = initial
self._multiplier = multiplier
self._maximum = maximum
self._deadline = deadline
def __call__(self, func, on_error=None):
"""Wrap a callable with retry behavior.
Args:
func (Callable): The callable to add retry behavior to.
on_error (Callable): A function to call while processing a
retryable exception. Any error raised by this function will
*not* be caught.
Returns:
Callable: A callable that will invoke ``func`` with retry
behavior.
"""
@general_helpers.wraps(func)
def retry_wrapped_func(*args, **kwargs):
"""A wrapper that calls target function with retry."""
target = functools.partial(func, *args, **kwargs)
sleep_generator = exponential_sleep_generator(
self._initial, self._maximum, multiplier=self._multiplier
)
return retry_target(
target,
self._predicate,
sleep_generator,
self._deadline,
on_error=on_error,
)
return retry_wrapped_func
def with_deadline(self, deadline):
"""Return a copy of this retry with the given deadline.
Args:
deadline (float): How long to keep retrying.
Returns:
Retry: A new retry instance with the given deadline.
"""
return Retry(
predicate=self._predicate,
initial=self._initial,
maximum=self._maximum,
multiplier=self._multiplier,
deadline=deadline,
)
def with_predicate(self, predicate):
"""Return a copy of this retry with the given predicate.
Args:
predicate (Callable[Exception]): A callable that should return
``True`` if the given exception is retryable.
Returns:
Retry: A new retry instance with the given predicate.
"""
return Retry(
predicate=predicate,
initial=self._initial,
maximum=self._maximum,
multiplier=self._multiplier,
deadline=self._deadline,
)
def with_delay(self, initial=None, maximum=None, multiplier=None):
"""Return a copy of this retry with the given delay options.
Args:
            initial (float): The minimum amount of time to delay. This must
be greater than 0.
            maximum (float): The maximum amount of time to delay.
multiplier (float): The multiplier applied to the delay.
Returns:
            Retry: A new retry instance with the given delay options.
"""
return Retry(
predicate=self._predicate,
initial=initial if initial is not None else self._initial,
maximum=maximum if maximum is not None else self._maximum,
            multiplier=multiplier if multiplier is not None else self._multiplier,
deadline=self._deadline,
)
def __str__(self):
return (
"<Retry predicate={}, initial={:.1f}, maximum={:.1f}, "
"multiplier={:.1f}, deadline={:.1f}>".format(
self._predicate,
self._initial,
self._maximum,
self._multiplier,
self._deadline,
)
)
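# Illustrative sketch (not part of the original module): using a Retry
# instance as a decorator and deriving a variant with a different deadline via
# with_deadline. The decorated function stands in for a real RPC call.
def _example_retry_decorator():
    attempts = {"count": 0}

    @Retry(predicate=if_transient_error, initial=0.1, maximum=1.0, deadline=5.0)
    def flaky_rpc():
        attempts["count"] += 1
        if attempts["count"] < 2:
            raise exceptions.TooManyRequests("simulated transient failure")
        return "ok"

    # Copies of a Retry are cheap to derive; this one keeps the same predicate
    # and delays but allows up to 10 seconds of retrying.
    longer_retry = Retry(predicate=if_transient_error).with_deadline(10.0)
    return flaky_rpc(), longer_retry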
|
|
# $Id$
#
# Copyright (C) 2002-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" tools for interacting with chemdraw
"""
from __future__ import print_function
import string, tempfile, os, time
try:
import pythoncom
from win32com.client import gencache, Dispatch, constants
import win32com.client.gencache
cdxModule = win32com.client.gencache.EnsureModule("{5F646AAB-3B56-48D2-904C-A68D7989C251}", 0, 7,
0)
except Exception:
cdxModule = None
_cdxVersion = 0
raise ImportError("ChemDraw version (at least version 7) not found.")
else:
_cdxVersion = 7
if cdxModule:
from win32com.client import Dispatch
import win32gui
import re
cdApp = None
theDoc = None
theObjs = None
selectItem = None
cleanItem = None
centerItem = None
def StartChemDraw(visible=True, openDoc=False, showDoc=False):
""" launches chemdraw """
global cdApp, theDoc, theObjs, selectItem, cleanItem, centerItem
if cdApp is not None:
# if called more than once, do a restart
holder = None
selectItem = None
cleanItem = None
centerItem = None
theObjs = None
theDoc = None
cdApp = None
cdApp = Dispatch('ChemDraw.Application')
if openDoc:
theDoc = cdApp.Documents.Add()
theObjs = theDoc.Objects
else:
theDoc = None
selectItem = cdApp.MenuBars(1).Menus(2).MenuItems(8)
cleanItem = cdApp.MenuBars(1).Menus(5).MenuItems(6)
if _cdxVersion == 6:
centerItem = cdApp.MenuBars(1).Menus(4).MenuItems(1)
else:
centerItem = cdApp.MenuBars(1).Menus(4).MenuItems(7)
if visible:
cdApp.Visible = 1
if theDoc and showDoc:
theDoc.Activate()
def ReactivateChemDraw(openDoc=True, showDoc=True):
global cdApp, theDoc, theObjs
cdApp.Visible = 1
if openDoc:
theDoc = cdApp.Documents.Add()
if theDoc and showDoc:
theDoc.Activate()
theObjs = theDoc.Objects
# ------------------------------------------------------------------
# interactions with Chemdraw
# ------------------------------------------------------------------
def CDXConvert(inData, inFormat, outFormat):
"""converts the data passed in from one format to another
inFormat should be one of the following:
chemical/x-cdx chemical/cdx
chemical/x-daylight-smiles chemical/daylight-smiles
chemical/x-mdl-isis chemical/mdl-isis
chemical/x-mdl-molfile chemical/mdl-molfile
chemical/x-mdl-rxn chemical/mdl-rxn
chemical/x-mdl-tgf chemical/mdl-tgf
chemical/x-questel-F1
chemical/x-questel-F1-query
outFormat should be one of the preceding or:
image/x-png image/png
image/x-wmf image/wmf
image/tiff
application/postscript
image/gif
"""
global theObjs, theDoc
if cdApp is None:
StartChemDraw()
if theObjs is None:
if theDoc is None:
theDoc = cdApp.Documents.Add()
theObjs = theDoc.Objects
theObjs.SetData(inFormat, inData, pythoncom.Missing)
outD = theObjs.GetData(outFormat)
theObjs.Clear()
return outD
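# Illustrative sketch (not part of the original module): converting a SMILES
# string to an MDL molfile with CDXConvert. This needs a local ChemDraw
# installation reachable over COM; the SMILES below is just an example input.
def _exampleCDXConvert():
  smiles = 'c1ccccc1C(=O)O'  # benzoic acid
  return CDXConvert(smiles, 'chemical/x-daylight-smiles', 'chemical/x-mdl-molfile')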
def CDXClean(inData, inFormat, outFormat):
"""calls the CDXLib Clean function on the data passed in.
CDXLib_Clean attempts to clean (prettify) the data before
doing an output conversion. It can be thought of as CDXConvert++.
CDXClean supports the same input and output specifiers as CDXConvert
(see above)
"""
global cdApp, theDoc, theObjs, selectItem, cleanItem
if cdApp is None:
StartChemDraw()
if theObjs is None:
if theDoc is None:
theDoc = cdApp.Documents.Add()
theObjs = theDoc.Objects
theObjs.SetData(inFormat, inData, pythoncom.Missing)
theObjs.Select()
cleanItem.Execute()
outD = theObjs.GetData(outFormat)
theObjs.Clear()
return outD
def CDXDisplay(inData, inFormat='chemical/cdx', clear=1):
""" displays the data in Chemdraw """
global cdApp, theDoc, theObjs, selectItem, cleanItem, centerItem
if cdApp is None:
StartChemDraw()
try:
theDoc.Activate()
except Exception:
ReactivateChemDraw()
theObjs = theDoc.Objects
if clear:
theObjs.Clear()
theObjs.SetData(inFormat, inData, pythoncom.Missing)
return
def CDXGrab(outFormat='chemical/x-mdl-molfile'):
""" returns the contents of the active chemdraw document
"""
global cdApp, theDoc
if cdApp is None:
res = ""
else:
cdApp.Visible = 1
if not cdApp.ActiveDocument:
ReactivateChemDraw()
try:
res = cdApp.ActiveDocument.Objects.GetData(outFormat)
except Exception:
res = ""
return res
def CloseChemdraw():
""" shuts down chemdraw
"""
global cdApp
try:
cdApp.Quit()
except Exception:
pass
Exit()
def Exit():
""" destroys our link to Chemdraw
"""
global cdApp
cdApp = None
def SaveChemDrawDoc(fileName='save.cdx'):
"""force chemdraw to save the active document
NOTE: the extension of the filename will determine the format
used to save the file.
"""
d = cdApp.ActiveDocument
d.SaveAs(fileName)
def CloseChemDrawDoc():
"""force chemdraw to save the active document
NOTE: the extension of the filename will determine the format
used to save the file.
"""
d = cdApp.ActiveDocument
d.Close()
def RaiseWindowNamed(nameRe):
# start by getting a list of all the windows:
cb = lambda x, y: y.append(x)
wins = []
win32gui.EnumWindows(cb, wins)
# now check to see if any match our regexp:
tgtWin = -1
for win in wins:
txt = win32gui.GetWindowText(win)
if nameRe.match(txt):
tgtWin = win
break
if tgtWin >= 0:
win32gui.ShowWindow(tgtWin, 1)
win32gui.BringWindowToTop(tgtWin)
def RaiseChemDraw():
e = re.compile('^ChemDraw')
RaiseWindowNamed(e)
try:
from PIL import Image
from io import StringIO
def SmilesToPilImage(smilesStr):
"""takes a SMILES string and returns a PIL image using chemdraw
"""
return MolToPilImage(smilesStr, inFormat='chemical/daylight-smiles', outFormat='image/gif')
def MolToPilImage(dataStr, inFormat='chemical/daylight-smiles', outFormat='image/gif'):
"""takes a molecule string and returns a PIL image using chemdraw
"""
# do the conversion...
res = CDXConvert(dataStr, inFormat, outFormat)
dataFile = StringIO(str(res))
img = Image.open(dataFile).convert('RGB')
return img
except ImportError:
def SmilesToPilImage(smilesStr):
print('You need to have PIL installed to use this functionality')
return None
def MolToPilImage(dataStr, inFormat='chemical/daylight-smiles', outFormat='image/gif'):
print('You need to have PIL installed to use this functionality')
return None
# ------------------------------------------------------------------
# interactions with Chem3D
# ------------------------------------------------------------------
c3dApp = None
def StartChem3D(visible=0):
""" launches Chem3D """
global c3dApp
c3dApp = Dispatch('Chem3D.Application')
if not c3dApp.Visible:
c3dApp.Visible = visible
def CloseChem3D():
""" shuts down Chem3D """
global c3dApp
c3dApp.Quit()
c3dApp = None
availChem3DProps = ('DipoleMoment', 'BendEnergy', 'Non14VDWEnergy', 'StericEnergy',
'StretchBendEnergy', 'StretchEnergy', 'TorsionEnergy', 'VDW14Energy')
def Add3DCoordsToMol(data, format, props={}):
""" adds 3D coordinates to the data passed in using Chem3D
**Arguments**
- data: the molecular data
- format: the format of _data_. Should be something accepted by
_CDXConvert_
- props: (optional) a dictionary used to return calculated properties
"""
global c3dApp
if c3dApp is None:
StartChem3D()
if format != 'chemical/mdl-molfile':
molData = CDXClean(data, format, 'chemical/mdl-molfile')
else:
molData = data
molFName = tempfile.mktemp('.mol')
open(molFName, 'wb+').write(molData)
doc = c3dApp.Documents.Open(molFName)
if not doc:
print('cannot open molecule')
raise ValueError('No Molecule')
# set up the MM2 job
job = Dispatch('Chem3D.MM2Job')
job.Type = 1
job.DisplayEveryIteration = 0
job.RecordEveryIteration = 0
# start the calculation...
doc.MM2Compute(job)
# and wait for it to finish
while doc.ComputeStatus in [0x434f4d50, 0x50454e44]:
pass
#outFName = tempfile.mktemp('.mol')
# this is horrible, but apparently Chem3D gets pissy with tempfiles:
outFName = os.getcwd() + '/to3d.mol'
doc.SaveAs(outFName)
# generate the properties
for prop in availChem3DProps:
props[prop] = eval('doc.%s' % prop)
doc.Close(0)
os.unlink(molFName)
c3dData = open(outFName, 'r').read()
gone = 0
while not gone:
try:
os.unlink(outFName)
except Exception:
time.sleep(.5)
else:
gone = 1
return c3dData
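# Illustrative sketch (not part of the original module): generating 3D
# coordinates plus MM2 properties for a molfile with Add3DCoordsToMol.
# Requires working ChemDraw and Chem3D COM servers; 'input.mol' is a
# hypothetical file name.
def _exampleAdd3DCoords():
  props = {}
  with open('input.mol', 'r') as f:
    molData = f.read()
  molData3D = Add3DCoordsToMol(molData, 'chemical/mdl-molfile', props=props)
  return molData3D, props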
def OptimizeSDFile(inFileName, outFileName, problemFileName='problems.sdf', restartEvery=20):
""" optimizes the structure of every molecule in the input SD file
**Arguments**
- inFileName: name of the input SD file
- outFileName: name of the output SD file
- problemFileName: (optional) name of the SD file used to store molecules which
fail during the optimization process
- restartEvery: (optional) Chem3D will be shut down and restarted
      every _restartEvery_ molecules to try and keep memory leaks under control
"""
inFile = open(inFileName, 'r')
outFile = open(outFileName, 'w+')
problemFile = None
props = {}
lines = []
nextLine = inFile.readline()
skip = 0
nDone = 0
t1 = time.time()
while nextLine != '':
if nextLine.find('M END') != -1:
lines.append(nextLine)
molBlock = string.join(lines, '')
try:
newMolBlock = Add3DCoordsToMol(molBlock, 'chemical/mdl-molfile', props=props)
except Exception:
badBlock = molBlock
skip = 1
lines = []
else:
skip = 0
lines = [newMolBlock]
elif nextLine.find('$$$$') != -1:
t2 = time.time()
nDone += 1
      print('finished molecule %d in %f seconds' % (nDone, t2 - t1))
t1 = time.time()
if nDone % restartEvery == 0:
CloseChem3D()
StartChem3D()
outFile.close()
outFile = open(outFileName, 'a')
if not skip:
for prop in props.keys():
lines.append('> <%s>\n%f\n\n' % (prop, props[prop]))
lines.append(nextLine)
outFile.write(string.join(lines, ''))
lines = []
else:
skip = 0
lines.append(nextLine)
if problemFile is None:
problemFile = open(problemFileName, 'w+')
problemFile.write(badBlock)
problemFile.write(string.join(lines, ''))
lines = []
else:
lines.append(nextLine)
nextLine = inFile.readline()
outFile.close()
if problemFile is not None:
problemFile.close()
if __name__ == '__main__':
inStr = 'CCC(C=O)CCC'
img = SmilesToPilImage(inStr)
img.save('foo.jpg')
convStr = CDXClean(inStr, 'chemical/x-daylight-smiles', 'chemical/x-daylight-smiles')
print('in:', inStr)
print('out:', convStr)
convStr = CDXConvert(inStr, 'chemical/x-daylight-smiles', 'chemical/x-mdl-molfile')
print('in:', inStr)
print('out:', convStr)
convStr2 = CDXClean(convStr, 'chemical/x-mdl-molfile', 'chemical/x-mdl-molfile')
print('out2:', convStr2)
inStr = 'COc1ccc(c2onc(c2C(=O)NCCc3ccc(F)cc3)c4ccc(F)cc4)c(OC)c1'
convStr = CDXConvert(inStr, 'chemical/x-daylight-smiles', 'chemical/x-mdl-molfile')
out = open('test.mol', 'w+')
out.write(convStr)
out.close()
|
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from operator import itemgetter
import warnings
from jax import random, vmap
from jax.lax import stop_gradient
import jax.numpy as jnp
from jax.scipy.special import logsumexp
from numpyro.distributions.kl import kl_divergence
from numpyro.distributions.util import scale_and_mask
from numpyro.handlers import replay, seed, substitute, trace
from numpyro.infer.util import get_importance_trace, log_density
from numpyro.util import _validate_model, check_model_guide_match, find_stack_level
class ELBO:
"""
Base class for all ELBO objectives.
Subclasses should implement either :meth:`loss` or :meth:`loss_with_mutable_state`.
:param num_particles: The number of particles/samples used to form the ELBO
(gradient) estimators.
"""
"""
Determines whether the ELBO objective can support inference of discrete latent variables.
Subclasses that are capable of inferring discrete latent variables should override to `True`
"""
can_infer_discrete = False
def __init__(self, num_particles=1):
self.num_particles = num_particles
def loss(self, rng_key, param_map, model, guide, *args, **kwargs):
"""
Evaluates the ELBO with an estimator that uses num_particles many samples/particles.
:param jax.random.PRNGKey rng_key: random number generator seed.
:param dict param_map: dictionary of current parameter values keyed by site
name.
:param model: Python callable with NumPyro primitives for the model.
:param guide: Python callable with NumPyro primitives for the guide.
:param args: arguments to the model / guide (these can possibly vary during
the course of fitting).
:param kwargs: keyword arguments to the model / guide (these can possibly vary
during the course of fitting).
:return: negative of the Evidence Lower Bound (ELBO) to be minimized.
"""
return self.loss_with_mutable_state(
rng_key, param_map, model, guide, *args, **kwargs
)["loss"]
def loss_with_mutable_state(
self, rng_key, param_map, model, guide, *args, **kwargs
):
"""
        Like :meth:`loss`, but also updates and returns the mutable state, which stores the
values at :func:`~numpyro.mutable` sites.
:param jax.random.PRNGKey rng_key: random number generator seed.
:param dict param_map: dictionary of current parameter values keyed by site
name.
:param model: Python callable with NumPyro primitives for the model.
:param guide: Python callable with NumPyro primitives for the guide.
:param args: arguments to the model / guide (these can possibly vary during
the course of fitting).
:param kwargs: keyword arguments to the model / guide (these can possibly vary
during the course of fitting).
:return: a tuple of ELBO loss and the mutable state
"""
raise NotImplementedError("This ELBO objective does not support mutable state.")
class Trace_ELBO(ELBO):
"""
A trace implementation of ELBO-based SVI. The estimator is constructed
along the lines of references [1] and [2]. There are no restrictions on the
dependency structure of the model or the guide.
This is the most basic implementation of the Evidence Lower Bound, which is the
fundamental objective in Variational Inference. This implementation has various
limitations (for example it only supports random variables with reparameterized
samplers) but can be used as a template to build more sophisticated loss
objectives.
For more details, refer to http://pyro.ai/examples/svi_part_i.html.
**References:**
1. *Automated Variational Inference in Probabilistic Programming*,
David Wingate, Theo Weber
2. *Black Box Variational Inference*,
Rajesh Ranganath, Sean Gerrish, David M. Blei
:param num_particles: The number of particles/samples used to form the ELBO
(gradient) estimators.
"""
def __init__(self, num_particles=1):
self.num_particles = num_particles
def loss_with_mutable_state(
self, rng_key, param_map, model, guide, *args, **kwargs
):
def single_particle_elbo(rng_key):
params = param_map.copy()
model_seed, guide_seed = random.split(rng_key)
seeded_model = seed(model, model_seed)
seeded_guide = seed(guide, guide_seed)
guide_log_density, guide_trace = log_density(
seeded_guide, args, kwargs, param_map
)
mutable_params = {
name: site["value"]
for name, site in guide_trace.items()
if site["type"] == "mutable"
}
params.update(mutable_params)
seeded_model = replay(seeded_model, guide_trace)
model_log_density, model_trace = log_density(
seeded_model, args, kwargs, params
)
check_model_guide_match(model_trace, guide_trace)
_validate_model(model_trace, plate_warning="loose")
mutable_params.update(
{
name: site["value"]
for name, site in model_trace.items()
if site["type"] == "mutable"
}
)
# log p(z) - log q(z)
elbo_particle = model_log_density - guide_log_density
if mutable_params:
if self.num_particles == 1:
return elbo_particle, mutable_params
else:
raise ValueError(
"Currently, we only support mutable states with num_particles=1."
)
else:
return elbo_particle, None
# Return (-elbo) since by convention we do gradient descent on a loss and
# the ELBO is a lower bound that needs to be maximized.
if self.num_particles == 1:
elbo, mutable_state = single_particle_elbo(rng_key)
return {"loss": -elbo, "mutable_state": mutable_state}
else:
rng_keys = random.split(rng_key, self.num_particles)
elbos, mutable_state = vmap(single_particle_elbo)(rng_keys)
return {"loss": -jnp.mean(elbos), "mutable_state": mutable_state}
def _get_log_prob_sum(site):
if site["intermediates"]:
log_prob = site["fn"].log_prob(site["value"], site["intermediates"])
else:
log_prob = site["fn"].log_prob(site["value"])
log_prob = scale_and_mask(log_prob, site["scale"])
return jnp.sum(log_prob)
def _check_mean_field_requirement(model_trace, guide_trace):
"""
Checks that the guide and model sample sites are ordered identically.
This is sufficient but not necessary for correctness.
"""
model_sites = [
name
for name, site in model_trace.items()
if site["type"] == "sample" and name in guide_trace
]
guide_sites = [
name
for name, site in guide_trace.items()
if site["type"] == "sample" and name in model_trace
]
assert set(model_sites) == set(guide_sites)
if model_sites != guide_sites:
warnings.warn(
"Failed to verify mean field restriction on the guide. "
"To eliminate this warning, ensure model and guide sites "
"occur in the same order.\n"
+ "Model sites:\n "
+ "\n ".join(model_sites)
+ "Guide sites:\n "
+ "\n ".join(guide_sites),
stacklevel=find_stack_level(),
        )
class TraceMeanField_ELBO(ELBO):
"""
A trace implementation of ELBO-based SVI. This is currently the only
ELBO estimator in NumPyro that uses analytic KL divergences when those
are available.
.. warning:: This estimator may give incorrect results if the mean-field
condition is not satisfied.
The mean field condition is a sufficient but not necessary condition for
this estimator to be correct. The precise condition is that for every
latent variable `z` in the guide, its parents in the model must not include
any latent variables that are descendants of `z` in the guide. Here
'parents in the model' and 'descendants in the guide' is with respect
to the corresponding (statistical) dependency structure. For example, this
condition is always satisfied if the model and guide have identical
dependency structures.
"""
def loss_with_mutable_state(
self, rng_key, param_map, model, guide, *args, **kwargs
):
def single_particle_elbo(rng_key):
params = param_map.copy()
model_seed, guide_seed = random.split(rng_key)
seeded_model = seed(model, model_seed)
seeded_guide = seed(guide, guide_seed)
subs_guide = substitute(seeded_guide, data=param_map)
guide_trace = trace(subs_guide).get_trace(*args, **kwargs)
mutable_params = {
name: site["value"]
for name, site in guide_trace.items()
if site["type"] == "mutable"
}
params.update(mutable_params)
subs_model = substitute(replay(seeded_model, guide_trace), data=params)
model_trace = trace(subs_model).get_trace(*args, **kwargs)
mutable_params.update(
{
name: site["value"]
for name, site in model_trace.items()
if site["type"] == "mutable"
}
)
check_model_guide_match(model_trace, guide_trace)
_validate_model(model_trace, plate_warning="loose")
_check_mean_field_requirement(model_trace, guide_trace)
elbo_particle = 0
for name, model_site in model_trace.items():
if model_site["type"] == "sample":
if model_site["is_observed"]:
elbo_particle = elbo_particle + _get_log_prob_sum(model_site)
else:
guide_site = guide_trace[name]
try:
kl_qp = kl_divergence(guide_site["fn"], model_site["fn"])
kl_qp = scale_and_mask(kl_qp, scale=guide_site["scale"])
elbo_particle = elbo_particle - jnp.sum(kl_qp)
except NotImplementedError:
elbo_particle = (
elbo_particle
+ _get_log_prob_sum(model_site)
- _get_log_prob_sum(guide_site)
)
# handle auxiliary sites in the guide
for name, site in guide_trace.items():
if site["type"] == "sample" and name not in model_trace:
assert site["infer"].get("is_auxiliary") or site["is_observed"]
elbo_particle = elbo_particle - _get_log_prob_sum(site)
if mutable_params:
if self.num_particles == 1:
return elbo_particle, mutable_params
else:
raise ValueError(
"Currently, we only support mutable states with num_particles=1."
)
else:
return elbo_particle, None
if self.num_particles == 1:
elbo, mutable_state = single_particle_elbo(rng_key)
return {"loss": -elbo, "mutable_state": mutable_state}
else:
rng_keys = random.split(rng_key, self.num_particles)
elbos, mutable_state = vmap(single_particle_elbo)(rng_keys)
return {"loss": -jnp.mean(elbos), "mutable_state": mutable_state}
class RenyiELBO(ELBO):
r"""
An implementation of Renyi's :math:`\alpha`-divergence
variational inference following reference [1].
In order for the objective to be a strict lower bound, we require
:math:`\alpha \ge 0`. Note, however, that according to reference [1], depending
on the dataset :math:`\alpha < 0` might give better results. In the special case
    :math:`\alpha = 0`, the objective function is that of the importance weighted
autoencoder derived in reference [2].
.. note:: Setting :math:`\alpha < 1` gives a better bound than the usual ELBO.
:param float alpha: The order of :math:`\alpha`-divergence.
Here :math:`\alpha \neq 1`. Default is 0.
:param num_particles: The number of particles/samples
used to form the objective (gradient) estimator. Default is 2.
**References:**
1. *Renyi Divergence Variational Inference*, Yingzhen Li, Richard E. Turner
2. *Importance Weighted Autoencoders*, Yuri Burda, Roger Grosse, Ruslan Salakhutdinov
"""
def __init__(self, alpha=0, num_particles=2):
if alpha == 1:
raise ValueError(
"The order alpha should not be equal to 1. Please use ELBO class"
"for the case alpha = 1."
)
self.alpha = alpha
super().__init__(num_particles=num_particles)
def loss(self, rng_key, param_map, model, guide, *args, **kwargs):
def single_particle_elbo(rng_key):
model_seed, guide_seed = random.split(rng_key)
seeded_model = seed(model, model_seed)
seeded_guide = seed(guide, guide_seed)
guide_log_density, guide_trace = log_density(
seeded_guide, args, kwargs, param_map
)
# NB: we only want to substitute params not available in guide_trace
model_param_map = {
k: v for k, v in param_map.items() if k not in guide_trace
}
seeded_model = replay(seeded_model, guide_trace)
model_log_density, model_trace = log_density(
seeded_model, args, kwargs, model_param_map
)
check_model_guide_match(model_trace, guide_trace)
_validate_model(model_trace, plate_warning="loose")
# log p(z) - log q(z)
elbo = model_log_density - guide_log_density
return elbo
rng_keys = random.split(rng_key, self.num_particles)
elbos = vmap(single_particle_elbo)(rng_keys)
scaled_elbos = (1.0 - self.alpha) * elbos
avg_log_exp = logsumexp(scaled_elbos) - jnp.log(self.num_particles)
weights = jnp.exp(scaled_elbos - avg_log_exp)
renyi_elbo = avg_log_exp / (1.0 - self.alpha)
weighted_elbo = jnp.dot(stop_gradient(weights), elbos) / self.num_particles
return -(stop_gradient(renyi_elbo - weighted_elbo) + weighted_elbo)
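# Illustrative note (not part of this module): RenyiELBO is a drop-in
# replacement for Trace_ELBO when building an SVI objective; alpha=0.0
# recovers the IWAE-style bound of reference [2], while values approaching 1
# behave like the standard ELBO.
def _example_renyi_objective():
    return RenyiELBO(alpha=0.5, num_particles=10)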
def _get_plate_stacks(trace):
"""
This builds a dict mapping site name to a set of plate stacks. Each
plate stack is a list of :class:`CondIndepStackFrame`s corresponding to
a :class:`plate`. This information is used by :class:`Trace_ELBO` and
:class:`TraceGraph_ELBO`.
"""
return {
name: [f for f in node["cond_indep_stack"]]
for name, node in trace.items()
if node["type"] == "sample"
}
class MultiFrameTensor(dict):
"""
A container for sums of Tensors among different :class:`plate` contexts.
Used in :class:`~numpyro.infer.elbo.TraceGraph_ELBO` to simplify
downstream cost computation logic.
Example::
downstream_cost = MultiFrameTensor()
for site in downstream_nodes:
downstream_cost.add((site["cond_indep_stack"], site["log_prob"]))
downstream_cost.add(*other_costs.items()) # add in bulk
summed = downstream_cost.sum_to(target_site["cond_indep_stack"])
"""
def __init__(self, *items):
super().__init__()
self.add(*items)
def add(self, *items):
"""
Add a collection of (cond_indep_stack, tensor) pairs. Keys are
``cond_indep_stack``s, i.e. tuples of :class:`CondIndepStackFrame`s.
Values are :class:`numpy.ndarray`s.
"""
for cond_indep_stack, value in items:
frames = frozenset(f for f in cond_indep_stack)
assert all(f.dim < 0 and -jnp.ndim(value) <= f.dim for f in frames)
if frames in self:
self[frames] = self[frames] + value
else:
self[frames] = value
def sum_to(self, target_frames):
total = None
for frames, value in self.items():
for f in frames:
if f not in target_frames and jnp.shape(value)[f.dim] != 1:
value = value.sum(f.dim, keepdims=True)
while jnp.shape(value) and jnp.shape(value)[0] == 1:
value = value.squeeze(0)
total = value if total is None else total + value
return 0.0 if total is None else total
def __repr__(self):
return "%s(%s)" % (
type(self).__name__,
",\n\t".join(["({}, ...)".format(frames) for frames in self]),
)
def _identify_dense_edges(trace):
succ = {}
for name, node in trace.items():
if node["type"] == "sample":
succ[name] = set()
for name, node in trace.items():
if node["type"] == "sample":
for past_name, past_node in trace.items():
if past_node["type"] == "sample":
if past_name == name:
break
# XXX: different from Pyro, we always add edge past_name -> name
succ[past_name].add(name)
return succ
def _topological_sort(succ, reverse=False):
"""
Return a list of nodes (site names) in topologically sorted order.
"""
def dfs(site, visited):
if site in visited:
return
for s in succ[site]:
for node in dfs(s, visited):
yield node
visited.add(site)
yield site
visited = set()
top_sorted = []
for s in succ:
for node in dfs(s, visited):
top_sorted.append(node)
return top_sorted if reverse else list(reversed(top_sorted))
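# Illustrative sketch (not part of this module): on a small successor map the
# helper returns ancestors before descendants by default and the reverse
# order with reverse=True.
def _example_topological_sort():
    succ = {"x": {"y"}, "y": {"z"}, "z": set()}
    assert _topological_sort(succ) == ["x", "y", "z"]
    assert _topological_sort(succ, reverse=True) == ["z", "y", "x"]
    return succ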
def _compute_downstream_costs(model_trace, guide_trace, non_reparam_nodes):
model_successors = _identify_dense_edges(model_trace)
guide_successors = _identify_dense_edges(guide_trace)
# recursively compute downstream cost nodes for all sample sites in model and guide
# (even though ultimately just need for non-reparameterizable sample sites)
# 1. downstream costs used for rao-blackwellization
# 2. model observe sites (as well as terms that arise from the model and guide having different
# dependency structures) are taken care of via 'children_in_model' below
topo_sort_guide_nodes = _topological_sort(guide_successors, reverse=True)
topo_sort_guide_nodes = [
x for x in topo_sort_guide_nodes if guide_trace[x]["type"] == "sample"
]
ordered_guide_nodes_dict = {n: i for i, n in enumerate(topo_sort_guide_nodes)}
downstream_guide_cost_nodes = {}
downstream_costs = {}
stacks = _get_plate_stacks(model_trace)
for node in topo_sort_guide_nodes:
downstream_costs[node] = MultiFrameTensor(
(
stacks[node],
model_trace[node]["log_prob"] - guide_trace[node]["log_prob"],
)
)
nodes_included_in_sum = set([node])
downstream_guide_cost_nodes[node] = set([node])
# make more efficient by ordering children appropriately (higher children first)
children = [(k, -ordered_guide_nodes_dict[k]) for k in guide_successors[node]]
sorted_children = sorted(children, key=itemgetter(1))
for child, _ in sorted_children:
child_cost_nodes = downstream_guide_cost_nodes[child]
downstream_guide_cost_nodes[node].update(child_cost_nodes)
if nodes_included_in_sum.isdisjoint(child_cost_nodes): # avoid duplicates
downstream_costs[node].add(*downstream_costs[child].items())
# XXX nodes_included_in_sum logic could be more fine-grained, possibly leading
# to speed-ups in case there are many duplicates
nodes_included_in_sum.update(child_cost_nodes)
missing_downstream_costs = (
downstream_guide_cost_nodes[node] - nodes_included_in_sum
)
# include terms we missed because we had to avoid duplicates
for missing_node in missing_downstream_costs:
downstream_costs[node].add(
(
stacks[missing_node],
model_trace[missing_node]["log_prob"]
- guide_trace[missing_node]["log_prob"],
)
)
# finish assembling complete downstream costs
# (the above computation may be missing terms from model)
for site in non_reparam_nodes:
children_in_model = set()
for node in downstream_guide_cost_nodes[site]:
children_in_model.update(model_successors[node])
# remove terms accounted for above
children_in_model.difference_update(downstream_guide_cost_nodes[site])
for child in children_in_model:
assert model_trace[child]["type"] == "sample"
downstream_costs[site].add((stacks[child], model_trace[child]["log_prob"]))
downstream_guide_cost_nodes[site].update([child])
for k in non_reparam_nodes:
downstream_costs[k] = downstream_costs[k].sum_to(
guide_trace[k]["cond_indep_stack"]
)
return downstream_costs, downstream_guide_cost_nodes
class TraceGraph_ELBO(ELBO):
"""
A TraceGraph implementation of ELBO-based SVI. The gradient estimator
is constructed along the lines of reference [1] specialized to the case
of the ELBO. It supports arbitrary dependency structure for the model
and guide.
Where possible, conditional dependency information as recorded in the
trace is used to reduce the variance of the gradient estimator.
In particular two kinds of conditional dependency information are
used to reduce variance:
- the sequential order of samples (z is sampled after y => y does not depend on z)
- :class:`~numpyro.plate` generators
References
[1] `Gradient Estimation Using Stochastic Computation Graphs`,
John Schulman, Nicolas Heess, Theophane Weber, Pieter Abbeel
"""
can_infer_discrete = True
def __init__(self, num_particles=1):
super().__init__(num_particles=num_particles)
def loss(self, rng_key, param_map, model, guide, *args, **kwargs):
"""
Evaluates the ELBO with an estimator that uses num_particles many samples/particles.
:param jax.random.PRNGKey rng_key: random number generator seed.
:param dict param_map: dictionary of current parameter values keyed by site
name.
:param model: Python callable with NumPyro primitives for the model.
:param guide: Python callable with NumPyro primitives for the guide.
:param args: arguments to the model / guide (these can possibly vary during
the course of fitting).
:param kwargs: keyword arguments to the model / guide (these can possibly vary
during the course of fitting).
:return: negative of the Evidence Lower Bound (ELBO) to be minimized.
"""
def single_particle_elbo(rng_key):
model_seed, guide_seed = random.split(rng_key)
seeded_model = seed(model, model_seed)
seeded_guide = seed(guide, guide_seed)
model_trace, guide_trace = get_importance_trace(
seeded_model, seeded_guide, args, kwargs, param_map
)
check_model_guide_match(model_trace, guide_trace)
_validate_model(model_trace, plate_warning="strict")
# XXX: different from Pyro, we don't support baseline_loss here
non_reparam_nodes = {
name
for name, site in guide_trace.items()
if site["type"] == "sample"
and (not site["is_observed"])
and (not site["fn"].has_rsample)
}
if non_reparam_nodes:
downstream_costs, _ = _compute_downstream_costs(
model_trace, guide_trace, non_reparam_nodes
)
elbo = 0.0
for site in model_trace.values():
if site["type"] == "sample":
elbo = elbo + jnp.sum(site["log_prob"])
for name, site in guide_trace.items():
if site["type"] == "sample":
log_prob_sum = jnp.sum(site["log_prob"])
if name in non_reparam_nodes:
surrogate = jnp.sum(
site["log_prob"] * stop_gradient(downstream_costs[name])
)
log_prob_sum = (
stop_gradient(log_prob_sum + surrogate) - surrogate
)
elbo = elbo - log_prob_sum
return elbo
# Return (-elbo) since by convention we do gradient descent on a loss and
# the ELBO is a lower bound that needs to be maximized.
if self.num_particles == 1:
return -single_particle_elbo(rng_key)
else:
rng_keys = random.split(rng_key, self.num_particles)
return -jnp.mean(vmap(single_particle_elbo)(rng_keys))
|
|
"""
Provide the functionality to group entities.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/group/
"""
import asyncio
import logging
import os
import voluptuous as vol
from homeassistant import config as conf_util, core as ha
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_ICON, CONF_NAME, STATE_CLOSED, STATE_HOME,
STATE_NOT_HOME, STATE_OFF, STATE_ON, STATE_OPEN, STATE_LOCKED,
STATE_UNLOCKED, STATE_OK, STATE_PROBLEM, STATE_UNKNOWN,
ATTR_ASSUMED_STATE, SERVICE_RELOAD)
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe
DOMAIN = 'group'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_ENTITIES = 'entities'
CONF_VIEW = 'view'
CONF_CONTROL = 'control'
ATTR_ADD_ENTITIES = 'add_entities'
ATTR_AUTO = 'auto'
ATTR_CONTROL = 'control'
ATTR_ENTITIES = 'entities'
ATTR_ICON = 'icon'
ATTR_NAME = 'name'
ATTR_OBJECT_ID = 'object_id'
ATTR_ORDER = 'order'
ATTR_VIEW = 'view'
ATTR_VISIBLE = 'visible'
SERVICE_SET_VISIBILITY = 'set_visibility'
SERVICE_SET = 'set'
SERVICE_REMOVE = 'remove'
CONTROL_TYPES = vol.In(['hidden', None])
SET_VISIBILITY_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_VISIBLE): cv.boolean
})
RELOAD_SERVICE_SCHEMA = vol.Schema({})
SET_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_OBJECT_ID): cv.slug,
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_VIEW): cv.boolean,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_CONTROL): CONTROL_TYPES,
vol.Optional(ATTR_VISIBLE): cv.boolean,
vol.Exclusive(ATTR_ENTITIES, 'entities'): cv.entity_ids,
vol.Exclusive(ATTR_ADD_ENTITIES, 'entities'): cv.entity_ids,
})
REMOVE_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_OBJECT_ID): cv.slug,
})
_LOGGER = logging.getLogger(__name__)
def _conf_preprocess(value):
"""Preprocess alternative configuration formats."""
if not isinstance(value, dict):
value = {CONF_ENTITIES: value}
return value
GROUP_SCHEMA = vol.Schema({
vol.Optional(CONF_ENTITIES): vol.Any(cv.entity_ids, None),
CONF_VIEW: cv.boolean,
CONF_NAME: cv.string,
CONF_ICON: cv.icon,
CONF_CONTROL: CONTROL_TYPES,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({cv.match_all: vol.All(_conf_preprocess, GROUP_SCHEMA)})
}, extra=vol.ALLOW_EXTRA)
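# Example configuration.yaml entry (illustrative; the entity ids, names and
# icon are hypothetical and only demonstrate the schema defined above):
#
# group:
#   living_room:
#     name: Living Room
#     icon: mdi:sofa
#     entities:
#       - light.ceiling
#       - switch.floor_lamp
#   downstairs:
#     view: true
#     entities:
#       - group.living_room
#       - group.kitchen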
# List of ON/OFF state tuples for groupable states
_GROUP_TYPES = [(STATE_ON, STATE_OFF), (STATE_HOME, STATE_NOT_HOME),
(STATE_OPEN, STATE_CLOSED), (STATE_LOCKED, STATE_UNLOCKED),
(STATE_PROBLEM, STATE_OK)]
def _get_group_on_off(state):
"""Determine the group on/off states based on a state."""
for states in _GROUP_TYPES:
if state in states:
return states
return None, None
def is_on(hass, entity_id):
"""Test if the group state is in its ON-state."""
state = hass.states.get(entity_id)
if state:
group_on, _ = _get_group_on_off(state.state)
# If we found a group_type, compare to ON-state
return group_on is not None and state.state == group_on
return False
def reload(hass):
"""Reload the automation from config."""
hass.add_job(async_reload, hass)
@callback
def async_reload(hass):
"""Reload the automation from config."""
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_RELOAD))
def set_visibility(hass, entity_id=None, visible=True):
"""Hide or shows a group."""
data = {ATTR_ENTITY_ID: entity_id, ATTR_VISIBLE: visible}
hass.services.call(DOMAIN, SERVICE_SET_VISIBILITY, data)
def set_group(hass, object_id, name=None, entity_ids=None, visible=None,
icon=None, view=None, control=None, add=None):
"""Create a new user group."""
hass.add_job(
async_set_group, hass, object_id, name, entity_ids, visible, icon,
view, control, add)
@callback
def async_set_group(hass, object_id, name=None, entity_ids=None, visible=None,
icon=None, view=None, control=None, add=None):
"""Create a new user group."""
data = {
key: value for key, value in [
(ATTR_OBJECT_ID, object_id),
(ATTR_NAME, name),
(ATTR_ENTITIES, entity_ids),
(ATTR_VISIBLE, visible),
(ATTR_ICON, icon),
(ATTR_VIEW, view),
(ATTR_CONTROL, control),
(ATTR_ADD_ENTITIES, add),
] if value is not None
}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_SET, data))
def remove(hass, name):
"""Remove a user group."""
hass.add_job(async_remove, hass, name)
@callback
def async_remove(hass, object_id):
"""Remove a user group."""
data = {ATTR_OBJECT_ID: object_id}
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_REMOVE, data))
def expand_entity_ids(hass, entity_ids):
"""Return entity_ids with group entity ids replaced by their members.
Async friendly.
"""
found_ids = []
for entity_id in entity_ids:
if not isinstance(entity_id, str):
continue
entity_id = entity_id.lower()
try:
# If entity_id points at a group, expand it
domain, _ = ha.split_entity_id(entity_id)
if domain == DOMAIN:
found_ids.extend(
ent_id for ent_id
in expand_entity_ids(hass, get_entity_ids(hass, entity_id))
if ent_id not in found_ids)
else:
if entity_id not in found_ids:
found_ids.append(entity_id)
except AttributeError:
# Raised by split_entity_id if entity_id is not a string
pass
return found_ids
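# Illustrative sketch (not part of the original component): expanding a group
# entity into its member entity ids. The entity ids below are hypothetical and
# the result depends on the running Home Assistant instance.
def _example_expand_entity_ids(hass):
    """Illustrative only, e.g. -> ['light.ceiling', 'light.corner', 'switch.floor_lamp']."""
    return expand_entity_ids(hass, ['group.living_room', 'switch.floor_lamp'])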
def get_entity_ids(hass, entity_id, domain_filter=None):
"""Get members of this group.
Async friendly.
"""
group = hass.states.get(entity_id)
if not group or ATTR_ENTITY_ID not in group.attributes:
return []
entity_ids = group.attributes[ATTR_ENTITY_ID]
if not domain_filter:
return entity_ids
domain_filter = domain_filter.lower() + '.'
return [ent_id for ent_id in entity_ids
if ent_id.startswith(domain_filter)]
@asyncio.coroutine
def async_setup(hass, config):
"""Set up all groups found definded in the configuration."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
service_groups = {}
yield from _async_process_config(hass, config, component)
descriptions = yield from hass.async_add_job(
conf_util.load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml')
)
@asyncio.coroutine
def reload_service_handler(service):
"""Remove all groups and load new ones from config."""
conf = yield from component.async_prepare_reload()
if conf is None:
return
yield from _async_process_config(hass, conf, component)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler,
descriptions[DOMAIN][SERVICE_RELOAD], schema=RELOAD_SERVICE_SCHEMA)
@asyncio.coroutine
def groups_service_handler(service):
"""Handle dynamic group service functions."""
object_id = service.data[ATTR_OBJECT_ID]
# new group
if service.service == SERVICE_SET and object_id not in service_groups:
entity_ids = service.data.get(ATTR_ENTITIES) or \
service.data.get(ATTR_ADD_ENTITIES) or None
extra_arg = {attr: service.data[attr] for attr in (
ATTR_VISIBLE, ATTR_ICON, ATTR_VIEW, ATTR_CONTROL
) if service.data.get(attr) is not None}
new_group = yield from Group.async_create_group(
hass, service.data.get(ATTR_NAME, object_id),
object_id=object_id,
entity_ids=entity_ids,
user_defined=False,
**extra_arg
)
service_groups[object_id] = new_group
return
# update group
if service.service == SERVICE_SET:
group = service_groups[object_id]
need_update = False
if ATTR_ADD_ENTITIES in service.data:
delta = service.data[ATTR_ADD_ENTITIES]
entity_ids = set(group.tracking) | set(delta)
yield from group.async_update_tracked_entity_ids(entity_ids)
if ATTR_ENTITIES in service.data:
entity_ids = service.data[ATTR_ENTITIES]
yield from group.async_update_tracked_entity_ids(entity_ids)
if ATTR_NAME in service.data:
group.name = service.data[ATTR_NAME]
need_update = True
if ATTR_VISIBLE in service.data:
group.visible = service.data[ATTR_VISIBLE]
need_update = True
if ATTR_ICON in service.data:
group.icon = service.data[ATTR_ICON]
need_update = True
if ATTR_CONTROL in service.data:
group.control = service.data[ATTR_CONTROL]
need_update = True
if ATTR_VIEW in service.data:
group.view = service.data[ATTR_VIEW]
need_update = True
if need_update:
yield from group.async_update_ha_state()
return
# remove group
if service.service == SERVICE_REMOVE:
if object_id not in service_groups:
_LOGGER.warning("Group '%s' not exists!", object_id)
return
del_group = service_groups.pop(object_id)
yield from del_group.async_stop()
hass.services.async_register(
DOMAIN, SERVICE_SET, groups_service_handler,
descriptions[DOMAIN][SERVICE_SET], schema=SET_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_REMOVE, groups_service_handler,
descriptions[DOMAIN][SERVICE_REMOVE], schema=REMOVE_SERVICE_SCHEMA)
@asyncio.coroutine
def visibility_service_handler(service):
"""Change visibility of a group."""
visible = service.data.get(ATTR_VISIBLE)
tasks = []
for group in component.async_extract_from_service(service,
expand_group=False):
group.visible = visible
tasks.append(group.async_update_ha_state())
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
hass.services.async_register(
DOMAIN, SERVICE_SET_VISIBILITY, visibility_service_handler,
descriptions[DOMAIN][SERVICE_SET_VISIBILITY],
schema=SET_VISIBILITY_SERVICE_SCHEMA)
return True
@asyncio.coroutine
def _async_process_config(hass, config, component):
"""Process group configuration."""
groups = []
for object_id, conf in config.get(DOMAIN, {}).items():
name = conf.get(CONF_NAME, object_id)
entity_ids = conf.get(CONF_ENTITIES) or []
icon = conf.get(CONF_ICON)
view = conf.get(CONF_VIEW)
control = conf.get(CONF_CONTROL)
# Don't create tasks and await them all. The order is important as
# groups get a number based on creation order.
group = yield from Group.async_create_group(
hass, name, entity_ids, icon=icon, view=view,
control=control, object_id=object_id)
groups.append(group)
if groups:
yield from component.async_add_entities(groups)
class Group(Entity):
"""Track a group of entity ids."""
def __init__(self, hass, name, order=None, visible=True, icon=None,
view=False, control=None, user_defined=True):
"""Initialize a group.
        This object has a factory function for creation.
"""
self.hass = hass
self._name = name
self._state = STATE_UNKNOWN
self._icon = icon
self.view = view
self.tracking = []
self.group_on = None
self.group_off = None
self.visible = visible
self.control = control
self._user_defined = user_defined
self._order = order
self._assumed_state = False
self._async_unsub_state_changed = None
@staticmethod
def create_group(hass, name, entity_ids=None, user_defined=True,
visible=True, icon=None, view=False, control=None,
object_id=None):
"""Initialize a group."""
return run_coroutine_threadsafe(
Group.async_create_group(
hass, name, entity_ids, user_defined, visible, icon, view,
control, object_id),
hass.loop).result()
@staticmethod
@asyncio.coroutine
def async_create_group(hass, name, entity_ids=None, user_defined=True,
visible=True, icon=None, view=False, control=None,
object_id=None):
"""Initialize a group.
This method must be run in the event loop.
"""
group = Group(
hass, name,
order=len(hass.states.async_entity_ids(DOMAIN)),
visible=visible, icon=icon, view=view, control=control,
user_defined=user_defined
)
group.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id or name, hass=hass)
# run other async stuff
if entity_ids is not None:
yield from group.async_update_tracked_entity_ids(entity_ids)
else:
yield from group.async_update_ha_state(True)
return group
@property
def should_poll(self):
"""No need to poll because groups will update themselves."""
return False
@property
def name(self):
"""Return the name of the group."""
return self._name
@name.setter
def name(self, value):
"""Set Group name."""
self._name = value
@property
def state(self):
"""Return the state of the group."""
return self._state
@property
def icon(self):
"""Return the icon of the group."""
return self._icon
@icon.setter
def icon(self, value):
"""Set Icon for group."""
self._icon = value
@property
def hidden(self):
"""If group should be hidden or not."""
if self.visible and not self.view:
return False
return True
@property
def state_attributes(self):
"""Return the state attributes for the group."""
data = {
ATTR_ENTITY_ID: self.tracking,
ATTR_ORDER: self._order,
}
if not self._user_defined:
data[ATTR_AUTO] = True
if self.view:
data[ATTR_VIEW] = True
if self.control:
data[ATTR_CONTROL] = self.control
return data
@property
def assumed_state(self):
"""Test if any member has an assumed state."""
return self._assumed_state
def update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs."""
run_coroutine_threadsafe(
self.async_update_tracked_entity_ids(entity_ids), self.hass.loop
).result()
@asyncio.coroutine
def async_update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs.
This method must be run in the event loop.
"""
yield from self.async_stop()
self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
self.group_on, self.group_off = None, None
yield from self.async_update_ha_state(True)
self.async_start()
def start(self):
"""Start tracking members."""
self.hass.add_job(self.async_start)
@callback
def async_start(self):
"""Start tracking members.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed is None:
self._async_unsub_state_changed = async_track_state_change(
self.hass, self.tracking, self._async_state_changed_listener
)
def stop(self):
"""Unregister the group from Home Assistant."""
run_coroutine_threadsafe(self.async_stop(), self.hass.loop).result()
@asyncio.coroutine
def async_stop(self):
"""Unregister the group from Home Assistant.
This method must be run in the event loop.
"""
yield from self.async_remove()
@asyncio.coroutine
def async_update(self):
"""Query all members and determine current group state."""
self._state = STATE_UNKNOWN
self._async_update_group_state()
def async_remove(self):
"""Remove group from HASS.
This method must be run in the event loop and returns a coroutine.
"""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
return super().async_remove()
@asyncio.coroutine
def _async_state_changed_listener(self, entity_id, old_state, new_state):
"""Respond to a member state changing.
This method must be run in the event loop.
"""
# removed
if self._async_unsub_state_changed is None:
return
self._async_update_group_state(new_state)
yield from self.async_update_ha_state()
@property
def _tracking_states(self):
"""Return the states that the group is tracking."""
states = []
for entity_id in self.tracking:
state = self.hass.states.get(entity_id)
if state is not None:
states.append(state)
return states
@callback
def _async_update_group_state(self, tr_state=None):
"""Update group state.
        Optionally you can provide the only state that changed since the last
        update, allowing this method to take shortcuts.
This method must be run in the event loop.
"""
# To store current states of group entities. Might not be needed.
states = None
gr_state = self._state
gr_on = self.group_on
gr_off = self.group_off
# We have not determined type of group yet
if gr_on is None:
if tr_state is None:
states = self._tracking_states
for state in states:
gr_on, gr_off = \
_get_group_on_off(state.state)
if gr_on is not None:
break
else:
gr_on, gr_off = _get_group_on_off(tr_state.state)
if gr_on is not None:
self.group_on, self.group_off = gr_on, gr_off
# We cannot determine state of the group
if gr_on is None:
return
if tr_state is None or ((gr_state == gr_on and
tr_state.state == gr_off) or
tr_state.state not in (gr_on, gr_off)):
if states is None:
states = self._tracking_states
if any(state.state == gr_on for state in states):
self._state = gr_on
else:
self._state = gr_off
elif tr_state.state in (gr_on, gr_off):
self._state = tr_state.state
if tr_state is None or self._assumed_state and \
not tr_state.attributes.get(ATTR_ASSUMED_STATE):
if states is None:
states = self._tracking_states
self._assumed_state = any(
state.attributes.get(ATTR_ASSUMED_STATE) for state
in states)
elif tr_state.attributes.get(ATTR_ASSUMED_STATE):
self._assumed_state = True
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os
import itertools
import sys
import threading
import urllib2
from conary import callbacks
from conary import conaryclient
from conary import display
from conary import errors
from conary import trove
from conary import trovetup
from conary import versions
from conary.deps import deps
from conary.lib import api
from conary.lib import log
from conary.lib import util
from conary.local import database
from conary.repository import changeset, filecontainer
from conary.conaryclient import cmdline, modelupdate
from conary.conaryclient.cmdline import parseTroveSpec
# FIXME: the client should be instantiated once per execution of the
# command-line conary client
class CriticalUpdateInfo(conaryclient.CriticalUpdateInfo):
criticalTroveRegexps = ['conary:.*']
def locked(method):
# this decorator used to be defined in UpdateCallback
# The problem is you cannot subclass UpdateCallback and use the decorator
# because python complains it is an unbound function.
# And you can't define it as @staticmethod either, it would break the
# decorated functions.
# Somewhat related (staticmethod objects not callable) topic:
# http://mail.python.org/pipermail/python-dev/2006-March/061948.html
def wrapper(self, *args, **kwargs):
self.lock.acquire()
try:
return method(self, *args, **kwargs)
finally:
self.lock.release()
wrapper.__doc__ = method.__doc__
wrapper.func_name = method.func_name
return wrapper
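# Illustrative sketch (not part of the original module): the locked decorator
# serializes access to methods of any object that exposes a ``lock``
# attribute, as UpdateCallback below does. This toy class is hypothetical.
class _ExampleLockedUse(object):
    def __init__(self):
        self.lock = threading.RLock()
        self.counter = 0

    @locked
    def bump(self):
        self.counter += 1
        return self.counter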
class UpdateCallback(callbacks.LineOutput, callbacks.UpdateCallback):
def done(self):
"""
@see: callbacks.UpdateCallback.done
"""
self._message('')
def _message(self, text):
"""
Called when this callback object needs to output progress information.
The information is written to stdout.
@return: None
"""
callbacks.LineOutput._message(self, text)
def update(self):
"""
Called by this callback object to update the status. This method
sanitizes text. This method is not thread safe - obtain a lock before
calling.
@return: None
"""
t = ""
if self.updateText:
t += self.updateText
if self.csText:
t = self.csText + ' '
if t and len(t) < 76:
t = t[:76]
t += '...'
self._message(t)
@locked
def updateMsg(self, text):
"""
Called when the update thread has status updates.
@param text: new status text
@type text: string
@return: None
"""
self.updateText = text
self.update()
@locked
def csMsg(self, text):
"""
Called when the download thread has status updates.
@param text: new status text
@type text: string
@return: None
"""
self.csText = text
self.update()
def executingSystemModel(self):
self.updateMsg("Processing system model")
def loadingModelCache(self):
self.updateMsg("Loading system model cache")
def savingModelCache(self):
self.updateMsg("Saving system model cache")
def preparingChangeSet(self):
"""
@see: callbacks.ChangesetCallback.preparingChangeSet
"""
self.updateMsg("Preparing changeset request")
def resolvingDependencies(self):
"""
@see: callbacks.UpdateCallback.resolvingDependencies
"""
self.updateMsg("Resolving dependencies")
@locked
def updateDone(self):
"""
@see: callbacks.UpdateCallback.updateDone
"""
self._message('')
self.updateText = None
@locked
def _downloading(self, msg, got, rate, need):
"""
Called by this callback object to handle different kinds of
download-related progress information. This method puts together
download rate information.
@param msg: status message
@type msg: string
@param got: number of bytes retrieved so far
@type got: integer
@param rate: bytes per second
@type rate: integer
@param need: number of bytes total to be retrieved
@type need: integer
@return: None
"""
# This function acquires a lock just because it looks at self.csHunk
# and self.updateText directly. Otherwise, self.csMsg will acquire the
# lock (which is now reentrant)
if got == need:
self.csMsg(None)
elif need != 0:
if self.csHunk[1] < 2 or not self.updateText:
self.csMsg("%s %dKB (%d%%) of %dKB at %dKB/sec"
% (msg, got/1024, (got*100)/need, need/1024, rate/1024))
else:
self.csMsg("%s %d of %d: %dKB (%d%%) of %dKB at %dKB/sec"
% ((msg,) + self.csHunk + \
(got/1024, (got*100)/need, need/1024, rate/1024)))
else: # no idea how much we need, just keep on counting...
self.csMsg("%s (got %dKB at %dKB/s so far)" % (msg, got/1024, rate/1024))
def downloadingFileContents(self, got, need):
"""
@see: callbacks.ChangesetCallback.downloadingFileContents
"""
self._downloading('Downloading files for changeset', got, self.rate, need)
def downloadingChangeSet(self, got, need):
"""
@see: callbacks.ChangesetCallback.downloadingChangeSet
"""
self._downloading('Downloading', got, self.rate, need)
def requestingFileContents(self):
"""
@see: callbacks.ChangesetCallback.requestingFileContents
"""
if self.csHunk[1] < 2:
self.csMsg("Requesting file contents")
else:
self.csMsg("Requesting file contents for changeset %d of %d" % self.csHunk)
def requestingChangeSet(self):
"""
@see: callbacks.ChangesetCallback.requestingChangeSet
"""
if self.csHunk[1] < 2:
self.csMsg("Requesting changeset")
else:
self.csMsg("Requesting changeset %d of %d" % self.csHunk)
def creatingRollback(self):
"""
@see: callbacks.UpdateCallback.creatingRollback
"""
self.updateMsg("Creating rollback")
def preparingUpdate(self, troveNum, troveCount):
"""
@see: callbacks.UpdateCallback.preparingUpdate
"""
self.updateMsg("Preparing update (%d of %d)" %
(troveNum, troveCount))
@locked
def restoreFiles(self, size, totalSize):
"""
@see: callbacks.UpdateCallback.restoreFiles
"""
# Locked, because we modify self.restored
if totalSize != 0:
self.restored += size
self.updateMsg("Writing %dk of %dk (%d%%)"
% (self.restored / 1024 , totalSize / 1024,
(self.restored * 100) / totalSize))
def removeFiles(self, fileNum, total):
"""
@see: callbacks.UpdateCallback.removeFiles
"""
if total != 0:
self.updateMsg("Removing %d of %d (%d%%)"
% (fileNum , total, (fileNum * 100) / total))
def creatingDatabaseTransaction(self, troveNum, troveCount):
"""
@see: callbacks.UpdateCallback.creatingDatabaseTransaction
"""
self.updateMsg("Creating database transaction (%d of %d)" %
(troveNum, troveCount))
def updatingDatabase(self, step, stepNum, stepCount):
if step == 'latest':
self.updateMsg('Updating list of latest versions: (%d of %d)' %
(stepNum, stepCount))
else:
self.updateMsg('Updating database: (%d of %d)' %
(stepNum, stepCount))
def runningPreTagHandlers(self):
"""
@see: callbacks.UpdateCallback.runningPreTagHandlers
"""
self.updateMsg("Running tag prescripts")
def runningPostTagHandlers(self):
"""
@see: callbacks.UpdateCallback.runningPostTagHandlers
"""
self.updateMsg("Running tag post-scripts")
def committingTransaction(self):
"""
@see: callbacks.UpdateCallback.committingTransaction
"""
self.updateMsg("Committing database transaction")
@locked
def setChangesetHunk(self, num, total):
"""
@see: callbacks.ChangesetCallback.setChangesetHunk
"""
self.csHunk = (num, total)
@locked
def setUpdateHunk(self, num, total):
"""
@see: callbacks.UpdateCallback.setUpdateHunk
"""
self.restored = 0
self.updateHunk = (num, total)
@locked
def setUpdateJob(self, jobs):
"""
@see: callbacks.UpdateCallback.setUpdateJob
"""
self._message('')
if self.updateHunk[1] < 2:
self.out.write('Applying update job:\n')
else:
self.out.write('Applying update job %d of %d:\n' % self.updateHunk)
# erase anything that is currently displayed
self._message('')
self.formatter.prepareJobs(jobs)
for line in self.formatter.formatJobTups(jobs, indent=' '):
self.out.write(line + '\n')
@locked
def tagHandlerOutput(self, tag, msg, stderr = False):
"""
@see: callbacks.UpdateCallback.tagHandlerOutput
"""
self._message('')
self.out.write('[%s] %s\n' % (tag, msg))
@locked
def troveScriptOutput(self, typ, msg):
"""
@see: callbacks.UpdateCallback.troveScriptOutput
"""
self._message('')
self.out.write("[%s] %s" % (typ, msg))
@locked
def troveScriptFailure(self, typ, errcode):
"""
@see: callbacks.UpdateCallback.troveScriptFailure
"""
self._message('')
self.out.write("[%s] %s" % (typ, errcode))
def capsuleSyncScan(self, capsuleType):
self.updateMsg("Scanning for %s capsule changes" % capsuleType)
def capsuleSyncCreate(self, capsuleType, name, num, total):
self.updateMsg("Collecting modifications to %s database (%d of %d)" %
(capsuleType, num, total))
def capsuleSyncApply(self, added, removed):
self._message('')
self.out.write('Synchronizing database with capsule changes\n')
def __init__(self, cfg=None, modelFile=None):
"""
Initialize this callback object.
@param cfg: Conary configuration
@type cfg: A ConaryConfiguration object.
@return: None
"""
callbacks.UpdateCallback.__init__(self)
if cfg:
self.setTrustThreshold(cfg.trustThreshold)
callbacks.LineOutput.__init__(self)
self.restored = 0
self.csHunk = (0, 0)
self.updateHunk = (0, 0)
self.csText = None
self.updateText = None
self.lock = threading.RLock()
if cfg:
fullVersions = cfg.fullVersions
showFlavors = cfg.fullFlavors
showLabels = cfg.showLabels
baseFlavors = cfg.flavor
showComponents = cfg.showComponents
db = conaryclient.ConaryClient(cfg, modelFile=modelFile).db
else:
fullVersions = showFlavors = showLabels = db = baseFlavors = None
showComponents = None
self.formatter = display.JobTupFormatter(affinityDb=db)
self.formatter.dcfg.setTroveDisplay(fullVersions=fullVersions,
fullFlavors=showFlavors,
showLabels=showLabels,
baseFlavors=baseFlavors,
showComponents=showComponents)
self.formatter.dcfg.setJobDisplay(compressJobs=not showComponents)
def displayChangedJobs(addedJobs, removedJobs, cfg):
db = conaryclient.ConaryClient(cfg).db
formatter = display.JobTupFormatter(affinityDb=db)
formatter.dcfg.setTroveDisplay(fullVersions=cfg.fullVersions,
fullFlavors=cfg.fullFlavors,
showLabels=cfg.showLabels,
baseFlavors=cfg.flavor,
showComponents=cfg.showComponents)
formatter.dcfg.setJobDisplay(compressJobs=not cfg.showComponents)
formatter.prepareJobLists([removedJobs | addedJobs])
if removedJobs:
print 'No longer part of job:'
for line in formatter.formatJobTups(removedJobs, indent=' '):
print line
if addedJobs:
print 'Added to job:'
for line in formatter.formatJobTups(addedJobs, indent=' '):
print line
def displayUpdateInfo(updJob, cfg, noRestart=False):
jobLists = updJob.getJobs()
db = conaryclient.ConaryClient(cfg).db
formatter = display.JobTupFormatter(affinityDb=db)
formatter.dcfg.setTroveDisplay(fullVersions=cfg.fullVersions,
fullFlavors=cfg.fullFlavors,
showLabels=cfg.showLabels,
baseFlavors=cfg.flavor,
showComponents=cfg.showComponents)
formatter.dcfg.setJobDisplay(compressJobs=not cfg.showComponents)
formatter.prepareJobLists(jobLists)
totalJobs = len(jobLists)
for num, job in enumerate(jobLists):
if totalJobs > 1:
if num in updJob.getCriticalJobs():
print '** ',
print 'Job %d of %d:' % (num + 1, totalJobs)
for line in formatter.formatJobTups(job, indent=' '):
print line
if updJob.getCriticalJobs() and not noRestart:
criticalJobs = updJob.getCriticalJobs()
if len(criticalJobs) > 1:
jobPlural = 's'
else:
jobPlural = ''
jobList = ', '.join([str(x + 1) for x in criticalJobs])
print
print '** The update will restart itself after job%s %s and continue updating' % (jobPlural, jobList)
return
@api.developerApi
def doUpdate(cfg, changeSpecs, **kwargs):
callback = kwargs.get('callback', None)
if not callback:
callback = callbacks.UpdateCallback(trustThreshold=cfg.trustThreshold)
kwargs['callback'] = callback
else:
callback.setTrustThreshold(cfg.trustThreshold)
syncChildren = kwargs.get('syncChildren', False)
syncUpdate = kwargs.pop('syncUpdate', False)
restartInfo = kwargs.get('restartInfo', None)
if syncChildren or syncUpdate:
installMissing = True
else:
installMissing = False
kwargs['installMissing'] = installMissing
fromChangesets = []
for path in kwargs.pop('fromFiles', []):
cs = changeset.ChangeSetFromFile(path)
fromChangesets.append(cs)
kwargs['fromChangesets'] = fromChangesets
# Look for items which look like files in the applyList and convert
# them into fromChangesets w/ the primary sets
for item in changeSpecs[:]:
if os.access(item, os.R_OK):
try:
cs = changeset.ChangeSetFromFile(item)
except:
continue
fromChangesets.append(cs)
changeSpecs.remove(item)
for troveTuple in cs.getPrimaryTroveList():
changeSpecs.append(trovetup.TroveTuple(*troveTuple).asString())
if kwargs.get('restartInfo', None):
# We don't care about applyList, we will set it later
applyList = None
else:
keepExisting = kwargs.get('keepExisting')
updateByDefault = kwargs.get('updateByDefault', True)
applyList = cmdline.parseChangeList(changeSpecs, keepExisting,
updateByDefault,
allowChangeSets=True)
_updateTroves(cfg, applyList, **kwargs)
# Clean up after ourselves
if restartInfo:
util.rmtree(restartInfo, ignore_errors=True)
def doModelUpdate(cfg, sysmodel, modelFile, otherArgs, **kwargs):
kwargs['systemModel'] = sysmodel
kwargs['systemModelFile'] = modelFile
kwargs['loadTroveCache'] = True
kwargs.setdefault('updateByDefault', True) # erase is not default case
kwargs.setdefault('model', False)
kwargs.setdefault('keepExisting', True) # prefer "install" to "update"
restartInfo = kwargs.get('restartInfo', None)
patchArgs = kwargs.pop('patchSpec', None)
fromChangesets = []
applyList = []
callback = kwargs.get('callback', None)
if not callback:
callback = callbacks.UpdateCallback(trustThreshold=cfg.trustThreshold)
kwargs['callback'] = callback
else:
callback.setTrustThreshold(cfg.trustThreshold)
if restartInfo is None:
addArgs = [x[1:] for x in otherArgs if x.startswith('+')]
rmArgs = [x[1:] for x in otherArgs if x.startswith('-')]
defArgs = [x for x in otherArgs
if not (x.startswith('+') or x.startswith('-'))]
# find any default arguments that represent changesets to
# install/update
for defArg in list(defArgs):
if kwargs['updateByDefault'] and os.path.isfile(defArg):
try:
cs = changeset.ChangeSetFromFile(defArg)
fromChangesets.append((cs, defArg))
defArgs.remove(defArg)
except filecontainer.BadContainer:
# not a changeset, must be a trove name
pass
if kwargs['updateByDefault']:
addArgs += defArgs
else:
rmArgs += defArgs
if rmArgs:
sysmodel.appendOpByName('erase', text=rmArgs)
updateName = { False: 'update',
True: 'install' }[kwargs['keepExisting']]
branchArgs = {}
for index, spec in enumerate(addArgs):
try:
troveSpec = trovetup.TroveSpec(spec)
version = versions.Label(troveSpec.version)
branchArgs[troveSpec] = index
except:
# Any exception is a parse failure in one of the
# two steps, and so we do not convert that argument
pass
if branchArgs:
client = conaryclient.ConaryClient(cfg)
repos = client.getRepos()
foundTroves = repos.findTroves(cfg.installLabelPath,
branchArgs.keys(),
defaultFlavor = cfg.flavor)
for troveSpec in foundTroves:
index = branchArgs[troveSpec]
foundTrove = foundTroves[troveSpec][0]
addArgs[index] = addArgs[index].replace(
troveSpec.version,
'%s/%s' %(foundTrove[1].trailingLabel(),
foundTrove[1].trailingRevision()))
disallowedChangesets = []
for cs, argName in fromChangesets:
for troveTuple in cs.getPrimaryTroveList():
# group and redirect changesets will break the model the
# next time it is run, so prevent them from getting in
# the model in the first place
if troveTuple[1].isOnLocalHost():
if troveTuple[0].startswith('group-'):
disallowedChangesets.append((argName, 'group',
trovetup.TroveTuple(*troveTuple).asString()))
continue
trvCs = cs.getNewTroveVersion(*troveTuple)
if trvCs.getType() == trove.TROVE_TYPE_REDIRECT:
disallowedChangesets.append((argName, 'redirect',
trovetup.TroveTuple(*troveTuple).asString()))
continue
addArgs.append(
trovetup.TroveTuple(*troveTuple).asString())
if disallowedChangesets:
raise errors.ConaryError(
'group and redirect changesets on a local label'
' cannot be installed:\n ' + '\n '.join(
'%s contains local %s: %s' % x
for x in disallowedChangesets))
if addArgs:
sysmodel.appendOpByName(updateName, text=addArgs)
if patchArgs:
sysmodel.appendOpByName('patch', text=patchArgs)
kwargs['fromChangesets'] = [x[0] for x in fromChangesets]
if kwargs.pop('model'):
sysmodel.write(sys.stdout)
sys.stdout.flush()
return None
keepExisting = kwargs.get('keepExisting')
updateByDefault = kwargs.get('updateByDefault', True)
applyList = cmdline.parseChangeList([], keepExisting,
updateByDefault,
allowChangeSets=True)
else:
# In the restart case, applyList == [] which says "sync to model"
pass
_updateTroves(cfg, applyList, **kwargs)
# Clean up after ourselves
if restartInfo:
util.rmtree(restartInfo, ignore_errors=True)
def _updateTroves(cfg, applyList, **kwargs):
# Take out the apply-related keyword arguments
applyDefaults = dict(
replaceFiles = False,
replaceManagedFiles = False,
replaceUnmanagedFiles = False,
replaceModifiedFiles = False,
replaceModifiedConfigFiles = False,
tagScript = None,
justDatabase = False,
skipCapsuleOps = False,
info = False,
keepJournal = False,
noRestart = False,
noScripts = False,
)
applyKwargs = {}
for k in applyDefaults:
if k in kwargs:
applyKwargs[k] = kwargs.pop(k)
callback = kwargs.pop('callback')
loadTroveCache = kwargs.pop('loadTroveCache', False)
applyKwargs['test'] = kwargs.get('test', False)
applyKwargs['localRollbacks'] = cfg.localRollbacks
applyKwargs['autoPinList'] = cfg.pinTroves
model = kwargs.pop('systemModel', None)
modelFile = kwargs.pop('systemModelFile', None)
modelGraph = kwargs.pop('modelGraph', None)
modelTrace = kwargs.pop('modelTrace', None)
noRestart = applyKwargs.get('noRestart', False)
client = conaryclient.ConaryClient(cfg, modelFile=modelFile)
client.setUpdateCallback(callback)
if kwargs.pop('disconnected', False):
client.disconnectRepos()
migrate = kwargs.get('migrate', False)
# even though we no longer differentiate forceMigrate, we still
# remove it from kwargs to avoid confusing prepareUpdateJob
kwargs.pop('forceMigrate', False)
restartInfo = kwargs.get('restartInfo', None)
# Initialize the critical update set
applyCriticalOnly = kwargs.get('applyCriticalOnly', False)
if kwargs.get('criticalUpdateInfo') is not None:
kwargs['criticalUpdateInfo'].criticalOnly = applyCriticalOnly
else:
kwargs['criticalUpdateInfo'] = CriticalUpdateInfo(applyCriticalOnly)
info = applyKwargs.pop('info', False)
# Rename depCheck to resolveDeps
depCheck = kwargs.pop('depCheck', True)
kwargs['resolveDeps'] = depCheck
if not info:
client.checkWriteableRoot()
# Unfortunately there's no easy way to make 'test' or 'info' mode work
# with capsule sync, doubly so because it influences the decisions made
# later on about what troves to update. So this will always really
# apply, but the good news is that it never modifies the system outside
# of the Conary DB.
client.syncCapsuleDatabase(callback, makePins=True)
updJob = client.newUpdateJob()
try:
if model:
changeSetList = kwargs.get('fromChangesets', [])
criticalUpdates = kwargs.get('criticalUpdateInfo', None)
tc = modelupdate.CMLTroveCache(client.getDatabase(),
client.getRepos(),
callback = callback,
changeSetList =
changeSetList)
tcPath = cfg.root + cfg.dbPath + '/modelcache'
if loadTroveCache:
if os.path.exists(tcPath):
log.info("loading %s", tcPath)
callback.loadingModelCache()
tc.load(tcPath)
ts = client.cmlGraph(model, changeSetList = changeSetList)
if modelGraph is not None:
ts.g.generateDotFile(modelGraph)
suggMap = client._updateFromTroveSetGraph(updJob, ts, tc,
fromChangesets = changeSetList,
criticalUpdateInfo = criticalUpdates,
callback = callback)
if modelTrace is not None:
ts.g.trace([ parseTroveSpec(x) for x in modelTrace ] )
finalModel = copy.deepcopy(model)
if model.suggestSimplifications(tc, ts.g):
log.info("possible system model simplifications found")
ts2 = client.cmlGraph(model, changeSetList = changeSetList)
updJob2 = client.newUpdateJob()
try:
suggMap2 = client._updateFromTroveSetGraph(updJob2, ts2,
tc,
fromChangesets = changeSetList,
criticalUpdateInfo = criticalUpdates)
except errors.TroveNotFound:
log.info("bad model generated; bailing")
else:
if (suggMap == suggMap2 and
updJob.getJobs() == updJob2.getJobs()):
log.info("simplified model verfied; using it instead")
ts = ts2
finalModel = model
updJob = updJob2
suggMap = suggMap2
else:
log.info("simplified model changed result; ignoring")
model = finalModel
modelFile.model = finalModel
if tc.cacheModified():
log.info("saving %s", tcPath)
callback.savingModelCache()
tc.save(tcPath)
callback.done()
else:
suggMap = client.prepareUpdateJob(updJob, applyList, **kwargs)
except:
callback.done()
client.close()
raise
if info:
callback.done()
displayUpdateInfo(updJob, cfg, noRestart=noRestart)
if restartInfo and not model:
callback.done()
newJobs = set(itertools.chain(*updJob.getJobs()))
oldJobs = set(updJob.getItemList())
addedJobs = newJobs - oldJobs
removedJobs = oldJobs - newJobs
if addedJobs or removedJobs:
print
print 'NOTE: after critical updates were applied, the contents of the update were recalculated:'
print
displayChangedJobs(addedJobs, removedJobs, cfg)
updJob.close()
client.close()
return
if model:
missingLocalTroves = model.getMissingLocalTroves(tc, ts)
if missingLocalTroves:
print 'Update would leave references to missing local troves:'
for troveTup in missingLocalTroves:
if not isinstance(troveTup, trovetup.TroveTuple):
troveTup = trovetup.TroveTuple(troveTup)
print "\t" + str(troveTup)
client.close()
return
if suggMap:
callback.done()
dcfg = display.DisplayConfig()
dcfg.setTroveDisplay(fullFlavors = cfg.fullFlavors,
fullVersions = cfg.fullVersions,
showLabels = cfg.showLabels)
formatter = display.TroveTupFormatter(dcfg)
print "Including extra troves to resolve dependencies:"
print " ",
items = sorted(set(formatter.formatNVF(*x)
for x in itertools.chain(*suggMap.itervalues())))
print " ".join(items)
askInteractive = cfg.interactive
if restartInfo:
callback.done()
newJobs = set(itertools.chain(*updJob.getJobs()))
oldJobs = set(updJob.getItemList())
addedJobs = newJobs - oldJobs
removedJobs = oldJobs - newJobs
        if not model and (addedJobs or removedJobs):
print 'NOTE: after critical updates were applied, the contents of the update were recalculated:'
displayChangedJobs(addedJobs, removedJobs, cfg)
else:
askInteractive = False
if not updJob.jobs:
# Nothing to do
print 'Update would not modify system'
if model and not kwargs.get('test'):
# Make sure 'conary sync' clears model.next even if nothing needs
# to be done.
modelFile.closeSnapshot()
updJob.close()
client.close()
return
elif askInteractive:
print 'The following updates will be performed:'
displayUpdateInfo(updJob, cfg, noRestart=noRestart)
if migrate and cfg.interactive:
print ('Migrate erases all troves not referenced in the groups'
' specified.')
if askInteractive:
if migrate:
style = 'migrate'
else:
style = 'update'
okay = cmdline.askYn('continue with %s? [Y/n]' % style, default=True)
if not okay:
updJob.close()
client.close()
return
if not noRestart and updJob.getCriticalJobs():
print "Performing critical system updates, will then restart update."
try:
restartDir = client.applyUpdateJob(updJob, **applyKwargs)
finally:
updJob.close()
client.close()
if restartDir:
params = sys.argv
# Write command line to disk
import xmlrpclib
cmdlinefile = open(os.path.join(restartDir, 'cmdline'), "w")
cmdlinefile.write(xmlrpclib.dumps((params, ), methodresponse = True))
cmdlinefile.close()
# CNY-980: we should have the whole script of changes to perform in
# the restart directory (in the job list); if in migrate mode, re-exec
# as regular update
if migrate and 'migrate' in params:
params[params.index('migrate')] = 'update'
params.extend(['--restart-info=%s' % restartDir])
client.close()
raise errors.ReexecRequired(
'Critical update completed, rerunning command...', params,
restartDir)
else:
if (not kwargs.get('test', False)) and model:
modelFile.closeSnapshot()
class UpdateAllFormatter(object):
def formatNVF(self, name, version, flavor):
if version and (flavor is not None) and not flavor.isEmpty():
return "'%s=%s[%s]'" % (name, version.asString(), deps.formatFlavor(flavor))
if (flavor is not None) and not flavor.isEmpty():
return "'%s[%s]'" % (name, deps.formatFlavor(flavor))
if version:
return "%s=%s" % (name, version.asString())
return name
def updateAll(cfg, **kwargs):
showItems = kwargs.pop('showItems', False)
restartInfo = kwargs.get('restartInfo', None)
migrate = kwargs.pop('migrate', False)
modelArg = kwargs.pop('model', False)
modelFile = kwargs.get('systemModelFile', None)
model = kwargs.get('systemModel', None)
infoArg = kwargs.get('info', False)
if model and modelFile and modelFile.exists() and restartInfo is None:
model.refreshVersionSnapshots()
if modelArg:
model.write(sys.stdout)
sys.stdout.flush()
return None
kwargs['installMissing'] = kwargs['removeNotByDefault'] = migrate
kwargs['callback'] = UpdateCallback(cfg)
# load trove cache only if --info provided
kwargs['loadTroveCache'] = infoArg
client = conaryclient.ConaryClient(cfg)
# We want to be careful not to break the old style display, for whoever
# might have a parser for that output.
withLongDisplay = (cfg.fullFlavors or cfg.fullVersions or cfg.showLabels)
formatter = UpdateAllFormatter()
if restartInfo or (model and modelFile and modelFile.exists()):
updateItems = []
applyList = None
else:
if showItems and withLongDisplay:
updateItems = client.getUpdateItemList()
dcfg = display.DisplayConfig()
dcfg.setTroveDisplay(fullFlavors = cfg.fullFlavors,
fullVersions = cfg.fullVersions,
showLabels = cfg.showLabels)
formatter = display.TroveTupFormatter(dcfg)
else:
updateItems = client.fullUpdateItemList()
applyList = [ (x[0], (None, None), x[1:], True) for x in updateItems ]
if showItems:
for (name, version, flavor) in sorted(updateItems, key=lambda x:x[0]):
print formatter.formatNVF(name, version, flavor)
return
_updateTroves(cfg, applyList, **kwargs)
# Clean up after ourselves
if restartInfo:
util.rmtree(restartInfo, ignore_errors=True)
def changePins(cfg, troveStrList, pin = True,
systemModel = None, systemModelFile = None,
callback = None):
client = conaryclient.ConaryClient(cfg)
client.checkWriteableRoot()
troveList = []
for item in troveStrList:
name, ver, flv = parseTroveSpec(item)
troves = client.db.findTrove(None, (name, ver, flv))
troveList += troves
client.pinTroves(troveList, pin = pin)
if systemModel and systemModelFile and not pin:
doModelUpdate(cfg, systemModel, systemModelFile, [], callback=callback)
def revert(cfg):
conaryclient.ConaryClient.revertJournal(cfg)
|
|
#!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import re
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
    # Log in with the given credentials
try:
con.login(credential)
except:
print('Login failed')
def add_datacenter(fac, name, cool, coolMultiplier, currency, cost,
lineVoltage, width, depth, deratingType, deratingPercent):
datacenter = hpov.common.make_datacenter_dict(name, cool, coolMultiplier,
currency, cost, lineVoltage,
width, depth, deratingType,
deratingPercent)
ret = fac.add_datacenter(datacenter)
if 'coolingMultiplier' in ret:
print('Name: ', ret['name'])
print('Cooling Capacity: ', ret['coolingCapacity'])
print('Cooling Multiplier: ', ret['coolingMultiplier'])
print('Cost Per Kilowatt Hour: ', ret['costPerKilowattHour'])
print('Currency: ', ret['currency'])
print('Default Power Line Voltage: ', ret['defaultPowerLineVoltage'])
print('Derating Type: ', ret['deratingType'])
print('Derating Percentage: ', ret['deratingPercentage'])
print('Depth: ', ret['depth'])
print('Width: ', ret['width'])
else:
pprint(ret)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Add a new Data Center resource
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HPE OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HPE OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HPE OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
    Proxy (host:port format)''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HPE OneView Authorized Login Domain''')
parser.add_argument('-n', dest='name',
required=True,
help='''
Name of the Data Center''')
parser.add_argument('-co', dest='cool', type=int,
default=0,
required=False,
help='''
Maximum cooling capacity for the data center in watts''')
parser.add_argument('-cm', dest='coolMultiplier',
required=False,
default=1.5, type=float,
help='''
    The ratio of cooling costs to power costs of the IT equipment. This
    value represents the relative cost of cooling the system compared to
    the cost of powering the system. The default value of 1.5 indicates
    that it costs 1.5 times as much to cool the system as it does to power
    the system. This value is multiplied by the kilowatt-hours used by the
    system to obtain the cooling kilowatt-hours that are used in the
    analysis section of graphs that display power consumption.''')
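    # Illustrative arithmetic (not from the original script): with the default
    # multiplier of 1.5, a system that consumes 100 kilowatt-hours is shown with
    # 150 cooling kilowatt-hours in the power-consumption analysis graphs.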
parser.add_argument('-ct', dest='cost', type=float,
required=False,
help='''
    Energy cost per kilowatt-hour''')
parser.add_argument('-cu', dest='currency',
default='USD',
required=False,
help='''
The currency unit for energy cost, default is "USD"''')
parser.add_argument('-lv', dest='lineVoltage', type=int,
default=220,
required=False,
help='''
The default power line voltage used for watts/amps translation
when voltage is not otherwise available (for example when summarizing
power at the rack or data center level), default is 220''')
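    # Illustrative arithmetic (not from the original script): the translation is
    # watts = volts * amps, so at the default 220 V a feed measured at 10 A is
    # reported as roughly 2200 W when only amperage is available.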
parser.add_argument('-wi', dest='width', type=int,
required=True,
help='''
Data Center width in millimeters''')
parser.add_argument('-de', dest='depth', type=int,
required=True,
help='''
Data Center depth in millimeters''')
parser.add_argument('-dt', dest='deratingType',
required=True, choices=['NaJp', 'Custom', 'None'],
default='NaJp',
help='''
    Electrical derating type applied when calculating available power,
    one of "NaJp", "Custom" or "None". If "Custom" is selected, the
    derating percentage must be supplied with -dp''')
parser.add_argument('-dp', dest='deratingPercent',
required=False, type=float,
help='''
    Custom electrical derating percentage. This value is implied by the
    derating type unless the type is "Custom", in which case it must be
    specified here''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
fac = hpov.facilities(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
if args.depth < 1000 or args.depth > 50000:
print('Error, the depth of the data center must be between 1000 and 50000 millimeters')
sys.exit()
if args.width < 1000 or args.width > 50000:
print('Error, the width of the data center must be between 1000 and 50000 millimeters')
sys.exit()
if args.deratingType == 'Custom' and not args.deratingPercent:
print('Error, the derating percentage must be specified when using the Custom derating type')
sys.exit()
login(con, credential)
acceptEULA(con)
add_datacenter(fac, args.name, args.cool, args.coolMultiplier,
args.currency, args.cost, args.lineVoltage, args.width,
args.depth, args.deratingType, args.deratingPercent)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2017 Snowflake Computing Inc. All rights reserved.
#
from io import StringIO
import pytest
from snowflake.connector.compat import PY2
from snowflake.connector.util_text import split_statements
def _to_unicode(sql):
return sql.decode('utf-8') if PY2 and isinstance(sql, str) else sql
def test_simple_sql():
with StringIO(_to_unicode("show tables")) as f:
itr = split_statements(f)
assert next(itr) == ('show tables', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode("show tables;")) as f:
itr = split_statements(f)
assert next(itr) == ('show tables;', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode("select 1;select 2")) as f:
itr = split_statements(f)
assert next(itr) == ('select 1;', False)
assert next(itr) == ('select 2', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode("select 1;select 2;")) as f:
itr = split_statements(f)
assert next(itr) == ('select 1;', False)
assert next(itr) == ('select 2;', False)
with pytest.raises(StopIteration):
next(itr)
s = "select 1; -- test"
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == ('select 1; -- test', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ('select 1;', False)
with pytest.raises(StopIteration):
next(itr)
s = "select /* test */ 1; -- test comment select 1;"
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
'select /* test */ 1; -- test comment select 1;', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ('select 1;', False)
with pytest.raises(StopIteration):
next(itr)
def test_multiple_line_sql():
s = """select /* test */ 1; -- test comment
select 23;"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
('select /* test */ 1; -- test comment', False))
assert next(itr) == ('select 23;', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ('select 1;', False)
assert next(itr) == ('select 23;', False)
with pytest.raises(StopIteration):
next(itr)
s = """select /* test */ 1; -- test comment
select 23; -- test comment 2"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
'select /* test */ 1; -- test comment', False)
assert next(itr) == ('select 23; -- test comment 2', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ('select 1;', False)
assert next(itr) == ('select 23;', False)
with pytest.raises(StopIteration):
next(itr)
s = """select /* test */ 1; -- test comment
select 23; /* test comment 2 */ select 3"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
'select /* test */ 1; -- test comment', False)
assert next(itr) == ('select 23;', False)
assert next(itr) == ('/* test comment 2 */ select 3', False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ('select 1;', False)
assert next(itr) == ('select 23;', False)
assert next(itr) == ('select 3', False)
with pytest.raises(StopIteration):
next(itr)
s = """select /* test */ 1; -- test comment
select 23; /* test comment 2
*/ select 3;"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
"select /* test */ 1; -- test comment", False)
assert next(itr) == ("select 23;", False)
assert next(itr) == ("/* test comment 2\n*/ select 3;", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("select 1;", False)
assert next(itr) == ("select 23;", False)
assert next(itr) == ("select 3;", False)
with pytest.raises(StopIteration):
next(itr)
s = """select /* test
continued comments 1
continued comments 2
*/ 1; -- test comment
select 23; /* test comment 2
*/ select 3;"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == ("select /* test\n"
" continued comments 1\n"
" continued comments 2\n"
" */ 1; -- test comment", False)
assert next(itr) == ("select 23;", False)
assert next(itr) == ("/* test comment 2\n*/ select 3;", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("select 1;", False)
assert next(itr) == ("select 23;", False)
assert next(itr) == ("select 3;", False)
with pytest.raises(StopIteration):
next(itr)
def test_quotes():
s = """select 'hello', 1; -- test comment
select 23,'hello"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
"select 'hello', 1; -- test comment", False)
assert next(itr) == ("select 23,'hello", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("select 'hello', 1;", False)
assert next(itr) == ("select 23,'hello", False)
with pytest.raises(StopIteration):
next(itr)
s = """select 'he"llo', 1; -- test comment
select "23,'hello" """
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
"select 'he\"llo', 1; -- test comment", False)
assert next(itr) == ("select \"23,'hello\"", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("select 'he\"llo', 1;", False)
assert next(itr) == ("select \"23,'hello\"", False)
with pytest.raises(StopIteration):
next(itr)
s = """select 'hello
', 1; -- test comment
select "23,'hello" """
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
"select 'hello\n', 1; -- test comment", False)
assert next(itr) == ("select \"23,'hello\"", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("select 'hello\n', 1;", False)
assert next(itr) == ("select \"23,'hello\"", False)
with pytest.raises(StopIteration):
next(itr)
s = """select 'hello''
', 1; -- test comment
select "23,'','hello" """
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
"select 'hello''\n', 1; -- test comment", False)
assert next(itr) == ("select \"23,'','hello\"", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("select 'hello''\n', 1;", False)
assert next(itr) == ("select \"23,'','hello\"", False)
with pytest.raises(StopIteration):
next(itr)
def test_quotes_in_comments():
s = """select 'hello'; -- test comment 'hello2' in comment
/* comment 'quote'*/ select true
"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
"select 'hello'; -- test comment 'hello2' in comment", False)
assert next(itr) == (
"/* comment 'quote'*/ select true", False)
with pytest.raises(StopIteration):
next(itr)
def test_backslash():
s = """select 'hello\\', 1; -- test comment
select 23,'\nhello"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == (
"select 'hello\\', 1; -- test comment", False)
assert next(itr) == ("select 23,'\nhello", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("select 'hello\\', 1;", False)
assert next(itr) == ("select 23,'\nhello", False)
with pytest.raises(StopIteration):
next(itr)
def test_file_with_slash_star():
s = """put file:///tmp/* @%tmp;
ls @%tmp;"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == ("put file:///tmp/* @%tmp;", True)
assert next(itr) == ("ls @%tmp;", False)
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("put file:///tmp/* @%tmp;", True)
assert next(itr) == ("ls @%tmp;", False)
with pytest.raises(StopIteration):
next(itr)
s = """list @~;
-- first half
put file://$SELF_DIR/staging-test-data/*.csv.gz @~;
put file://$SELF_DIR/staging-test-data/foo.csv.gz @~;
put file://$SELF_DIR/staging-test-data/foo.csv.gz @~ overwrite=true;
-- second half
put file://$SELF_DIR/staging-test-data/foo.csv.gz @~/foo;
put file://$SELF_DIR/staging-test-data/bar.csv.gz @~/bar;
list @~;
remove @~ pattern='.*.csv.gz';
list @~;
"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f)
assert next(itr) == ("list @~;", False)
# no comment line is returned
assert next(itr) == (
"-- first half\n"
"put file://$SELF_DIR/staging-test-data/*.csv.gz @~;", True)
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/foo.csv.gz @~;", True)
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/foo.csv.gz @~ "
"overwrite=true;", True)
# no comment line is returned
assert next(itr) == (
"-- second half\n"
"put file://$SELF_DIR/staging-test-data/foo.csv.gz @~/foo;", True)
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/bar.csv.gz @~/bar;", True)
# no empty line is returned
assert next(itr) == ("list @~;", False)
assert next(itr) == ("remove @~ pattern='.*.csv.gz';", False)
assert next(itr) == ("list @~;", False)
# last raises StopIteration
with pytest.raises(StopIteration):
next(itr)
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=True)
assert next(itr) == ("list @~;", False)
# no comment line is returned
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/*.csv.gz @~;", True)
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/foo.csv.gz @~;", True)
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/foo.csv.gz @~ "
"overwrite=true;", True)
# no comment line is returned
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/foo.csv.gz @~/foo;", True)
assert next(itr) == (
"put file://$SELF_DIR/staging-test-data/bar.csv.gz @~/bar;", True)
# no empty line is returned
assert next(itr) == ("list @~;", False)
assert next(itr) == ("remove @~ pattern='.*.csv.gz';", False)
assert next(itr) == ("list @~;", False)
# last raises StopIteration
with pytest.raises(StopIteration):
next(itr)
def test_sql_with_commands():
with StringIO(_to_unicode("""create or replace view aaa
as select * from
LINEITEM limit 1000;
!spool $outfile
show views like 'AAA';
!spool off
drop view if exists aaa;
show tables""")) as f:
itr = split_statements(f)
assert next(itr) == ("""create or replace view aaa
as select * from
LINEITEM limit 1000;""", False)
assert next(itr) == ("""!spool $outfile""", False)
assert next(itr) == ("show views like 'AAA';", False)
assert next(itr) == ("!spool off", False)
assert next(itr) == ("drop view if exists aaa;", False)
assert next(itr) == ("show tables", False)
with pytest.raises(StopIteration):
next(itr)
def test_sql_example1():
with StringIO(_to_unicode("""
create or replace table a(aa int, bb string);
truncate a;
rm @%a;
put file://a.txt @%a;
copy into a;
select * from a;
drop table if exists a;""")) as f:
itr = split_statements(f)
assert next(itr) == (
"create or replace table a(aa int, bb string);", False)
assert next(itr) == ("truncate a;", False)
assert next(itr) == ("rm @%a;", False)
assert next(itr) == ("put file://a.txt @%a;", True)
assert next(itr) == ("copy into a;", False)
assert next(itr) == ("select * from a;", False)
assert next(itr) == ("drop table if exists a;", False)
with pytest.raises(StopIteration):
next(itr)
def test_space_before_put():
with StringIO(_to_unicode("""
-- sample data uploads
PUT file:///tmp/data.txt @%ab;
SELECT 1; /* 134 */ select /* 567*/ 345;>
GET @%bcd file:///tmp/aaa.txt;
""")) as f:
itr = split_statements(f)
assert next(itr) == ("""-- sample data uploads
PUT file:///tmp/data.txt @%ab;""", True)
assert next(itr) == ("""SELECT 1;""", False)
assert next(itr) == ("""/* 134 */ select /* 567*/ 345;>""", False)
assert next(itr) == ("""GET @%bcd file:///tmp/aaa.txt;""", True)
with pytest.raises(StopIteration):
next(itr)
def test_empty_statement():
with StringIO(_to_unicode("""select 1;
-- tail comment1
-- tail comment2
""")) as f:
itr = split_statements(f)
assert next(itr) == ("""select 1;""", False)
assert next(itr) == ("""-- tail comment1
-- tail comment2""", None)
with pytest.raises(StopIteration):
next(itr)
def test_multiple_comments():
s = """--- test comment 1
select /*another test comments*/ 1; -- test comment 2
-- test comment 3
select 2;
"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=False)
assert next(itr) == (
"--- test comment 1\n"
"select /*another test comments*/ 1; -- test comment 2", False)
assert next(itr) == ("-- test comment 3\nselect 2;", False)
def test_comments_with_semicolon():
s = """--test ;
select 1;
"""
with StringIO(_to_unicode(s)) as f:
itr = split_statements(f, remove_comments=False)
assert next(itr) == (
"--test ;\n"
"select 1;", False
)
with pytest.raises(StopIteration):
next(itr)
|
|
#!/usr/bin/env python
import sys
import time
import json
#from multiprocessing import Process, Queue, Value,Array
import robot_class
from hokuyolx import HokuyoLX
#import main_file
import smach
import roslib
import smach_ros
import rospy
# import logging
# logging.getLogger('rosout').setLevel(logging.CRITICAL)
# class Filter(logging.Filter):
# def filter(sf, record):
# return 'State machine transitioning' not in record.msg
# logging.getLogger('rosout').addFilter(Filter())
test_time = 0
duration = 2
def parse_strategy(strategy_text, config_dict):
    commands_nocomments = [[part.strip() for part in line.split(":")]
                           for line in strategy_text.split('\n')
                           if line.strip() and not line.startswith('#')]  # skip blank lines and comments
def parameter_parse(command):
def isfloat(value): # check if value can be converted to float
try:
float(value)
return True
except:
return False
if len(command) == 1:
return [command[0], None]
        pars = command[1].split(',')  # split a comma-separated parameter sequence
        pars = map(lambda x: x.strip().split(' '), pars)  # split each parameter into whitespace-separated tokens
for i, par_vals in enumerate(pars):
if not isfloat(par_vals[0].strip()):# check if parameter was a string parameter
pars[i] = config_dict[' '.join(par_vals).strip()] # use hardcoded value from game_conf json
else:
pars[i] = [float(val) for val in par_vals]
return [command[0], pars]
return map(parameter_parse, commands_nocomments)
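# Illustrative example (not part of the original file): assuming a config entry
# such as {"start point": [0.5, 0.5, 0.0]}, a strategy line like
#   "move: 1.2 0.3 0, start point"
# is parsed by parse_strategy into
#   ["move", [[1.2, 0.3, 0.0], [0.5, 0.5, 0.0]]]
# i.e. numeric parameters become lists of floats and named parameters are looked
# up in the config dictionary.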
class RobotInit(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
def execute(sf, ud):
#ud.robot = Robot(config_data=ud.robot_data)
# checking for button in a loop...
#time.sleep(1)
ud.robot = robot_class.Robot(config_data=ud.robot_data)
ud.start_time = time.time()
#time.sleep(test_time)
time.sleep(1)
return 'robot initialized'
class Timer(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
def execute(sf, ud):
#cnt = 10
while 'start_time' not in ud:# and cnt:
print('>>> TIMER Waiting for data...')
#cnt -=1
time.sleep(0.5)
print('>>> TIMER GOT DATA! x = '+str(ud.start_time))
time.sleep(ud.duration)
return 'time elapsed'
class CommandHandler(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
sf.command_num = 0
def execute(sf, ud):
#time.sleep(test_time)
#print 'Command handler userdata keys', ud.__getattr__()
while 'robot' not in ud:
print 'Command handler userdata keys', '\n'.join([str(key) + str(ud[key]) for key in ud.keys()])#__contains__()
print('>>> COMMAND HANDLER Waiting for data...')
time.sleep(0.5)
if not sf.command_num:
print('>>> COMMAND HANDLER GOT ROBOT!')
if sf.command_num == len(ud.parsed_strategy):
return 'strategy ended'
if sf.preempt_requested():
sf.service_preempt()
return 'preempted'
command = ud.parsed_strategy[sf.command_num]
sf.command_num += 1
#ud.action = command[0]
ud.parameter = command[1]
return command[0].lower()
def request_preempt(sf):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(sf)
#rospy.logwarn("Preempted!")
class Motion(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
def execute(sf, ud):
time.sleep(test_time)
return 'succeeded'
def request_preempt(sf):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(sf)
#rospy.logwarn("Preempted!")
class TakeCylinder(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
def execute(sf, ud):
time.sleep(test_time)
return 'succeeded'
class DropCylinder(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
def execute(sf, ud):
time.sleep(test_time)
return 'succeeded'
class CollisionHandler(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
def execute(sf, ud):
time.sleep(test_time)
ud.new_point = None
return 'succeeded'
def request_preempt(sf):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(sf)
#rospy.logwarn("Preempted!")
class FunnyAction(smach.State):
def __init__(sf, outcomes=[], input_keys=[], output_keys=[]):
smach.State.__init__(sf, outcomes, input_keys, output_keys)
def execute(sf, ud):
time.sleep(test_time)
return 'succeeded'
def request_preempt(sf):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(sf)
#rospy.logwarn("Preempted!")
# gets called when ANY child state terminates
def startup_child_term_cb(outcome_map):
# terminate all running states if TIMER finished with outcome 'time elapsed'
if outcome_map['TIMER'] == 'time elapsed':
return True
if outcome_map['ROBOT'] == 'aborted':
return True
# We need to wait for 90 seconds (when timer ends)
# terminate all running states if ROBOT finished
#if outcome_map['ROBOT']:
# return True
# in all other case, just keep running, don't terminate anything
return False
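# Illustrative example (not part of the original file): an outcome map of
# {'TIMER': 'time elapsed', 'ROBOT': None} terminates all child states, whereas
# {'TIMER': None, 'ROBOT': 'strategy ended'} returns False so the concurrence
# keeps running until the game timer expires.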
# gets called when ALL child states are terminated
def startup_out_cb(outcome_map):
if outcome_map['ROBOT'] == 'strategy ended':
# print(outcome_map)
return 'strategy succeeded'
elif outcome_map['TIMER'] == 'time elapsed':
return 'timer ended'
elif outcome_map['ROBOT'] == 'aborted':
return 'robot aborted'
else:
return 'strategy succeeded'
def create_fsm():
game_fsm = smach.StateMachine(outcomes=['succeeded', 'aborted'],
output_keys = [])
with game_fsm:
robot_strategy_fsm = smach.Concurrence(
outcomes=['timer ended', 'strategy succeeded', 'robot aborted'],
default_outcome='timer ended',
input_keys=[],
output_keys=[],
child_termination_cb = startup_child_term_cb, # use instead of outcome_map
outcome_cb = startup_out_cb
)
with open(sys.argv[1]) as config_f:
config_dict = json.load(config_f)
robot_strategy_ud = robot_strategy_fsm.userdata
robot_strategy_ud.robot_data = config_dict['robot data']
actions =[action_str.encode('ascii','ignore') for action_str in config_dict['strategy actions']]
robot_strategy_ud.game_field = config_dict['game field']
with open(sys.argv[2]) as f_rob_strategy:
robot_strategy_ud.parsed_strategy = parse_strategy(f_rob_strategy.read(),robot_strategy_ud.robot_data)
        robot_strategy_ud.duration = float(sys.argv[3]) if len(sys.argv) > 3 else duration  # game timer duration in seconds (90 s in a real game; the default above is a short test value)
#startup_ud.start_time = None
smach.StateMachine.add('ROBOT STRATEGY', robot_strategy_fsm,
transitions={'timer ended': 'FUNNY ACTION',
'strategy succeeded': 'FUNNY ACTION',
'robot aborted': 'aborted'})
smach.StateMachine.add('FUNNY ACTION', FunnyAction(outcomes=['succeeded', 'aborted'],
input_keys=['robot', 'parameter', 'state'],
output_keys=[]),
transitions={'succeeded': 'succeeded',
'aborted': 'aborted'})
with robot_strategy_fsm:
            # Initialize the robot here and wait for the initialization to finish
smach.Concurrence.add('ROBOT INIT', RobotInit(outcomes=['robot initialized', 'initialization failed'],
input_keys=['robot_data', 'parsed_strategy'],#['robot_data']
output_keys=['start_time', 'robot', 'parsed_strategy']))
# transitions={'robot initialized': 'ROBOT STRATEGY',
# 'initialization failed': 'aborted'})
smach.Concurrence.add('TIMER', Timer(outcomes=['time elapsed'],
input_keys=['start_time', 'duration'],
output_keys=['deadline1']))
sm_robot = smach.StateMachine(outcomes=['aborted', 'strategy ended', 'preempted'],
input_keys = ['game_field', 'parsed_strategy', 'robot'],
output_keys=[])
smach.Concurrence.add('ROBOT', sm_robot)
with sm_robot:
            # Command Handler
# Actions as states
smach.StateMachine.add('TAKE CYLINDER', TakeCylinder(outcomes=['succeeded', 'aborted'],
input_keys=['parameter', 'robot'],
output_keys=[]),
                                   # state = 0, 1, 2: 0 - succeeded, 1 - aborted
transitions={'succeeded': 'COMMAND HANDLER',
'aborted': 'COMMAND HANDLER'})
smach.StateMachine.add('DROP CYLINDER', DropCylinder(outcomes=['succeeded', 'aborted'],
input_keys=['parameter', 'robot'],
output_keys=[]),
                                   # state = 0, 1, 2: 0 - succeeded, 1 - aborted
transitions={'succeeded': 'COMMAND HANDLER',
'aborted': 'COMMAND HANDLER'})
motion_fsm = smach.Concurrence(
outcomes=['succeeded', 'aborted'],
default_outcome='succeeded',
input_keys=['point', 'robot'], # parameter remapping
output_keys=['new_point', 'robot'],
#TODO# child_termination_cb = child_term_cb, // use instead of outcome_map
#TODO# outcome_cb = out_cb,
outcome_map = {
'succeeded': {'COLLISION HANDLER': 'succeeded', 'MOTION': 'succeeded'},
'aborted': {'COLLISION HANDLER': 'aborted'},
#'aborted': {'ROBOT': 'aborted'}
}
)
smach.StateMachine.add('MOVE', motion_fsm,
transitions={'succeeded': 'COMMAND HANDLER',
'aborted': 'COMMAND HANDLER'},
remapping = {'point':'parameter'})
with motion_fsm:
smach.Concurrence.add('MOTION', Motion(outcomes=['succeeded', 'aborted'],
input_keys=['point', 'robot'],
output_keys=[]))
smach.Concurrence.add('COLLISION HANDLER', CollisionHandler(outcomes=['succeeded', 'aborted'],
input_keys=['point', 'robot'],
output_keys=['new_point']))
command_handler_trans = {action:action.upper() for action in actions} # commands parsing for Command handler
command_handler_trans.update({'strategy ended':'strategy ended', 'preempted':'preempted'})
smach.StateMachine.add('COMMAND HANDLER', CommandHandler(outcomes=actions + ['strategy ended'] + ['preempted'],
input_keys=['parsed_strategy', 'robot'],
output_keys=['parameter', 'robot']),
transitions=command_handler_trans)
# End Robot states initialisation, except Funny action
#End of FSM description
return game_fsm
if __name__ == "__main__":
tmp = time.time()
fsm = create_fsm()
rospy.init_node('FSM', anonymous=True)
sis = smach_ros.IntrospectionServer('server_name', fsm, '/SM_ROOT')
sis.start()
# Execute the state machine
fsm.execute()
# Wait for ctrl-c to stop the application
#rospy.spin()
sis.stop()
print 'FSM elapsed after: ', time.time() - tmp, ' sec'
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import logging
import queue
import sys
import threading
import time
import traceback
from builtins import object
from builtins import range
from concurrent import futures
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
class SdkHarness(object):
REQUEST_METHOD_PREFIX = '_request_'
SCHEDULING_DELAY_THRESHOLD_SEC = 5*60 # 5 Minutes
def __init__(self, control_address, worker_count, credentials=None,
profiler_factory=None):
self._alive = True
self._worker_count = worker_count
self._worker_index = 0
if credentials is None:
logging.info('Creating insecure control channel.')
self._control_channel = grpc.insecure_channel(control_address)
else:
logging.info('Creating secure control channel.')
self._control_channel = grpc.secure_channel(control_address, credentials)
grpc.channel_ready_future(self._control_channel).result(timeout=60)
logging.info('Control channel established.')
self._control_channel = grpc.intercept_channel(
self._control_channel, WorkerIdInterceptor())
self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
credentials)
self._state_handler_factory = GrpcStateHandlerFactory()
self._profiler_factory = profiler_factory
self.workers = queue.Queue()
    # One thread is enough for getting the progress reports.
    # Assumption: progress report generation does not do IO or wait on other
    # resources, so having multiple threads would not improve performance and
    # would only add complexity.
self._progress_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
self._process_thread_pool = futures.ThreadPoolExecutor(
max_workers=self._worker_count)
self._monitoring_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
self._instruction_id_vs_worker = {}
self._fns = {}
self._responses = queue.Queue()
self._process_bundle_queue = queue.Queue()
self._unscheduled_process_bundle = {}
logging.info('Initializing SDKHarness with %s workers.', self._worker_count)
def run(self):
control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(self._control_channel)
no_more_work = object()
# Create workers
for _ in range(self._worker_count):
      # SdkHarness manages function registration and shares self._fns with all
      # the workers. This is needed because function registration (register)
      # and execution (process_bundle) are sent over different requests and we
      # do not really know which worker is going to process the bundle
      # for a function until we get the process_bundle request. Moreover, the
      # same function is reused by different process_bundle calls and may
      # be executed by different workers, hence we need a
      # centralized function list shared among all the workers.
self.workers.put(
SdkWorker(
state_handler_factory=self._state_handler_factory,
data_channel_factory=self._data_channel_factory,
fns=self._fns,
profiler_factory=self._profiler_factory))
self._monitoring_thread_pool.submit(self._monitor_process_bundle)
def get_responses():
while True:
response = self._responses.get()
if response is no_more_work:
return
yield response
for work_request in control_stub.Control(get_responses()):
logging.debug('Got work %s', work_request.instruction_id)
request_type = work_request.WhichOneof('request')
      # Name-spacing the request method with '_request_'. The called method
      # will be like self._request_register(request)
getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
work_request)
logging.info('No more requests from control plane')
logging.info('SDK Harness waiting for in-flight requests to complete')
self._alive = False
# Wait until existing requests are processed.
self._progress_thread_pool.shutdown()
self._process_thread_pool.shutdown()
self._monitoring_thread_pool.shutdown(wait=False)
# get_responses may be blocked on responses.get(), but we need to return
# control to its caller.
self._responses.put(no_more_work)
# Stop all the workers and clean all the associated resources
self._data_channel_factory.close()
self._state_handler_factory.close()
logging.info('Done consuming work.')
def _execute(self, task, request):
try:
response = task()
except Exception: # pylint: disable=broad-except
traceback_string = traceback.format_exc()
print(traceback_string, file=sys.stderr)
logging.error(
'Error processing instruction %s. Original traceback is\n%s\n',
request.instruction_id, traceback_string)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=traceback_string)
self._responses.put(response)
def _request_register(self, request):
def task():
for process_bundle_descriptor in getattr(
request, request.WhichOneof('request')).process_bundle_descriptor:
self._fns[process_bundle_descriptor.id] = process_bundle_descriptor
return beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
self._execute(task, request)
def _request_process_bundle(self, request):
def task():
# Take a free worker, waiting until one is available.
worker = self.workers.get()
# Get the first work item in the queue
work = self._process_bundle_queue.get()
# Record the instruction_id -> worker mapping for progress-report lookups.
self._instruction_id_vs_worker[work.instruction_id] = worker
self._unscheduled_process_bundle.pop(work.instruction_id, None)
try:
self._execute(lambda: worker.do_instruction(work), work)
finally:
# Delete the instruction_id <-> worker mapping
self._instruction_id_vs_worker.pop(work.instruction_id, None)
# Put the worker back in the free worker pool
self.workers.put(worker)
# Create a task for each process_bundle request and schedule it
self._process_bundle_queue.put(request)
self._unscheduled_process_bundle[request.instruction_id] = time.time()
self._process_thread_pool.submit(task)
logging.debug(
"Currently using %s threads.", len(self._process_thread_pool._threads))
def _request_process_bundle_progress(self, request):
def task():
instruction_reference = getattr(
request, request.WhichOneof('request')).instruction_reference
if instruction_reference in self._instruction_id_vs_worker:
self._execute(
lambda: self._instruction_id_vs_worker[
instruction_reference
].do_instruction(request), request)
else:
self._execute(lambda: beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=(
'Process bundle request not yet scheduled for instruction {}' if
instruction_reference in self._unscheduled_process_bundle else
'Unknown process bundle instruction {}').format(
instruction_reference)), request)
self._progress_thread_pool.submit(task)
def _monitor_process_bundle(self):
"""
Monitor the unscheduled bundles and log if a bundle is not scheduled for
more than SCHEDULING_DELAY_THRESHOLD_SEC.
"""
while self._alive:
time.sleep(SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC)
# Check for bundles to be scheduled.
if self._unscheduled_process_bundle:
current_time = time.time()
# Iterate over a snapshot of the keys, since workers may remove entries concurrently.
for instruction_id in list(self._unscheduled_process_bundle):
request_time = None
try:
request_time = self._unscheduled_process_bundle[instruction_id]
except KeyError:
pass
if request_time:
scheduling_delay = current_time - request_time
if scheduling_delay > SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC:
logging.warning('Unable to schedule instruction %s for %s seconds',
instruction_id, scheduling_delay)
class SdkWorker(object):
def __init__(self, state_handler_factory, data_channel_factory, fns,
profiler_factory=None):
self.fns = fns
self.state_handler_factory = state_handler_factory
self.data_channel_factory = data_channel_factory
self.active_bundle_processors = {}
self.cached_bundle_processors = collections.defaultdict(list)
self.profiler_factory = profiler_factory
def do_instruction(self, request):
request_type = request.WhichOneof('request')
if request_type:
# E.g. if register is set, this will call self.register(request.register)
return getattr(self, request_type)(getattr(request, request_type),
request.instruction_id)
else:
raise NotImplementedError
def register(self, request, instruction_id):
for process_bundle_descriptor in request.process_bundle_descriptor:
self.fns[process_bundle_descriptor.id] = process_bundle_descriptor
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
def process_bundle(self, request, instruction_id):
with self.get_bundle_processor(
instruction_id,
request.process_bundle_descriptor_reference) as bundle_processor:
with self.maybe_profile(instruction_id):
bundle_processor.process_bundle(instruction_id)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
metrics=bundle_processor.metrics(),
monitoring_infos=bundle_processor.monitoring_infos()))
@contextlib.contextmanager
def get_bundle_processor(self, instruction_id, bundle_descriptor_id):
try:
# pop() is threadsafe
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
state_handler = processor.state_handler
except IndexError:
process_bundle_desc = self.fns[bundle_descriptor_id]
state_handler = self.state_handler_factory.create_state_handler(
process_bundle_desc.state_api_service_descriptor)
processor = bundle_processor.BundleProcessor(
process_bundle_desc,
state_handler,
self.data_channel_factory)
try:
self.active_bundle_processors[instruction_id] = processor
with state_handler.process_instruction_id(instruction_id):
yield processor
finally:
del self.active_bundle_processors[instruction_id]
# Outside the finally block as we only want to re-use on success.
processor.reset()
self.cached_bundle_processors[bundle_descriptor_id].append(processor)
def process_bundle_progress(self, request, instruction_id):
# It is an error to get progress for a not-in-flight bundle.
processor = self.active_bundle_processors.get(request.instruction_reference)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
metrics=processor.metrics() if processor else None,
monitoring_infos=processor.monitoring_infos() if processor else []))
@contextlib.contextmanager
def maybe_profile(self, instruction_id):
if self.profiler_factory:
profiler = self.profiler_factory(instruction_id)
if profiler:
with profiler:
yield
else:
yield
else:
yield
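# A minimal usage sketch for SdkWorker (hypothetical request objects; in practice
# the harness drives workers from the control stream rather than calling them
# directly):
#
#   worker = SdkWorker(state_handler_factory, data_channel_factory, fns={})
#   response = worker.do_instruction(instruction_request)
#
# do_instruction dispatches on the request's oneof field, so a 'register' request
# is handled by SdkWorker.register and a 'process_bundle' request by
# SdkWorker.process_bundle, each returning an InstructionResponse.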
class StateHandlerFactory(with_metaclass(abc.ABCMeta, object)):
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_state_handler(self, api_service_descriptor):
"""Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
"""A factory for ``GrpcStateHandler``.
Caches the created channels by ``state descriptor url``.
"""
def __init__(self):
self._state_handler_cache = {}
self._lock = threading.Lock()
self._throwing_state_handler = ThrowingStateHandler()
def create_state_handler(self, api_service_descriptor):
if not api_service_descriptor:
return self._throwing_state_handler
url = api_service_descriptor.url
if url not in self._state_handler_cache:
with self._lock:
if url not in self._state_handler_cache:
logging.info('Creating channel for %s', url)
grpc_channel = grpc.insecure_channel(
url,
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options=[("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)])
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(grpc_channel,
WorkerIdInterceptor())
self._state_handler_cache[url] = GrpcStateHandler(
beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel))
return self._state_handler_cache[url]
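# The check-lock-recheck pattern above is double-checked locking: the common
# cache-hit path avoids taking the lock, while concurrent misses for the same
# url still result in a single channel and GrpcStateHandler being created.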
def close(self):
logging.info('Closing all cached gRPC state handlers.')
for _, state_handler in self._state_handler_cache.items():
state_handler.done()
self._state_handler_cache.clear()
class ThrowingStateHandler(object):
"""A state handler that errors on any requests."""
def blocking_get(self, state_key, instruction_reference):
raise RuntimeError(
'Unable to handle state requests for a ProcessBundleDescriptor without '
'a state ApiServiceDescriptor, for state key %s and instruction %s.'
% (state_key, instruction_reference))
def blocking_append(self, state_key, data, instruction_reference):
raise RuntimeError(
'Unable to handle state requests for a ProcessBundleDescriptor without '
'a state ApiServiceDescriptor, for state key %s and instruction %s.'
% (state_key, instruction_reference))
def blocking_clear(self, state_key, instruction_reference):
raise RuntimeError(
'Unable to handle state requests for a ProcessBundleDescriptor without '
'a state ApiServiceDescriptor, for state key %s and instruction %s.'
% (state_key, instruction_reference))
class GrpcStateHandler(object):
_DONE = object()
def __init__(self, state_stub):
self._lock = threading.Lock()
self._state_stub = state_stub
self._requests = queue.Queue()
self._responses_by_id = {}
self._last_id = 0
self._exc_info = None
self._context = threading.local()
self.start()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id):
if getattr(self._context, 'process_instruction_id', None) is not None:
raise RuntimeError(
'Already bound to %r' % self._context.process_instruction_id)
self._context.process_instruction_id = bundle_id
try:
yield
finally:
self._context.process_instruction_id = None
def start(self):
self._done = False
def request_iter():
while True:
request = self._requests.get()
if request is self._DONE or self._done:
break
yield request
responses = self._state_stub.State(request_iter())
def pull_responses():
try:
for response in responses:
self._responses_by_id[response.id].set(response)
if self._done:
break
except: # pylint: disable=bare-except
self._exc_info = sys.exc_info()
raise
reader = threading.Thread(target=pull_responses, name='read_state')
reader.daemon = True
reader.start()
def done(self):
self._done = True
self._requests.put(self._DONE)
def blocking_get(self, state_key):
response = self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
get=beam_fn_api_pb2.StateGetRequest()))
if response.get.continuation_token:
raise NotImplementedError
return response.get.data
def blocking_append(self, state_key, data):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
append=beam_fn_api_pb2.StateAppendRequest(data=data)))
def blocking_clear(self, state_key):
self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
clear=beam_fn_api_pb2.StateClearRequest()))
def _blocking_request(self, request):
request.id = self._next_id()
request.instruction_reference = self._context.process_instruction_id
self._responses_by_id[request.id] = future = _Future()
self._requests.put(request)
while not future.wait(timeout=1):
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
elif self._done:
raise RuntimeError(
'State stream was closed before a response for request %s arrived'
% request.id)
del self._responses_by_id[request.id]
response = future.get()
if response.error:
raise RuntimeError(response.error)
else:
return response
def _next_id(self):
self._last_id += 1
return str(self._last_id)
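# Sketch of how the blocking state calls above are used (hypothetical values;
# requests must be issued inside process_instruction_id so that the instruction
# reference can be attached to each StateRequest):
#
#   with handler.process_instruction_id('instruction-1'):
#       handler.blocking_append(state_key, b'serialized-data')
#       data = handler.blocking_get(state_key)
#       handler.blocking_clear(state_key)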
class _Future(object):
"""A simple future object to implement blocking requests.
"""
def __init__(self):
self._event = threading.Event()
def wait(self, timeout=None):
return self._event.wait(timeout)
def get(self, timeout=None):
if self.wait(timeout):
return self._value
else:
raise LookupError()
def set(self, value):
self._value = value
self._event.set()
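# How _Future is used in this module: the requesting thread creates a _Future,
# stores it in _responses_by_id keyed by the request id, and blocks in
# wait()/get(); the reader thread delivers the matching StateResponse via set().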
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GPLv3 license (ASTRA toolbox)
Note that the TomoPhantom package is released under Apache License, Version 2.0
* Script to generate 3D analytical phantoms and their projection data using TomoPhantom
* Synthetic flat fields are also generated and noise is incorporated into the data
together with normalisation errors. This simulates more challenging data for
reconstruction.
* tomobar is required for reconstruction
>>>>> Dependencies (reconstruction): <<<<<
1. ASTRA toolbox: conda install -c astra-toolbox astra-toolbox
2. tomobar: conda install -c dkazanc tomobar
or install from https://github.com/dkazanc/ToMoBAR
@author: Daniil Kazantsev
"""
import timeit
import os
import matplotlib.pyplot as plt
import numpy as np
import tomophantom
from tomophantom import TomoP3D
from tomophantom.supp.qualitymetrics import QualityTools
from tomophantom.supp.flatsgen import synth_flats
print ("Building 3D phantom using TomoPhantom software")
tic=timeit.default_timer()
model = 17 # select a model number from the library
N_size = 256 # Define phantom dimensions using a scalar value (cubic phantom)
path = os.path.dirname(tomophantom.__file__)
path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
#This will generate a N_size x N_size x N_size phantom (3D)
phantom_tm = TomoP3D.Model(model, N_size, path_library3D)
toc=timeit.default_timer()
Run_time = toc - tic
print("Phantom has been built in {} seconds".format(Run_time))
sliceSel = int(0.5*N_size)
#plt.gray()
plt.figure()
plt.subplot(131)
plt.imshow(phantom_tm[sliceSel,:,:],vmin=0, vmax=1)
plt.title('3D Phantom, axial view')
plt.subplot(132)
plt.imshow(phantom_tm[:,sliceSel,:],vmin=0, vmax=1)
plt.title('3D Phantom, coronal view')
plt.subplot(133)
plt.imshow(phantom_tm[:,:,sliceSel],vmin=0, vmax=1)
plt.title('3D Phantom, sagittal view')
plt.show()
# Projection geometry related parameters:
Horiz_det = int(np.sqrt(2)*N_size) # detector column count (horizontal)
Vert_det = N_size # detector row count (vertical) (no reason for it to be > N)
angles_num = int(0.5*np.pi*N_size) # number of projection angles
angles = np.linspace(0.0,179.9,angles_num,dtype='float32') # in degrees
angles_rad = angles*(np.pi/180.0)
#%%
print ("Building 3D analytical projection data with TomoPhantom")
projData3D_analyt= TomoP3D.ModelSino(model, N_size, Horiz_det, Vert_det, angles, path_library3D)
intens_max_clean = np.max(projData3D_analyt)
sliceSel = 150
plt.figure()
plt.subplot(131)
plt.imshow(projData3D_analyt[:,sliceSel,:],vmin=0, vmax=intens_max_clean)
plt.title('2D Projection (analytical)')
plt.subplot(132)
plt.imshow(projData3D_analyt[sliceSel,:,:],vmin=0, vmax=intens_max_clean)
plt.title('Sinogram view')
plt.subplot(133)
plt.imshow(projData3D_analyt[:,:,sliceSel],vmin=0, vmax=intens_max_clean)
plt.title('Tangentogram view')
plt.show()
#%%
print ("Simulate synthetic flat fields, add flat field background to the projections and add noise")
I0 = 15000 # source intensity
flatsnum = 20 # the number of flat fields required
[projData3D_noisy, flatsSIM] = synth_flats(projData3D_analyt,
source_intensity = I0, source_variation=0.02,
arguments_Bessel = (1,10,10,12),
specklesize = 15,
kbar = 0.3,
jitter = 1.0,
sigmasmooth = 3, flatsnum=flatsnum)
#del projData3D_analyt
plt.figure()
plt.subplot(121)
plt.imshow(projData3D_noisy[:,0,:])
plt.title('2D Projection (before normalisation)')
plt.subplot(122)
plt.imshow(flatsSIM[:,0,:])
plt.title('A selected simulated flat-field')
plt.show()
#%%
print ("Normalise projections using ToMoBAR software")
from tomobar.supp.suppTools import normaliser
# Normalise the data; the required format is [detectorsX, Projections, detectorsY]
projData3D_norm = normaliser(projData3D_noisy, flatsSIM, darks=None, log='true', method='mean')
#del projData3D_noisy
intens_max = 0.3*np.max(projData3D_norm)
sliceSel = 150
plt.figure()
plt.subplot(131)
plt.imshow(projData3D_norm[:,sliceSel,:],vmin=0, vmax=intens_max)
plt.title('Normalised 2D Projection (erroneous)')
plt.subplot(132)
plt.imshow(projData3D_norm[sliceSel,:,:],vmin=0, vmax=intens_max)
plt.title('Sinogram view')
plt.subplot(133)
plt.imshow(projData3D_norm[:,:,sliceSel],vmin=0, vmax=intens_max)
plt.title('Tangentogram view')
plt.show()
#%%
# initialise tomobar DIRECT reconstruction class ONCE
from tomobar.methodsDIR import RecToolsDIR
RectoolsDIR = RecToolsDIR(DetectorsDimH = Horiz_det, # DetectorsDimH # detector dimension (horizontal)
DetectorsDimV = Vert_det, # DetectorsDimV # detector dimension (vertical) for 3D case only
CenterRotOffset = None, # Center of Rotation (CoR) scalar (for 3D case only)
AnglesVec = angles_rad, # array of angles in radians
ObjSize = N_size, # a scalar to define reconstructed object dimensions
device_projector = 'gpu')
print ("Reconstruction using FBP from tomobar")
recNumerical= RectoolsDIR.FBP(projData3D_norm) # FBP reconstruction
recNumerical *= intens_max_clean
sliceSel = int(0.5*N_size)
max_val = 1
#plt.gray()
plt.figure()
plt.subplot(131)
plt.imshow(recNumerical[sliceSel,:,:],vmin=0, vmax=max_val)
plt.title('3D Reconstruction, axial view')
plt.subplot(132)
plt.imshow(recNumerical[:,sliceSel,:],vmin=0, vmax=max_val)
plt.title('3D Reconstruction, coronal view')
plt.subplot(133)
plt.imshow(recNumerical[:,:,sliceSel],vmin=0, vmax=max_val)
plt.title('3D Reconstruction, sagittal view')
plt.show()
# calculate errors
Qtools = QualityTools(phantom_tm, recNumerical)
RMSE = Qtools.rmse()
print("Root Mean Square Error is {}".format(RMSE))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("Reconstructing with FISTA-OS method using tomobar")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
# initialise tomobar ITERATIVE reconstruction class ONCE
from tomobar.methodsIR import RecToolsIR
Rectools = RecToolsIR(DetectorsDimH = Horiz_det, # DetectorsDimH # detector dimension (horizontal)
DetectorsDimV = Vert_det, # DetectorsDimV # detector dimension (vertical) for 3D case only
CenterRotOffset = 0.0, # Center of Rotation (CoR) scalar (for 3D case only)
AnglesVec = angles_rad, # array of angles in radians
ObjSize = N_size, # a scalar to define reconstructed object dimensions
datafidelity='LS', # data fidelity: choose LS, PWLS (wip), GH (wip), Student (wip)
device_projector='gpu')
#%%
# prepare dictionaries with parameters:
_data_ = {'projection_norm_data' : projData3D_norm,
'OS_number' : 10} # data dictionary
lc = Rectools.powermethod(_data_) # calculate Lipschitz constant (run once to initialise)
# algorithm parameters
_algorithm_ = {'iterations' : 15,
'lipschitz_const' : lc}
# adding regularisation using the CCPi regularisation toolkit
_regularisation_ = {'method' : 'PD_TV',
'regul_param' : 0.0000035,
'iterations' : 80,
'device_regulariser': 'gpu'}
RecFISTA_os_reg = Rectools.FISTA(_data_, _algorithm_, _regularisation_)
RecFISTA_os_reg *= intens_max_clean
sliceSel = int(0.5*N_size)
max_val = 1
plt.figure()
plt.subplot(131)
plt.imshow(RecFISTA_os_reg[sliceSel,:,:],vmin=0, vmax=max_val)
plt.title('3D FISTA-TV Reconstruction, axial view')
plt.subplot(132)
plt.imshow(RecFISTA_os_reg[:,sliceSel,:],vmin=0, vmax=max_val)
plt.title('3D FISTA-TV Reconstruction, coronal view')
plt.subplot(133)
plt.imshow(RecFISTA_os_reg[:,:,sliceSel],vmin=0, vmax=max_val)
plt.title('3D FISTA-TV Reconstruction, sagittal view')
plt.show()
#%%
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_congress_haht
----------------------------------
Replicated policy engine high availability tests for `congress` module.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import shutil
import subprocess
import sys
import tempfile
import time
# Note: monkey patch to allow running this test standalone under 'nose'
import eventlet
eventlet.monkey_patch()
from oslo_log import log as logging
import requests
import tenacity
from congress.db import api as db
from congress.db import db_policy_rules
from congress.tests import base
from congress.tests import helper
LOG = logging.getLogger(__name__)
class TestCongressHAHT(base.SqlTestCase):
class client(object):
version = '/v1'
def __init__(self, port, host='0.0.0.0'):
self.host = host
self.port = port
self.base_url = 'http://' + host + ':%d' % port
def url(self, suffix=None):
if suffix is None:
return self.base_url
else:
return self.base_url + self.version + '/' + suffix
def get(self, suffix=None):
return requests.get(self.url(suffix))
def delete(self, suffix=None):
return requests.delete(self.url(suffix))
def post(self, suffix=None, json=None):
x = requests.post(self.url(suffix), json=json)
# print("status: %s, text: %s" % (x.status_code, x.text))
return x
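# The nested client above is a thin wrapper around `requests`; e.g. (hypothetical
# values) client(4001).post(suffix='policies', json={'name': 'alice'}) issues
# POST http://0.0.0.0:4001/v1/policies.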
def setUp(self):
super(TestCongressHAHT, self).setUp()
assert sys.executable is not None,\
'test cannot proceed when sys.executable is None'
# establish clean starting DB
self.clean_db()
shutil.copy(helper.test_path('haht/test.db.clean'),
helper.test_path('haht/test.db'))
self.clients = []
self.procs = []
self.outfiles = {}
self.errfiles = {}
self.pe1 = self.start_pe(1, 4001)
self.pe2 = self.start_pe(2, 4002)
def dump_nodes_logs(self):
LOG.error('PE1 process output:\n%s' %
self.read_output_file(self.outfiles[1]))
LOG.error('PE2 process output:\n%s' %
self.read_output_file(self.outfiles[2]))
def clean_db(self):
session = db.get_session()
with session.begin(subtransactions=True):
session.query(db_policy_rules.Policy).delete()
session.query(db_policy_rules.PolicyRule).delete()
def start_pe(self, num, port):
self.outfiles[num] = tempfile.NamedTemporaryFile(
mode='a+', suffix='.out',
prefix='congress-pe%d-%d-' % (num, port),
dir='/tmp')
self.errfiles[num] = tempfile.NamedTemporaryFile(
mode='a+', suffix='.err',
prefix='congress-pe%d-%d-' % (num, port),
dir='/tmp')
args = [sys.executable,
'congress/server/congress_server.py',
'--node-id',
'node_%d' % num,
'--api',
'--policy-engine',
'--config-file',
'congress/tests/etc/congress.conf.test.ha_pe%d' % num]
pe = subprocess.Popen(args,
stdout=self.outfiles[num],
stderr=self.outfiles[num],
cwd=helper.root_path())
self.addCleanup(pe.kill)
pe = self.client(port)
try:
helper.retry_check_function_return_value(
lambda: pe.get().status_code, 200)
except tenacity.RetryError:
out = self.read_output_file(self.outfiles[num])
LOG.error('PE%d failed to start. Process output:\n%s' % (num, out))
raise
return pe
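# start_pe launches a congress_server subprocess for the node and then polls its
# REST endpoint (via the client wrapper) until GET / returns 200, so the returned
# object is the HTTP client for that policy engine rather than the process handle.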
def read_output_file(self, file):
file.flush()
file.seek(0)
return ''.join(file.readlines())
def tail(self, thing, length=20):
lines = thing.split('\n')
return '\n'.join(lines[-length:])
def test_policy_create_delete(self):
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
# check policy alice in PE1
self.assertEqual(self.pe1.get('policies/alice').status_code, 200)
# check policy alice in PE2
helper.retry_check_function_return_value(
lambda: self.pe2.get('policies/alice').status_code, 200)
# create policy bob in PE2
self.assertEqual(self.pe2.post(
suffix='policies', json={'name': 'bob'}).status_code, 201)
# check policy bob in PE2
self.assertEqual(self.pe2.get('policies/bob').status_code, 200)
# check policy bob in PE1
helper.retry_check_function_return_value(
lambda: self.pe1.get('policies/bob').status_code, 200)
# check policy listings
self.assertEqual(len(self.pe1.get('policies').json()['results']), 4)
self.assertEqual(len(self.pe2.get('policies').json()['results']), 4)
# delete policy alice in PE2, and check deleted on both PE
self.assertEqual(self.pe2.delete('policies/alice').status_code, 200)
self.assertEqual(self.pe2.get('policies/alice').status_code, 404)
helper.retry_check_function_return_value(
lambda: self.pe1.get('policies/alice').status_code, 404)
# delete policy bob in PE2, and check deleted on both PE
self.assertEqual(self.pe2.delete('policies/bob').status_code, 200)
self.assertEqual(self.pe2.get('policies/bob').status_code, 404)
helper.retry_check_function_return_value(
lambda: self.pe1.get('policies/bob').status_code, 404)
def test_policy_rule_crud(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
# add rule to PE1
j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'}
self.assertEqual(self.pe1.post(
suffix='policies/alice/rules', json=j).status_code, 201)
self.assertEqual(
self.pe1.get('policies/alice/rules').status_code, 200)
self.assertEqual(
len(self.pe1.get('policies/alice/rules').
json()['results']), 1)
# retry necessary because of synchronization
helper.retry_check_function_return_value(
lambda: len(self.pe2.get('policies/alice/rules').
json()['results']), 1)
# add rule to PE2
j = {'rule': 'q(1)', 'name': 'rule2'}
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=j).status_code, 201)
# check 2 rules in each PE
self.assertEqual(len(
self.pe2.get('policies/alice/rules').json()['results']), 2)
self.assertEqual(len(
self.pe1.get('policies/alice/rules').json()['results']), 2)
# grab rule IDs
rules = self.pe2.get('policies/alice/rules').json()['results']
id1 = next(x['id'] for x in rules if x['name'] == 'rule1')
id2 = next(x['id'] for x in rules if x['name'] == 'rule2')
# show rules by id
self.assertEqual(
self.pe1.get('policies/alice/rules/%s' % id1).status_code, 200)
self.assertEqual(
self.pe2.get('policies/alice/rules/%s' % id1).status_code, 200)
self.assertEqual(
self.pe1.get('policies/alice/rules/%s' % id2).status_code, 200)
self.assertEqual(
self.pe2.get('policies/alice/rules/%s' % id2).status_code, 200)
# list tables
self.assertEqual(len(
self.pe1.get('policies/alice/tables').json()['results']), 2)
self.assertEqual(len(
self.pe2.get('policies/alice/tables').json()['results']), 2)
# show tables
self.assertEqual(
self.pe1.get('policies/alice/tables/p').status_code, 200)
self.assertEqual(
self.pe2.get('policies/alice/tables/p').status_code, 200)
self.assertEqual(
self.pe1.get('policies/alice/tables/q').status_code, 200)
self.assertEqual(
self.pe2.get('policies/alice/tables/q').status_code, 200)
# delete from PE1 and check both have 1 rule left
self.assertEqual(self.pe1.delete(
suffix='policies/alice/rules/%s' % id1).status_code, 200)
self.assertEqual(
len(self.pe1.get('policies/alice/rules').
json()['results']), 1)
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 1)
# delete from PE2 and check both have 0 rules left
self.assertEqual(self.pe2.delete(
suffix='policies/alice/rules/%s' % id2).status_code, 200)
self.assertEqual(
len(self.pe1.get('policies/alice/rules').
json()['results']), 0)
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 0)
except Exception:
self.dump_nodes_logs()
raise
def test_conflicting_policy_create_delete(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
self.assertEqual(self.pe2.post(
suffix='policies', json={'name': 'alice'}).status_code, 409)
# create policy bob in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'bob'}).status_code, 201)
self.assertEqual(self.pe2.delete(
suffix='policies/bob').status_code, 200)
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'bob'}).status_code, 201)
except Exception:
LOG.error('PE1 process output:\n%s' %
self.read_output_file(self.outfiles[1]))
LOG.error('PE2 process output:\n%s' %
self.read_output_file(self.outfiles[2]))
raise
def test_policy_rule_create_delete(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
# add rule to PE1
j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'}
self.assertEqual(self.pe1.post(
suffix='policies/alice/rules', json=j).status_code, 201)
self.assertEqual(
self.pe1.get('policies/alice/rules').status_code, 200)
self.assertEqual(
len(self.pe1.get('policies/alice/rules').
json()['results']), 1)
time.sleep(10) # wait for sync before reading from PE2
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 1)
# add rule to PE2
j = {'rule': 'q(1)', 'name': 'rule2'}
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=j).status_code, 201)
# check 2 rules in each PE
self.assertEqual(len(
self.pe2.get('policies/alice/rules').json()['results']), 2)
self.assertEqual(len(
self.pe1.get('policies/alice/rules').json()['results']), 2)
# grab rule IDs
rules = self.pe2.get('policies/alice/rules').json()['results']
id1 = next(x['id'] for x in rules if x['name'] == 'rule1')
id2 = next(x['id'] for x in rules if x['name'] == 'rule2')
# delete from PE1 and check both have 1 rule left
self.assertEqual(self.pe1.delete(
suffix='policies/alice/rules/%s' % id1).status_code, 200)
self.assertEqual(
len(self.pe1.get('policies/alice/rules').
json()['results']), 1)
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 1)
# delete from PE2 and check both have 0 rules left
self.assertEqual(self.pe2.delete(
suffix='policies/alice/rules/%s' % id2).status_code, 200)
self.assertEqual(
len(self.pe1.get('policies/alice/rules').
json()['results']), 0)
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 0)
except Exception:
self.dump_nodes_logs()
raise
def test_policy_rule_create_delete_interference(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'}
rule_create_res = self.pe2.post(
suffix='policies/alice/rules', json=j)
self.assertEqual(rule_create_res.status_code, 201)
rule_id = rule_create_res.json()['id']
self.assertEqual(self.pe1.delete(
suffix='policies/alice/rules/%s' % rule_id).status_code, 200)
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=j).status_code, 201)
except Exception:
self.dump_nodes_logs()
raise
def test_policy_rule_duplicate(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
j = {'rule': 'p(x) :- q(x)', 'name': 'rule1'}
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=j).status_code, 201)
self.assertEqual(self.pe1.post(
suffix='policies/alice/rules', json=j).status_code, 409)
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=j).status_code, 409)
self.assertEqual(
self.pe1.get('policies/alice/rules').status_code, 200)
self.assertLessEqual(
len(self.pe1.get('policies/alice/rules').json()['results']),
1)
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 1)
except Exception:
self.dump_nodes_logs()
raise
def test_policy_rule_recursion(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
r1 = {'rule': 'p(x) :- q(x)', 'name': 'rule1'}
r2 = {'rule': 'q(x) :- p(x)', 'name': 'rule2'}
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=r1).status_code, 201)
self.assertEqual(self.pe1.post(
suffix='policies/alice/rules', json=r2).status_code, 400)
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=r2).status_code, 400)
self.assertEqual(
self.pe1.get('policies/alice/rules').status_code, 200)
self.assertLessEqual(
len(self.pe1.get('policies/alice/rules').json()['results']),
1)
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 1)
except Exception:
self.dump_nodes_logs()
raise
def test_policy_rule_schema_mismatch(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
r1 = {'rule': 'p(x) :- q(x)', 'name': 'rule1'}
r2 = {'rule': 'p(x) :- q(x, x)', 'name': 'rule2'}
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=r1).status_code, 201)
self.assertEqual(self.pe1.post(
suffix='policies/alice/rules', json=r2).status_code, 400)
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=r2).status_code, 400)
self.assertEqual(
self.pe1.get('policies/alice/rules').status_code, 200)
self.assertLessEqual(
len(self.pe1.get('policies/alice/rules').json()['results']),
1)
self.assertEqual(
self.pe2.get('policies/alice/rules').status_code, 200)
self.assertEqual(
len(self.pe2.get('policies/alice/rules').
json()['results']), 1)
except Exception:
self.dump_nodes_logs()
raise
def test_policy_rule_evaluation(self):
try:
# create policy alice in PE1
self.assertEqual(self.pe1.post(
suffix='policies', json={'name': 'alice'}).status_code, 201)
# add rule to PE1
j = {'rule': 'p(x) :- q(x)', 'name': 'rule0'}
res = self.pe1.post(
suffix='policies/alice/rules', json=j)
self.assertEqual(res.status_code, 201)
r_id = res.json()['id']
# add data to PE1
j = {'rule': ' q( 1 ) ', 'name': 'rule1'}
res = self.pe1.post(
suffix='policies/alice/rules', json=j)
self.assertEqual(res.status_code, 201)
q1_id = res.json()['id']
# add data to PE2
j = {'rule': ' q ( 2 ) ', 'name': 'rule2'}
self.assertEqual(self.pe2.post(
suffix='policies/alice/rules', json=j).status_code, 201)
# eval on PE1
helper.retry_check_function_return_value_table(
lambda: [x['data'] for x in
self.pe1.get('policies/alice/tables/p/rows').json()[
'results']],
[[1], [2]])
# eval on PE2
helper.retry_check_function_return_value_table(
lambda: [x['data'] for x in
self.pe2.get('policies/alice/tables/p/rows').json()[
'results']],
[[1], [2]])
self.assertEqual(self.pe1.delete(
suffix='policies/alice/rules/%s' % q1_id).status_code, 200)
# eval on PE1
helper.retry_check_function_return_value_table(
lambda: [x['data'] for x in
self.pe1.get('policies/alice/tables/p/rows').json()[
'results']],
[[2]])
# eval on PE2
helper.retry_check_function_return_value_table(
lambda: [x['data'] for x in
self.pe2.get('policies/alice/tables/p/rows').json()[
'results']],
[[2]])
self.assertEqual(self.pe2.delete(
suffix='policies/alice/rules/%s' % r_id).status_code, 200)
helper.retry_check_function_return_value(lambda: self.pe1.get(
'policies/alice/tables/p/rows').status_code, 404)
helper.retry_check_function_return_value(lambda: self.pe2.get(
'policies/alice/tables/p/rows').status_code, 404)
except Exception:
self.dump_nodes_logs()
raise
|
|
# coding=utf-8
# Copyright 2020 Google AI, Google Brain, the HuggingFace Inc. team and Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model with Patience-based Early Exit. """
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.albert.modeling_albert import (
ALBERT_INPUTS_DOCSTRING,
ALBERT_START_DOCSTRING,
AlbertModel,
AlbertPreTrainedModel,
AlbertTransformer,
)
logger = logging.getLogger(__name__)
class AlbertTransformerWithPabee(AlbertTransformer):
def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
if current_layer == 0:
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
else:
hidden_states = hidden_states[0]
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(current_layer / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
)
hidden_states = layer_group_output[0]
return (hidden_states,)
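# adaptive_forward runs exactly one ALBERT layer step: on layer 0 it first maps the
# embedding output into the hidden size, on later layers it unpacks the previous
# step's single-element tuple. Returning after each layer lets the caller decide,
# layer by layer, whether to exit early (the PABEE patience mechanism).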
@add_start_docstrings(
"The bare ALBERT Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertModelWithPabee(AlbertModel):
def __init__(self, config):
super().__init__(config)
self.encoder = AlbertTransformerWithPabee(config)
self.init_weights()
self.patience = 0
self.inference_instances_num = 0
self.inference_layers_num = 0
self.regression_threshold = 0
def set_regression_threshold(self, threshold):
self.regression_threshold = threshold
def set_patience(self, patience):
self.patience = patience
def reset_stats(self):
self.inference_instances_num = 0
self.inference_layers_num = 0
def log_stats(self):
avg_inf_layers = self.inference_layers_num / self.inference_instances_num
message = f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up = {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
print(message)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_dropout=None,
output_layers=None,
regression=False,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
This output is usually *not* a good summary
of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = embedding_output
if self.training:
res = []
for i in range(self.config.num_hidden_layers):
encoder_outputs = self.encoder.adaptive_forward(
encoder_outputs,
current_layer=i,
attention_mask=extended_attention_mask,
head_mask=head_mask,
)
pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0]))
logits = output_layers[i](output_dropout(pooled_output))
res.append(logits)
elif self.patience == 0: # Use all layers for inference
encoder_outputs = self.encoder(encoder_outputs, extended_attention_mask, head_mask=head_mask)
pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0]))
res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
else:
patient_counter = 0
patient_result = None
calculated_layer_num = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
encoder_outputs = self.encoder.adaptive_forward(
encoder_outputs,
current_layer=i,
attention_mask=extended_attention_mask,
head_mask=head_mask,
)
pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0]))
logits = output_layers[i](pooled_output)
if regression:
labels = logits.detach()
if patient_result is not None:
patient_labels = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
patient_counter = 0
else:
labels = logits.detach().argmax(dim=1)
if patient_result is not None:
patient_labels = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
patient_counter += 1
else:
patient_counter = 0
patient_result = logits
if patient_counter == self.patience:
break
res = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
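# During inference with patience > 0, the loop above stops as soon as `patience`
# consecutive internal classifiers agree on the predicted label (or, for regression,
# their outputs stay within regression_threshold of each other); the per-instance
# layer counts feed the speed-up figure reported by log_stats().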
@add_start_docstrings(
"""Albert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassificationWithPabee(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModelWithPabee(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifiers = nn.ModuleList(
[nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import AlbertTokenizer
from pabee import AlbertForSequenceClassificationWithPabee
from torch import nn
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForSequenceClassificationWithPabee.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
logits = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_dropout=self.dropout,
output_layers=self.classifiers,
regression=self.num_labels == 1,
)
outputs = (logits[-1],)
if labels is not None:
total_loss = None
total_weights = 0
for ix, logits_item in enumerate(logits):
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits_item.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
if total_loss is None:
total_loss = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
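# Each internal classifier's loss is weighted by its depth (ix + 1), so deeper
# classifiers contribute more to the combined training loss before it is
# normalised by total_weights.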
outputs = (total_loss / total_weights,) + outputs
return outputs
|
|
import inspect
import os
import sys
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
from django.utils.timezone import now
from . import __version__ as VERSION
from .conf import get_default_language
from .utils import split_translated_fieldname
try:
from modeltranslation.translator import translator
DJANGO_MODELTRANSLATION_AVAILABLE = True
except ImportError:
DJANGO_MODELTRANSLATION_AVAILABLE = False
DEFAULT_LANGUAGE = get_default_language()
def _raise_if_not_django_modeltranslation():
"""Raise if we cannot import django-modeltranslation during migration"""
if not DJANGO_MODELTRANSLATION_AVAILABLE:
raise ImproperlyConfigured(
"django-modeltranslation must be still installed when creating"
"the modeltranslation -> modeltrans migrations."
)
def get_translatable_models():
"""
Get the translatable models according to django-modeltranslation
!! only use to migrate from django-modeltranslation !!
"""
_raise_if_not_django_modeltranslation()
return translator.get_registered_models()
def get_translated_fields(Model):
"""
Enumerates the translated fields for a model according to django-modeltranslation.
For example: title_nl, title_en, title_fr, body_nl, body_en, body_fr
!! only use to migrate from django-modeltranslation !!
"""
_raise_if_not_django_modeltranslation()
options = translator.get_options_for_model(Model)
for original_field, fields in options.fields.items():
for translated in fields:
yield translated.name
def copy_translations(Model, fields):
"""
Copy translations for all items in the database for a Model with
translations managed by django-modeltranslation into a json field `i18n`
managed by django-modeltrans.
Values for the default language will be copied to the original field.
Arguments:
Model: A (historical) Model from the migration's app registry
fields(iterable): list of fields to copy into their new places.
"""
for m in Model.objects.all():
m.i18n = {}
for field in fields:
value = getattr(m, field)
if value in (None, ""):
continue
original_field, lang = split_translated_fieldname(field)
if lang == DEFAULT_LANGUAGE:
setattr(m, original_field, value)
else:
m.i18n[field] = value
m.save()
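# Sketch of the resulting layout (hypothetical fields, DEFAULT_LANGUAGE == "en"):
#   title_en='Hello' is written back to the original `title` field, while
#   title_nl='Hallo' ends up as m.i18n['title_nl'] = 'Hallo'.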
def get_latest_migration(app_name, connection=None):
"""
Get the name of the latest applied migration and raises if unapplied
migrations exist for the app.
Arguments:
app_name(str): Name of the app.
connection: database connection to get the latest migration for.
Simplified version of
https://github.com/django/django/blob/1.9.2/django/core/management/commands/showmigrations.py#L38-L77
"""
if connection is None:
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
last = None
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
if plan_node in loader.applied_migrations:
last = plan_node[1]
else:
raise Exception("You have unapplied migration(s) for app {}".format(app_name))
shown.add(plan_node)
return last
def get_next_migration_filename(app_name, connection=None, migration_type="data"):
"""
Return name (including the absolute path) of the next migration to insert for this app
"""
latest_migration_name = get_latest_migration(app_name)
next_migration_name = "{0:04d}_i18n_{1}_migration.py".format(
int(latest_migration_name[0:4]) + 1, migration_type
)
app_base_path = os.path.dirname(apps.get_app_config(app_name).module.__file__)
return os.path.join(app_base_path, "migrations", next_migration_name)
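# Example (hypothetical app): if app "blog" has "0007_auto" as its latest applied
# migration, get_next_migration_filename("blog") returns
# ".../blog/migrations/0008_i18n_data_migration.py".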
class I18nMigration(object):
helper_functions = ()
template = """
# -*- coding: utf-8 -*-
# Generated by django-modeltrans {version} on {timestamp}
from __future__ import print_function, unicode_literals
from django.db import migrations
DEFAULT_LANGUAGE = "{DEFAULT_LANGUAGE}"
{helpers}
class Migration(migrations.Migration):
dependencies = [
("{app}", "{last_migration}"),
]
operations = [
{operations}
]
"""
def __init__(self, app):
self.models = []
self.app = app
self.migration_filename = (
get_latest_migration(self.app) or "# TODO: manually insert latest migration here"
)
def get_helper_functions(self):
def to_str(fn):
return inspect.getsource(fn) if callable(fn) else fn
for fn in self.helper_functions:
yield to_str(fn)
for fn in self.get_extra_helper_functions():
yield to_str(fn)
def get_extra_helper_functions(self):
return []
def add_model(self, Model, fields):
self.models.append((Model, fields))
def get_helper_src(self):
return "\n\n".join(self.get_helper_functions())
def write(self, out=None):
if out is None:
out = sys.stdout
out.write(
self.template.format(
version=VERSION,
DEFAULT_LANGUAGE=getattr(
settings, "MODELTRANSLATION_DEFAULT_LANGUAGE", get_default_language()
),
timestamp=now().strftime("%Y-%m-%d %H:%M"),
helpers=self.get_helper_src(),
app=self.app,
last_migration=self.migration_filename,
operations=self.get_operations(),
)
)
def write_migration_file(self):
"""
Write the migration to file.
"""
filename = get_next_migration_filename(self.app, migration_type=self.migration_type)
with open(filename, "w") as f:
self.write(f)
return filename
class I18nDataMigration(I18nMigration):
migration_type = "data"
helper_functions = (split_translated_fieldname, copy_translations)
forwards_template = """
def forwards(apps, schema_editor):
app = '{app}'
todo = (
{todo},
)
for model, fields in todo:
Model = apps.get_model(app, model)
copy_translations(Model, fields)
"""
def get_extra_helper_functions(self):
yield self.forwards_template.format(
todo=",\n ".join(
[str((Model.__name__, fields)) for Model, fields in self.models]
),
app=self.app,
)
def get_operations(self):
return """
# The copying of values is (sort of) reversible by a no-op:
# - values are copied into i18n (which is not used by anything but django-modeltrans)
# - the default language is copied to the original field, which was not used
# with django-modeltrans.
migrations.RunPython(forwards, migrations.RunPython.noop),
"""
|
|
import copy
import json
import os
import re
from pydmrs.matching.exact_matching import dmrs_exact_matching
from shapeworld.analyzers.ace import Ace
from shapeworld.captions import Attribute, Relation, EntityType, Selector, Existential, Quantifier, NumberBound, ComparativeQuantifier, Proposition
from shapeworld.realizers.dmrs.dmrs import Dmrs, SubDmrs, create_sortinfo
from shapeworld.realizers.dmrs.realizer import prepare_ace, prepare_grammar
int_regex = re.compile(pattern=r'^-?[0-9]+$')
float_regex = re.compile(pattern=r'^-?[0-9]+\.[0-9]+$')
nested_tuple_regex = re.compile(pattern=r'^([a-z]+(\+[a-z]+)+)(,([a-z]+(\+[a-z]+)+))+$')
tuple_regex = re.compile(pattern=r'^[a-z]+(,[a-z]+)+$')
def parse_string(string):
if int_regex.match(string):
return int(string)
elif float_regex.match(string):
return float(string)
elif nested_tuple_regex.match(string):
return tuple(tuple(s.split('+')) for s in string.split(','))
elif tuple_regex.match(string):
return tuple(string.split(','))
else:
return str(string)
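# parse_string maps JSON string keys onto Python values, e.g. (illustrative inputs):
#   "3" -> 3, "-1.5" -> -1.5, "red,green" -> ('red', 'green'),
#   "red+dark,green+light" -> (('red', 'dark'), ('green', 'light')),
#   "shape" -> 'shape'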
class DmrsAnalyzer(object):
def __init__(self, language):
prepare_ace()
prepare_grammar(language=language)
directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'realizers', 'dmrs')
self.ace_path = os.path.join(directory, 'resources', 'ace')
self.erg_path = os.path.join(directory, 'languages', language + '.dat')
self.ace = Ace(executable=self.ace_path, grammar=self.erg_path)
with open(os.path.join(directory, 'languages', language + '.json'), 'r') as filehandle:
language = json.load(fp=filehandle)
if 'sortinfos' in language:
sortinfo_classes = dict()
sortinfo_shortforms = dict()
for cvarsort, sortinfo in language['sortinfos'].items():
assert 'features' in sortinfo
sortinfo_class = create_sortinfo(cvarsort, tuple(sortinfo['features']))
sortinfo_classes[cvarsort] = sortinfo_class
if 'shortform' in sortinfo:
shortform = sortinfo['shortform']
assert all(feature in sortinfo_class.features for feature in shortform)
assert all(len(key) == 1 and key not in '_?' for feature, kvs in shortform.items() for key in kvs)
sortinfo_shortforms[cvarsort] = shortform
else:
sortinfo_classes = None
sortinfo_shortforms = None
self.unused = set()
self.attributes = dict()
self.attribute_by_key = dict()
self.relation_attribute = None
if 'attributes' in language:
for predtype, values in language['attributes'].items():
predtype = parse_string(predtype)
if predtype == 'relation':
self.relation_attribute = Dmrs.parse(values['dmrs'])
self.unused.add(('attribute', 'relation'))
continue
elif predtype not in self.attributes:
self.attributes[predtype] = dict()
for value, attribute in values.items():
value = parse_string(value)
self.attributes[predtype][value] = Dmrs.parse(attribute['dmrs'])
assert attribute['key'] not in self.attribute_by_key
self.attribute_by_key[attribute['key']] = (predtype, value)
self.unused.add(('attribute', predtype, value))
self.entity_type = None
if 'type' in language:
self.entity_type = Dmrs.parse(language['type']['dmrs'])
self.unused.add(('type',))
self.selectors = dict()
self.selector_by_key = dict()
self.unique_selector = None
if 'selectors' in language:
for predtype, values in language['selectors'].items():
predtype = parse_string(predtype)
if predtype == 'unique':
self.unique_selector = Dmrs.parse(values['dmrs'])
self.unused.add(('selector', 'unique'))
continue
elif predtype not in self.selectors:
self.selectors[predtype] = dict()
for value, selector in values.items():
value = parse_string(value)
self.selectors[predtype][value] = Dmrs.parse(selector['dmrs'])
assert selector['key'] not in self.selector_by_key
self.selector_by_key[selector['key']] = (predtype, value)
self.unused.add(('selector', predtype, value))
self.relations = dict()
self.relation_by_key = dict()
self.attribute_relation = None
self.type_relation = None
if 'relations' in language:
for predtype, values in language['relations'].items():
predtype = parse_string(predtype)
if predtype == 'attribute':
self.attribute_relation = Dmrs.parse(values['dmrs'])
self.unused.add(('relation', 'attribute'))
continue
elif predtype == 'type':
self.type_relation = Dmrs.parse(values['dmrs'])
self.unused.add(('relation', 'type'))
continue
elif predtype not in self.relations:
self.relations[predtype] = dict()
for value, relation in values.items():
value = parse_string(value)
self.relations[predtype][value] = Dmrs.parse(relation['dmrs'])
assert relation['key'] not in self.relation_by_key
self.relation_by_key[relation['key']] = (predtype, value)
self.unused.add(('relation', predtype, value))
self.existential = None
self.type_existential = None
self.selector_existential = None
if 'existential' in language:
if 'type' in language['existential']:
self.type_existential = Dmrs.parse(language['existential']['type']['dmrs'])
self.unused.add(('existential', 'type'))
if 'selector' in language['existential']:
self.selector_existential = Dmrs.parse(language['existential']['selector']['dmrs'])
self.unused.add(('existential', 'selector'))
self.quantifiers = dict()
self.quantifier_by_key = dict()
if 'quantifiers' in language:
for qtype, qranges in language['quantifiers'].items():
qtype = parse_string(qtype)
if qtype not in self.quantifiers:
self.quantifiers[qtype] = dict()
if qtype == 'composed':
for identifier, quantifier in qranges.items():
identifier = parse_string(identifier)
definition = tuple((str(qtype), str(qrange), quantity) for qtype, qrange, quantity in quantifier.pop('definition'))
self.quantifiers[qtype][identifier] = {definition: Dmrs.parse(quantifier['dmrs'])}
assert identifier not in self.quantifier_by_key
self.quantifier_by_key[identifier] = (qtype, identifier, definition)
self.unused.add(('quantifier', qtype, identifier, definition))
continue
for qrange, quantities in qranges.items():
qrange = parse_string(qrange)
if qrange not in self.quantifiers[qtype]:
self.quantifiers[qtype][qrange] = dict()
for quantity, quantifier in quantities.items():
quantity = parse_string(quantity)
self.quantifiers[qtype][qrange][quantity] = Dmrs.parse(quantifier['dmrs'])
assert quantifier['key'] not in self.quantifier_by_key
self.quantifier_by_key[quantifier['key']] = (qtype, qrange, quantity)
self.unused.add(('quantifier', qtype, qrange, quantity))
self.number_bounds = dict()
self.number_bound_by_key = dict()
if 'number-bounds' in language:
for bound, number_bound in language['number-bounds'].items():
bound = parse_string(bound)
self.number_bounds[bound] = Dmrs.parse(number_bound['dmrs'])
assert number_bound['key'] not in self.number_bound_by_key
self.number_bound_by_key[number_bound['key']] = (bound,)
self.unused.add(('number-bound', bound))
self.comparative_quantifiers = dict()
self.comparative_quantifier_by_key = dict()
if 'comparative-quantifiers' in language:
for qtype, qranges in language['comparative-quantifiers'].items():
qtype = parse_string(qtype)
if qtype not in self.comparative_quantifiers:
self.comparative_quantifiers[qtype] = dict()
if qtype == 'composed':
for identifier, comparative_quantifier in qranges.items():
identifier = parse_string(identifier)
definition = tuple((str(qtype), str(qrange), quantity) for qtype, qrange, quantity in comparative_quantifier.pop('definition'))
self.comparative_quantifiers[qtype][identifier] = {definition: Dmrs.parse(comparative_quantifier['dmrs'])}
assert identifier not in self.comparative_quantifier_by_key
self.comparative_quantifier_by_key[identifier] = (qtype, identifier, definition)
self.unused.add(('comparative-quantifier', qtype, identifier, definition))
continue
for qrange, quantities in qranges.items():
qrange = parse_string(qrange)
if qrange not in self.comparative_quantifiers[qtype]:
self.comparative_quantifiers[qtype][qrange] = dict()
for quantity, quantifier in quantities.items():
quantity = parse_string(quantity)
self.comparative_quantifiers[qtype][qrange][quantity] = Dmrs.parse(quantifier['dmrs'])
assert quantifier['key'] not in self.comparative_quantifier_by_key
self.comparative_quantifier_by_key[quantifier['key']] = (qtype, qrange, quantity)
self.unused.add(('comparative-quantifier', qtype, qrange, quantity))
self.propositions = dict()
self.proposition_by_key = dict()
for connective, proposition in language['propositions'].items():
connective = parse_string(connective)
if isinstance(proposition['dmrs'], list):
self.propositions[connective] = tuple(Dmrs.parse(dmrs) for dmrs in proposition['dmrs'])
else:
self.propositions[connective] = Dmrs.parse(proposition['dmrs'])
assert proposition['key'] not in self.proposition_by_key
self.proposition_by_key[proposition['key']] = connective
self.unused.add(('proposition', connective))
self.hierarchy = language['hierarchy']
self.pre_processing = list()
self.pre_processing_by_key = dict()
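        # Pre-processing inverts the language's post-processing paraphrases:
        # each rule's 'replace' pattern becomes the search pattern and is
        # rewritten back to its 'reverse' pattern (or the original 'search'
        # pattern), with the rules applied in reverse order.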
for n, paraphrase in enumerate(reversed(language['post-processing'])):
search = Dmrs.parse(paraphrase['replace'])
replace = Dmrs.parse(paraphrase.get('reverse', paraphrase['search']))
disable_hierarchy = paraphrase.get('disable_hierarchy', False)
match_top_index = paraphrase.get('match_top_index', False)
self.pre_processing.append((search, replace, disable_hierarchy, match_top_index))
assert paraphrase['key'] not in self.pre_processing_by_key
self.pre_processing_by_key[paraphrase['key']] = n
def analyze(self, sentences):
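        """Parses the given sentences with ACE and returns one caption per sentence.
        For each sentence, the first MRS reading that converts to a DMRS and
        yields a caption analysis is used; sentences without such a reading
        map to None.
        """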
captions = list()
mrs_iter_iter = self.ace.parse(sentence_list=sentences)
for mrs_iter in mrs_iter_iter:
for k, mrs in enumerate(mrs_iter):
if mrs is None:
continue
try:
dmrs = mrs.convert_to(cls=Dmrs, copy_nodes=True)
except Exception:
continue
analyses = self.analyze2(dmrs=dmrs)
try:
captions.append(next(analyses))
break
except StopIteration:
continue
else:
captions.append(None)
return captions
def analyze2(self, dmrs):
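        """Yields caption analyses whose composed DMRS exactly covers the given DMRS.
        The DMRS is first rewritten by the pre-processing paraphrases; each
        candidate caption is then kept only if its DMRS matches the whole
        graph node for node.
        """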
# print(dmrs.dumps_xml())
for search, replace, disable_hierarchy, match_top_index in self.pre_processing:
dmrs = dmrs.apply_paraphrases(paraphrases=[(search, replace)], hierarchy=(None if disable_hierarchy else self.hierarchy), match_top_index=match_top_index)
# if any(str(node.pred) == '_if+and+only+if_x_1' for node in dmrs.iter_nodes()) and all(str(node.pred) != 'generic_entity' for node in dmrs.iter_nodes()):
# print(dmrs.dumps_xml())
# if any(str(node.pred) == 'much-many_a' for node in dmrs.iter_nodes()) and all(str(node.pred) != 'generic_entity' for node in dmrs.iter_nodes()) and all(str(node.pred) != 'number_q' for node in dmrs.iter_nodes()) and all(str(node.pred) != 'loc_nonsp' for node in dmrs.iter_nodes()):
# print(dmrs.dumps_xml())
# print('analyse', dmrs.dumps_xml())
# for caption_type in ('attribute', 'type', 'relation', 'existential', 'quantifier', 'number_bound', 'comparative_quantifier'):
for caption, caption_dmrs in self.caption_with_dmrs(dmrs=dmrs):
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=True))
assert len(matches) <= 1
if len(matches) == 1 and len(caption_dmrs) == len(dmrs) and all(dmrs[matches[0][nodeid]].pred == caption_dmrs[nodeid].pred for nodeid in caption_dmrs):
yield caption
def attribute_caption(self, dmrs):
# predtype: relation
matches = list(dmrs_exact_matching(sub_dmrs=self.relation_attribute, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('attribute > relation')
relation_nodeid = match[self.relation_attribute.anchors['attr'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=relation_nodeid)
for relation, relation_dmrs in self.relation_caption(dmrs=unmatched_dmrs):
attribute_dmrs = copy.deepcopy(self.relation_attribute)
attribute_dmrs.compose(relation_dmrs, fusion={'attr': 'rel'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=attribute_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
attribute = Attribute(predtype='relation', value=relation)
self.unused.discard(('attribute', 'relation'))
yield attribute, attribute_dmrs
# predtype: *
for predtype in self.attributes:
for value in self.attributes[predtype]:
# print(predtype, value)
# print([str(node.pred) for node in self.attributes[predtype][value].iter_nodes()])
# print([str(node.pred) for node in dmrs.iter_nodes()])
matches = list(dmrs_exact_matching(sub_dmrs=self.attributes[predtype][value], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('attribute >', predtype, value)
attribute_dmrs = copy.deepcopy(self.attributes[predtype][value])
attribute = Attribute(predtype=predtype, value=value)
self.unused.discard(('attribute', predtype, value))
yield attribute, attribute_dmrs
def type_caption(self, dmrs): # entity_ not if suffix
matches = list(dmrs_exact_matching(sub_dmrs=self.entity_type, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('type > !')
type_nodeid = match[self.entity_type.anchors['type'].nodeid]
quant_nodeid = match[self.entity_type.anchors['quant'].nodeid]
def next_attribute_caption(type_dmrs, unmatched_dmrs):
no_match = True
for attribute, attribute_dmrs in self.attribute_caption(dmrs=unmatched_dmrs):
if type_dmrs.anchors['type'].pred != attribute_dmrs.anchors['type'].pred and not (type_dmrs.anchors['type'].pred.is_less_specific(attribute_dmrs.anchors['type'].pred, hierarchy=self.hierarchy) or type_dmrs.anchors['type'].pred.is_more_specific(attribute_dmrs.anchors['type'].pred, hierarchy=self.hierarchy)):
continue
next_type_dmrs = copy.deepcopy(type_dmrs)
next_type_dmrs.compose(attribute_dmrs, fusion={'type': 'type', 'quant': 'quant'}, hierarchy=self.hierarchy)
if all(nodeid in type_dmrs and next_type_dmrs[nodeid].pred == type_dmrs[nodeid].pred for nodeid in next_type_dmrs):
continue
matches = list(dmrs_exact_matching(sub_dmrs=next_type_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
no_match = False
next_unmatched_dmrs = copy.deepcopy(unmatched_dmrs)
next_unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in next_type_dmrs if match[nodeid] != type_nodeid and match[nodeid] != quant_nodeid)
# attributes.append(attribute)
for entity_type, next_type_dmrs in list(next_attribute_caption(type_dmrs=next_type_dmrs, unmatched_dmrs=next_unmatched_dmrs)):
entity_type.value.append(attribute)
yield entity_type, next_type_dmrs
if no_match:
entity_type = EntityType()
yield entity_type, type_dmrs
type_dmrs = copy.deepcopy(self.entity_type)
unmatched_dmrs = SubDmrs(dmrs=dmrs)
# attributes = list()
for entity_type, type_dmrs in list(next_attribute_caption(type_dmrs=type_dmrs, unmatched_dmrs=unmatched_dmrs)):
self.unused.discard(('type',))
yield entity_type, type_dmrs
def selector_caption(self, dmrs):
# predtype: unique
matches = list(dmrs_exact_matching(sub_dmrs=self.unique_selector, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('selector > unique')
unmatched_dmrs = SubDmrs(dmrs=dmrs)
for entity_type, type_dmrs in self.type_caption(dmrs=unmatched_dmrs):
selector_dmrs = copy.deepcopy(self.unique_selector)
selector_dmrs.compose(type_dmrs, fusion={'scope': 'type', 'quant': 'quant'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=selector_dmrs, dmrs=dmrs, hierarchy=self.hierarchy))
if len(matches) >= 1:
selector = Selector(predtype='unique', scope=entity_type)
self.unused.discard(('selector', 'unique'))
yield selector, selector_dmrs
# predtype: *
for predtype in self.selectors:
for value in self.selectors[predtype]:
selector_dmrs = self.selectors[predtype][value]
matches = list(dmrs_exact_matching(sub_dmrs=selector_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('selector >', predtype, value)
selector_nodeid = match[selector_dmrs.anchors['sel'].nodeid]
scope_nodeid = match[selector_dmrs.anchors['scope'].nodeid]
if predtype in Selector.comparison_selectors:
comparison_nodeid = match[selector_dmrs.anchors['comp'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=scope_nodeid, exclude=(selector_nodeid, comparison_nodeid))
else:
unmatched_dmrs = dmrs.subgraph(nodeid=scope_nodeid, exclude=(selector_nodeid,))
for scope, scope_dmrs in self.type_caption(dmrs=unmatched_dmrs):
# print('selector > scope')
scope_selector_dmrs = copy.deepcopy(selector_dmrs)
scope_selector_dmrs.compose(scope_dmrs, fusion={'scope': 'type', 'quant': 'quant'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=scope_selector_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
if predtype in Selector.comparison_selectors:
unmatched_dmrs = dmrs.subgraph(nodeid=comparison_nodeid, exclude=(selector_nodeid, scope_nodeid))
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in scope_selector_dmrs if match[nodeid] != comparison_nodeid)
for comparison, comparison_dmrs in self.selector_caption(dmrs=unmatched_dmrs):
# print('selector > comparison')
comp_selector_dmrs = copy.deepcopy(scope_selector_dmrs)
comp_selector_dmrs.compose(comparison_dmrs, fusion={'comp': 'scope'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=comp_selector_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
selector = Selector(predtype=predtype, value=value, scope=scope, comparison=comparison)
                                        self.unused.discard(('selector', predtype, value))
yield selector, comp_selector_dmrs
else:
selector = Selector(predtype=predtype, value=value, scope=scope)
self.unused.discard(('selector', predtype, value))
yield selector, scope_selector_dmrs
def relation_caption(self, dmrs):
# predtype: attribute
matches = list(dmrs_exact_matching(sub_dmrs=self.attribute_relation, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('relation > attribute')
relation_nodeid = match[self.attribute_relation.anchors['rel'].nodeid]
type_nodeid = match[self.attribute_relation.anchors['type'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=type_nodeid, exclude=(relation_nodeid,))
for attribute, attribute_dmrs in self.attribute_caption(dmrs=unmatched_dmrs):
relation_dmrs = copy.deepcopy(self.attribute_relation)
relation_dmrs.compose(attribute_dmrs, fusion={'type': 'type'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=relation_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
relation = Relation(predtype='attribute', value=attribute)
self.unused.discard(('relation', 'attribute'))
yield relation, relation_dmrs
# predtype: type
matches = list(dmrs_exact_matching(sub_dmrs=self.type_relation, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('relation > type')
relation_nodeid = match[self.type_relation.anchors['rel'].nodeid]
type_nodeid = match[self.type_relation.anchors['type'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=type_nodeid, exclude=(relation_nodeid,))
for entity_type, type_dmrs in self.type_caption(dmrs=unmatched_dmrs):
relation_dmrs = copy.deepcopy(self.type_relation)
relation_dmrs.compose(type_dmrs, fusion={'type': 'type'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=relation_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
relation = Relation(predtype='type', value=entity_type)
self.unused.discard(('relation', 'type'))
yield relation, relation_dmrs
# predtype: *
for predtype in self.relations:
for value in self.relations[predtype]:
relation_dmrs = self.relations[predtype][value]
matches = list(dmrs_exact_matching(sub_dmrs=relation_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('relation >', predtype, value)
# print([str(node.pred) for node in self.relations[predtype][value].iter_nodes()])
# print([str(node.pred) for node in dmrs.iter_nodes()])
relation_nodeid = match[relation_dmrs.anchors['rel'].nodeid]
reference_nodeid = match[relation_dmrs.anchors['ref'].nodeid]
if predtype in Relation.meta_relations:
unmatched_dmrs = dmrs.subgraph(nodeid=reference_nodeid, exclude=(relation_nodeid,))
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in relation_dmrs if match[nodeid] != reference_nodeid)
else:
quantifier_nodeid = match[relation_dmrs.anchors['quant'].nodeid]
if predtype in Relation.ternary_relations:
comparison_nodeid = match[relation_dmrs.anchors['comp'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=reference_nodeid, exclude=(relation_nodeid, comparison_nodeid))
else:
unmatched_dmrs = dmrs.subgraph(nodeid=reference_nodeid, exclude=(relation_nodeid,))
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in relation_dmrs if match[nodeid] not in (reference_nodeid, quantifier_nodeid))
if predtype in Relation.meta_relations:
reference_iter = self.relation_caption(dmrs=unmatched_dmrs)
else:
reference_iter = self.type_caption(dmrs=unmatched_dmrs)
for reference, reference_dmrs in reference_iter:
# print('relation > reference')
ref_relation_dmrs = copy.deepcopy(relation_dmrs)
if predtype in Relation.meta_relations:
ref_relation_dmrs.compose(reference_dmrs, fusion={'ref': 'rel'}, hierarchy=self.hierarchy)
else:
ref_relation_dmrs.compose(reference_dmrs, fusion={'ref': 'type', 'quant': 'quant'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=ref_relation_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
if predtype in Relation.ternary_relations:
unmatched_dmrs = dmrs.subgraph(nodeid=comparison_nodeid, exclude=(relation_nodeid, reference_nodeid))
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in ref_relation_dmrs if match[nodeid] != comparison_nodeid)
for comparison, comparison_dmrs in self.selector_caption(dmrs=unmatched_dmrs):
# print('relation > comparison')
comp_relation_dmrs = copy.deepcopy(ref_relation_dmrs)
comp_relation_dmrs.compose(comparison_dmrs, fusion={'comp': 'scope'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=comp_relation_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
relation = Relation(predtype=predtype, value=value, reference=reference, comparison=comparison)
self.unused.discard(('relation', predtype, value))
yield relation, comp_relation_dmrs
else:
relation = Relation(predtype=predtype, value=value, reference=reference)
self.unused.discard(('relation', predtype, value))
yield relation, ref_relation_dmrs
def existential_caption(self, dmrs):
matches = list(dmrs_exact_matching(sub_dmrs=self.type_existential, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('existential type > restrictor')
restrictor_nodeid = match[self.type_existential.anchors['rstr'].nodeid]
body_nodeid = match[self.type_existential.anchors['body'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=restrictor_nodeid, exclude=(body_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
for restrictor, restrictor_dmrs in self.type_caption(dmrs=unmatched_dmrs):
rstr_existential_dmrs = copy.deepcopy(self.type_existential)
rstr_existential_dmrs.compose(restrictor_dmrs, fusion={'rstr': 'type', 'quant': 'quant'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=rstr_existential_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
# print('existential type > body')
for match in matches:
unmatched_dmrs = dmrs.subgraph(nodeid=body_nodeid, exclude=(restrictor_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in rstr_existential_dmrs if match[nodeid] != body_nodeid)
for body, body_dmrs in self.relation_caption(dmrs=unmatched_dmrs):
body_existential_dmrs = copy.deepcopy(rstr_existential_dmrs)
body_existential_dmrs.compose(body_dmrs, fusion={'body': 'rel'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=body_existential_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
existential = Existential(restrictor=restrictor, body=body)
self.unused.discard(('existential', 'type'))
yield existential, body_existential_dmrs
matches = list(dmrs_exact_matching(sub_dmrs=self.selector_existential, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('existential selector > restrictor')
restrictor_nodeid = match[self.selector_existential.anchors['rstr'].nodeid]
body_nodeid = match[self.selector_existential.anchors['body'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=restrictor_nodeid, exclude=(body_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
for restrictor, restrictor_dmrs in self.selector_caption(dmrs=unmatched_dmrs):
rstr_existential_dmrs = copy.deepcopy(self.selector_existential)
rstr_existential_dmrs.compose(restrictor_dmrs, fusion={'rstr': 'scope'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=rstr_existential_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
# print('existential selector > body')
for match in matches:
unmatched_dmrs = dmrs.subgraph(nodeid=body_nodeid, exclude=(restrictor_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in rstr_existential_dmrs if match[nodeid] != body_nodeid)
for body, body_dmrs in self.relation_caption(dmrs=unmatched_dmrs):
body_existential_dmrs = copy.deepcopy(rstr_existential_dmrs)
body_existential_dmrs.compose(body_dmrs, fusion={'body': 'rel'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=body_existential_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
existential = Existential(restrictor=restrictor, body=body)
self.unused.discard(('existential', 'selector'))
yield existential, body_existential_dmrs
def quantifier_caption(self, dmrs):
for qtype in self.quantifiers:
for qrange in self.quantifiers[qtype]:
for quantity in self.quantifiers[qtype][qrange]:
# if any(str(node.pred) == '_at+least_x_deg' for node in dmrs.iter_nodes()) and any(str(node.pred) == '_quarter_n_of' for node in dmrs.iter_nodes()) and any(str(node.pred) == '_at+least_x_deg' for node in self.quantifiers[qtype][qrange][quantity].iter_nodes()):
# print([str(node) for node in dmrs.iter_nodes()])
# print([str(node) for node in self.quantifiers[qtype][qrange][quantity].iter_nodes()])
quantifier_dmrs = self.quantifiers[qtype][qrange][quantity]
matches = list(dmrs_exact_matching(sub_dmrs=quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('quantifier > restrictor', qtype, qrange, quantity)
restrictor_nodeid = match[quantifier_dmrs.anchors['rstr'].nodeid]
body_nodeid = match[quantifier_dmrs.anchors['body'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=restrictor_nodeid, exclude=(body_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
for restrictor, restrictor_dmrs in self.type_caption(dmrs=unmatched_dmrs): # only one?
rstr_quantifier_dmrs = copy.deepcopy(quantifier_dmrs)
rstr_quantifier_dmrs.compose(restrictor_dmrs, fusion={'rstr': 'type', 'quant': 'quant'}, hierarchy=self.hierarchy)
# print('rstr', rstr_quantifier_dmrs.dumps_xml())
# print('dmrs', dmrs.dumps_xml())
matches = list(dmrs_exact_matching(sub_dmrs=rstr_quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('quantifier > body')
unmatched_dmrs = dmrs.subgraph(nodeid=body_nodeid, exclude=(restrictor_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in rstr_quantifier_dmrs if match[nodeid] != body_nodeid)
for body, body_dmrs in self.relation_caption(dmrs=unmatched_dmrs): # only one?
body_quantifier_dmrs = copy.deepcopy(rstr_quantifier_dmrs)
body_quantifier_dmrs.compose(body_dmrs, fusion={'body': 'rel'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=body_quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
quantifier = Quantifier(qtype=qtype, qrange=qrange, quantity=quantity, restrictor=restrictor, body=body)
self.unused.discard(('quantifier', qtype, qrange, quantity))
yield quantifier, body_quantifier_dmrs
def number_bound_caption(self, dmrs):
for qtype in self.quantifiers:
for qrange in self.quantifiers[qtype]:
for quantity in self.quantifiers[qtype][qrange]:
quantifier_dmrs = self.quantifiers[qtype][qrange][quantity]
matches = list(dmrs_exact_matching(sub_dmrs=quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('number-bound > quantifier > restrictor', qtype, qrange, quantity)
restrictor_nodeid = match[quantifier_dmrs.anchors['rstr'].nodeid]
body_nodeid = match[quantifier_dmrs.anchors['body'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=restrictor_nodeid, exclude=(body_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
for bound in self.number_bounds:
bound_dmrs = self.number_bounds[bound]
matches = list(dmrs_exact_matching(sub_dmrs=bound_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('number-bound >', bound)
scope_nodeid = match[bound_dmrs.anchors['scope'].nodeid]
type_nodeid = match[bound_dmrs.anchors['type'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=scope_nodeid, exclude=(type_nodeid, body_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
for restrictor, restrictor_dmrs in self.type_caption(dmrs=unmatched_dmrs):
rstr_bound_dmrs = copy.deepcopy(bound_dmrs)
rstr_bound_dmrs.compose(restrictor_dmrs, fusion={'scope': 'type', 'tquant': 'quant'}, hierarchy=self.hierarchy)
rstr_quantifier_dmrs = copy.deepcopy(quantifier_dmrs)
rstr_quantifier_dmrs.compose(rstr_bound_dmrs, fusion={'rstr': 'type', 'quant': 'quant'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=rstr_quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('number-bound > quantifier > body')
unmatched_dmrs = dmrs.subgraph(nodeid=body_nodeid, exclude=(restrictor_nodeid,)) # dmrs.index.nodeid, dmrs.top.nodeid
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in rstr_quantifier_dmrs if match[nodeid] != body_nodeid)
for body, body_dmrs in self.relation_caption(dmrs=unmatched_dmrs): # only one?
body_quantifier_dmrs = copy.deepcopy(rstr_quantifier_dmrs)
body_quantifier_dmrs.compose(body_dmrs, fusion={'body': 'rel'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=body_quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
quantifier = Quantifier(qtype=qtype, qrange=qrange, quantity=quantity, restrictor=restrictor, body=body)
number_bound = NumberBound(bound=bound, quantifier=quantifier)
self.unused.discard(('number-bound', bound))
yield number_bound, body_quantifier_dmrs
def comparative_quantifier_caption(self, dmrs):
for qtype in self.comparative_quantifiers:
for qrange in self.comparative_quantifiers[qtype]:
for quantity in self.comparative_quantifiers[qtype][qrange]:
quantifier_dmrs = self.comparative_quantifiers[qtype][qrange][quantity]
matches = list(dmrs_exact_matching(sub_dmrs=quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('comparative-quantifier > restrictor')
restrictor_nodeid = match[quantifier_dmrs.anchors['rstr'].nodeid]
restrictor_quantifier_nodeid = match[quantifier_dmrs.anchors['rquant'].nodeid]
comparison_nodeid = match[quantifier_dmrs.anchors['comp'].nodeid]
comparison_quantifier_nodeid = match[quantifier_dmrs.anchors['cquant'].nodeid]
body_nodeid = match[quantifier_dmrs.anchors['body'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=restrictor_nodeid, exclude=(comparison_nodeid, comparison_quantifier_nodeid, body_nodeid)) # dmrs.index.nodeid, dmrs.top.nodeid
for restrictor, restrictor_dmrs in self.type_caption(dmrs=unmatched_dmrs):
rstr_quantifier_dmrs = copy.deepcopy(quantifier_dmrs)
rstr_quantifier_dmrs.compose(restrictor_dmrs, fusion={'rstr': 'type', 'rquant': 'quant'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=rstr_quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
# print('comparative-quantifier > comparison')
for match in matches:
unmatched_dmrs = dmrs.subgraph(nodeid=comparison_nodeid, exclude=(restrictor_nodeid, restrictor_quantifier_nodeid, body_nodeid)) # dmrs.index.nodeid, dmrs.top.nodeid
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in rstr_quantifier_dmrs if match[nodeid] not in (comparison_nodeid, comparison_quantifier_nodeid))
for comparison, comparison_dmrs in self.type_caption(dmrs=unmatched_dmrs):
comp_quantifier_dmrs = copy.deepcopy(rstr_quantifier_dmrs)
comp_quantifier_dmrs.compose(comparison_dmrs, fusion={'comp': 'type', 'cquant': 'quant'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=comp_quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
# print('comparative-quantifier > body')
for match in matches:
unmatched_dmrs = dmrs.subgraph(nodeid=body_nodeid, exclude=(restrictor_nodeid, restrictor_quantifier_nodeid, comparison_nodeid, comparison_quantifier_nodeid)) # dmrs.index.nodeid, dmrs.top.nodeid
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in comp_quantifier_dmrs if match[nodeid] != body_nodeid)
for body, body_dmrs in self.relation_caption(dmrs=unmatched_dmrs):
body_quantifier_dmrs = copy.deepcopy(comp_quantifier_dmrs)
body_quantifier_dmrs.compose(body_dmrs, fusion={'body': 'rel'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=body_quantifier_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
comparative_quantifier = ComparativeQuantifier(qtype=qtype, qrange=qrange, quantity=quantity, restrictor=restrictor, comparison=comparison, body=body)
self.unused.discard(('comparative-quantifier', qtype, qrange, quantity))
yield comparative_quantifier, body_quantifier_dmrs
def proposition_caption(self, dmrs):
for proptype in self.propositions:
if proptype in ('attribute', 'type', 'selector', 'relation', 'existential', 'quantifier', 'number-bound', 'comparative-quantifier'):
continue
proposition_dmrs = self.propositions[proptype]
matches = list(dmrs_exact_matching(sub_dmrs=proposition_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('proposition > arg1')
head_nodeid = match[proposition_dmrs.anchors['head'].nodeid]
arg1_nodeid = match[proposition_dmrs.anchors['arg1'].nodeid]
arg2_nodeid = match[proposition_dmrs.anchors['arg2'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=arg1_nodeid, exclude=(head_nodeid, arg2_nodeid)) # dmrs.index.nodeid, dmrs.top.nodeid
for arg1, arg1_dmrs in self.caption_with_dmrs(dmrs=unmatched_dmrs):
arg1_proposition_dmrs = copy.deepcopy(proposition_dmrs)
arg1_proposition_dmrs.compose(arg1_dmrs, fusion={'arg1': 'head'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=arg1_proposition_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('proposition > arg2')
unmatched_dmrs = dmrs.subgraph(nodeid=arg2_nodeid, exclude=(head_nodeid, arg1_nodeid)) # dmrs.index.nodeid, dmrs.top.nodeid
unmatched_dmrs.remove_nodes(match[nodeid] for nodeid in arg1_proposition_dmrs if match[nodeid] != arg2_nodeid)
for arg2, arg2_dmrs in self.caption_with_dmrs(dmrs=unmatched_dmrs):
arg2_proposition_dmrs = copy.deepcopy(arg1_proposition_dmrs)
arg2_proposition_dmrs.compose(arg2_dmrs, fusion={'arg2': 'head'}, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=arg2_proposition_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) >= 1:
proposition = Proposition(proptype=proptype, clauses=(arg1, arg2))
self.unused.discard(('proposition', proptype))
yield proposition, arg2_proposition_dmrs
def caption_with_dmrs(self, dmrs):
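        """Yields all caption analyses for the DMRS together with their DMRS graphs.
        Propositional captions are tried first, followed by the remaining
        caption types from comparative quantifiers down to plain attributes,
        each anchored at the corresponding proposition pattern.
        """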
yield from self.proposition_caption(dmrs=dmrs)
if 'comparative-quantifier' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['comparative-quantifier'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > comparative-quantifier')
quantifier_nodeid = match[self.propositions['comparative-quantifier'].anchors['head'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=quantifier_nodeid) # , dmrs.top.nodeid
for comparative_quantifier, quantifier_dmrs in self.comparative_quantifier_caption(dmrs=unmatched_dmrs): # only one?
caption_dmrs = copy.deepcopy(self.propositions['comparative-quantifier'])
caption_dmrs.compose(quantifier_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'comparative-quantifier'))
yield comparative_quantifier, caption_dmrs
if 'number-bound' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['number-bound'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > number-bound')
bound_nodeid = match[self.propositions['number-bound'].anchors['head'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=bound_nodeid) # , dmrs.top.nodeid
for number_bound, bound_dmrs in self.number_bound_caption(dmrs=unmatched_dmrs): # only one?
caption_dmrs = copy.deepcopy(self.propositions['number-bound'])
caption_dmrs.compose(bound_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'number-bound'))
yield number_bound, caption_dmrs
if 'quantifier' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['quantifier'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > quantifier')
quantifier_nodeid = match[self.propositions['quantifier'].anchors['head'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=quantifier_nodeid) # , dmrs.top.nodeid
for quantifier, quantifier_dmrs in self.quantifier_caption(dmrs=unmatched_dmrs): # only one?
caption_dmrs = copy.deepcopy(self.propositions['quantifier'])
caption_dmrs.compose(quantifier_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'quantifier'))
yield quantifier, caption_dmrs
if 'existential' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['existential'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > existential')
existential_nodeid = match[self.propositions['existential'].anchors['head'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=existential_nodeid) # , dmrs.top.nodeid
for existential, existential_dmrs in self.existential_caption(dmrs=unmatched_dmrs): # only one?
caption_dmrs = copy.deepcopy(self.propositions['existential'])
caption_dmrs.compose(existential_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'existential'))
yield existential, caption_dmrs
if 'relation' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['relation'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > relation')
relation_nodeid = match[self.propositions['relation'].anchors['rel'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=relation_nodeid) # , dmrs.top.nodeid
for relation, relation_dmrs in self.relation_caption(dmrs=unmatched_dmrs): # only one?
caption_dmrs = copy.deepcopy(self.propositions['relation'])
caption_dmrs.compose(relation_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'relation'))
yield relation, caption_dmrs
if 'selector' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['selector'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > selector')
scope_nodeid = match[self.propositions['selector'].anchors['scope'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=scope_nodeid) # , dmrs.top.nodeid
for selector, selector_dmrs in self.selector_caption(dmrs=unmatched_dmrs): # only one?
caption_dmrs = copy.deepcopy(self.propositions['selector'])
caption_dmrs.compose(selector_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'selector'))
yield selector, caption_dmrs
if 'type' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['type'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > type')
type_nodeid = match[self.propositions['type'].anchors['type'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=type_nodeid) # , dmrs.top.nodeid
for entity_type, type_dmrs in self.type_caption(dmrs=unmatched_dmrs): # only one?
caption_dmrs = copy.deepcopy(self.propositions['type'])
caption_dmrs.compose(type_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'type'))
yield entity_type, caption_dmrs
if 'attribute' in self.propositions:
matches = list(dmrs_exact_matching(sub_dmrs=self.propositions['attribute'], dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
for match in matches:
# print('caption > attribute')
attribute_nodeid = match[self.propositions['attribute'].anchors['type'].nodeid]
unmatched_dmrs = dmrs.subgraph(nodeid=attribute_nodeid) # , dmrs.top.nodeid
for attribute, attribute_dmrs in self.attribute_caption(dmrs=unmatched_dmrs):
caption_dmrs = copy.deepcopy(self.propositions['attribute'])
caption_dmrs.compose(attribute_dmrs, hierarchy=self.hierarchy)
matches = list(dmrs_exact_matching(sub_dmrs=caption_dmrs, dmrs=dmrs, hierarchy=self.hierarchy, match_top_index=False))
if len(matches) == 1:
self.unused.discard(('proposition', 'attribute'))
yield attribute, caption_dmrs
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.api_schema.response.compute import servers as common_schema
from tempest.api_schema.response.compute.v2 import servers as schema
from tempest.common import rest_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
CONF = config.CONF
class ServersClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
super(ServersClientJSON, self).__init__(auth_provider)
self.service = CONF.compute.catalog_type
def create_server(self, name, image_ref, flavor_ref, **kwargs):
"""
Creates an instance of a server.
name (Required): The name of the server.
image_ref (Required): Reference to the image used to build the server.
flavor_ref (Required): The flavor used to build the server.
        The following optional keyword arguments are accepted:
adminPass: Sets the initial root password.
key_name: Key name of keypair that was created earlier.
meta: A dictionary of values to be used as metadata.
personality: A list of dictionaries for files to be injected into
the server.
security_groups: A list of security group dicts.
networks: A list of network dicts with UUID and fixed_ip.
user_data: User data for instance.
availability_zone: Availability zone in which to launch instance.
accessIPv4: The IPv4 access address for the server.
accessIPv6: The IPv6 access address for the server.
        min_count: Minimum number of instances to launch.
        max_count: Maximum number of instances to launch.
disk_config: Determines if user or admin controls disk configuration.
return_reservation_id: Enable/Disable the return of reservation id
block_device_mapping: Block device mapping for the server.
block_device_mapping_v2: Block device mapping V2 for the server.
"""
post_body = {
'name': name,
'imageRef': image_ref,
'flavorRef': flavor_ref
}
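        # Copy optional keyword arguments into the request body; tuple entries
        # map an API field name to a differently named keyword argument.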
for option in ['personality', 'adminPass', 'key_name',
'security_groups', 'networks', 'user_data',
'availability_zone', 'accessIPv4', 'accessIPv6',
'min_count', 'max_count', ('metadata', 'meta'),
('OS-DCF:diskConfig', 'disk_config'),
'return_reservation_id', 'block_device_mapping',
'block_device_mapping_v2']:
if isinstance(option, tuple):
post_param = option[0]
key = option[1]
else:
post_param = option
key = option
value = kwargs.get(key)
if value is not None:
post_body[post_param] = value
post_body = {'server': post_body}
if 'sched_hints' in kwargs:
hints = {'os:scheduler_hints': kwargs.get('sched_hints')}
            post_body.update(hints)
post_body = json.dumps(post_body)
resp, body = self.post('servers', post_body)
body = json.loads(body)
# NOTE(maurosr): this deals with the case of multiple server create
# with return reservation id set True
if 'reservation_id' in body:
return resp, body
if CONF.compute_feature_enabled.enable_instance_password:
create_schema = schema.create_server_with_admin_pass
else:
create_schema = schema.create_server
self.validate_response(create_schema, resp, body)
return resp, body['server']
def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
accessIPv6=None, disk_config=None):
"""
Updates the properties of an existing server.
server_id: The id of an existing server.
name: The name of the server.
personality: A list of files to be injected into the server.
accessIPv4: The IPv4 access address for the server.
accessIPv6: The IPv6 access address for the server.
"""
post_body = {}
if meta is not None:
post_body['metadata'] = meta
if name is not None:
post_body['name'] = name
if accessIPv4 is not None:
post_body['accessIPv4'] = accessIPv4
if accessIPv6 is not None:
post_body['accessIPv6'] = accessIPv6
if disk_config is not None:
post_body['OS-DCF:diskConfig'] = disk_config
post_body = json.dumps({'server': post_body})
resp, body = self.put("servers/%s" % str(server_id), post_body)
body = json.loads(body)
self.validate_response(schema.update_server, resp, body)
return resp, body['server']
def get_server(self, server_id):
"""Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
self.validate_response(schema.get_server, resp, body)
return resp, body['server']
def delete_server(self, server_id):
"""Deletes the given server."""
resp, body = self.delete("servers/%s" % str(server_id))
self.validate_response(common_schema.delete_server, resp, body)
return resp, body
def list_servers(self, params=None):
"""Lists all servers for a user."""
url = 'servers'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(common_schema.list_servers, resp, body)
return resp, body
def list_servers_with_detail(self, params=None):
"""Lists all servers in detail for a user."""
url = 'servers/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_servers_detail, resp, body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
raise_on_error=True):
"""Waits for a server to reach a given status."""
return waiters.wait_for_server_status(self, server_id, status,
extra_timeout=extra_timeout,
raise_on_error=raise_on_error)
def wait_for_server_termination(self, server_id, ignore_error=False):
"""Waits for server to reach termination."""
start_time = int(time.time())
while True:
try:
resp, body = self.get_server(server_id)
except exceptions.NotFound:
return
server_status = body['status']
if server_status == 'ERROR' and not ignore_error:
raise exceptions.BuildErrorException(server_id=server_id)
if int(time.time()) - start_time >= self.build_timeout:
raise exceptions.TimeoutException
time.sleep(self.build_interval)
def list_addresses(self, server_id):
"""Lists all addresses for a server."""
resp, body = self.get("servers/%s/ips" % str(server_id))
body = json.loads(body)
self.validate_response(schema.list_addresses, resp, body)
return resp, body['addresses']
def list_addresses_by_network(self, server_id, network_id):
"""Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
self.validate_response(schema.list_addresses_by_network, resp, body)
return resp, body
def action(self, server_id, action_name, response_key,
schema=common_schema.server_actions_common_schema, **kwargs):
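        """Posts {action_name: kwargs} to the server's action endpoint.
        When response_key is given, the (optionally schema-validated) value
        under that key is returned as the body; otherwise the raw response is
        validated and returned.
        """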
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
if response_key is not None:
body = json.loads(body)
            # Check whether schema is None: callers may pass None to skip
            # validation for server actions whose response schema is not yet
            # implemented. Once every server action has a schema, this check
            # can be removed so that all action responses are validated.
            # TODO(GMann): Remove the below 'if' check once all server action
            # schemas are implemented.
if schema is not None:
self.validate_response(schema, resp, body)
body = body[response_key]
else:
self.validate_response(schema, resp, body)
return resp, body
def create_backup(self, server_id, backup_type, rotation, name):
"""Backup a server instance."""
return self.action(server_id, "createBackup", None,
backup_type=backup_type,
rotation=rotation,
name=name)
def change_password(self, server_id, adminPass):
"""Changes the root password for the server."""
return self.action(server_id, 'changePassword', None,
adminPass=adminPass)
def get_password(self, server_id):
resp, body = self.get("servers/%s/os-server-password" %
str(server_id))
body = json.loads(body)
self.validate_response(common_schema.get_password, resp, body)
return resp, body
def delete_password(self, server_id):
"""
        Removes the encrypted server password from the metadata server.
        Note that this does not actually change the instance's server
        password.
"""
resp, body = self.delete("servers/%s/os-server-password" %
str(server_id))
self.validate_response(common_schema.server_actions_delete_password,
resp, body)
return resp, body
def reboot(self, server_id, reboot_type):
"""Reboots a server."""
return self.action(server_id, 'reboot', None, type=reboot_type)
def rebuild(self, server_id, image_ref, **kwargs):
"""Rebuilds a server with a new image."""
kwargs['imageRef'] = image_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
del kwargs['disk_config']
if CONF.compute_feature_enabled.enable_instance_password:
rebuild_schema = schema.rebuild_server_with_admin_pass
else:
rebuild_schema = schema.rebuild_server
return self.action(server_id, 'rebuild', 'server',
rebuild_schema, **kwargs)
def resize(self, server_id, flavor_ref, **kwargs):
"""Changes the flavor of a server."""
kwargs['flavorRef'] = flavor_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
del kwargs['disk_config']
return self.action(server_id, 'resize', None, **kwargs)
def confirm_resize(self, server_id, **kwargs):
"""Confirms the flavor change for a server."""
return self.action(server_id, 'confirmResize',
None, schema.server_actions_confirm_resize,
**kwargs)
def revert_resize(self, server_id, **kwargs):
"""Reverts a server back to its original flavor."""
return self.action(server_id, 'revertResize', None, **kwargs)
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
self.validate_response(common_schema.list_server_metadata, resp, body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
if no_metadata_field:
post_body = ""
else:
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
self.validate_response(common_schema.set_server_metadata, resp, body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
self.validate_response(common_schema.update_server_metadata,
resp, body)
return resp, body['metadata']
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
self.validate_response(schema.set_get_server_metadata_item,
resp, body)
return resp, body['meta']
def set_server_metadata_item(self, server_id, key, meta):
post_body = json.dumps({'meta': meta})
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body)
body = json.loads(body)
self.validate_response(schema.set_get_server_metadata_item,
resp, body)
return resp, body['meta']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
self.validate_response(common_schema.delete_server_metadata_item,
resp, body)
return resp, body
def stop(self, server_id, **kwargs):
return self.action(server_id, 'os-stop', None, **kwargs)
def start(self, server_id, **kwargs):
return self.action(server_id, 'os-start', None, **kwargs)
def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
"""Attaches a volume to a server instance."""
post_body = json.dumps({
'volumeAttachment': {
'volumeId': volume_id,
'device': device,
}
})
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.attach_volume, resp, body)
return resp, body
def detach_volume(self, server_id, volume_id):
"""Detaches a volume from a server instance."""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
self.validate_response(schema.detach_volume, resp, body)
return resp, body
def add_security_group(self, server_id, name):
"""Adds a security group to the server."""
return self.action(server_id, 'addSecurityGroup', None, name=name)
def remove_security_group(self, server_id, name):
"""Removes a security group from the server."""
return self.action(server_id, 'removeSecurityGroup', None, name=name)
def live_migrate_server(self, server_id, dest_host, use_block_migration):
"""This should be called with administrator privileges ."""
migrate_params = {
"disk_over_commit": False,
"block_migration": use_block_migration,
"host": dest_host
}
req_body = json.dumps({'os-migrateLive': migrate_params})
resp, body = self.post("servers/%s/action" % str(server_id), req_body)
self.validate_response(common_schema.server_actions_common_schema,
resp, body)
return resp, body
def migrate_server(self, server_id, **kwargs):
"""Migrates a server to a new host."""
return self.action(server_id, 'migrate', None, **kwargs)
def lock_server(self, server_id, **kwargs):
"""Locks the given server."""
return self.action(server_id, 'lock', None, **kwargs)
def unlock_server(self, server_id, **kwargs):
"""UNlocks the given server."""
return self.action(server_id, 'unlock', None, **kwargs)
def suspend_server(self, server_id, **kwargs):
"""Suspends the provided server."""
return self.action(server_id, 'suspend', None, **kwargs)
def resume_server(self, server_id, **kwargs):
"""Un-suspends the provided server."""
return self.action(server_id, 'resume', None, **kwargs)
def pause_server(self, server_id, **kwargs):
"""Pauses the provided server."""
return self.action(server_id, 'pause', None, **kwargs)
def unpause_server(self, server_id, **kwargs):
"""Un-pauses the provided server."""
return self.action(server_id, 'unpause', None, **kwargs)
def reset_state(self, server_id, state='error'):
"""Resets the state of a server to active/error."""
return self.action(server_id, 'os-resetState', None, state=state)
def shelve_server(self, server_id, **kwargs):
"""Shelves the provided server."""
return self.action(server_id, 'shelve', None, **kwargs)
def unshelve_server(self, server_id, **kwargs):
"""Un-shelves the provided server."""
return self.action(server_id, 'unshelve', None, **kwargs)
def shelve_offload_server(self, server_id, **kwargs):
"""Shelve-offload the provided server."""
return self.action(server_id, 'shelveOffload', None, **kwargs)
def get_console_output(self, server_id, length):
kwargs = {'length': length} if length else {}
return self.action(server_id, 'os-getConsoleOutput', 'output',
common_schema.get_console_output, **kwargs)
def list_virtual_interfaces(self, server_id):
"""
List the virtual interfaces used in an instance.
"""
resp, body = self.get('/'.join(['servers', server_id,
'os-virtual-interfaces']))
body = json.loads(body)
self.validate_response(schema.list_virtual_interfaces, resp, body)
return resp, body
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
return self.action(server_id, 'rescue', 'adminPass',
schema.rescue_server, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server."""
return self.action(server_id, 'unrescue', None)
def get_server_diagnostics(self, server_id):
"""Get the usage data for a server."""
resp, body = self.get("servers/%s/diagnostics" % str(server_id))
return resp, json.loads(body)
def list_instance_actions(self, server_id):
"""List the provided server action."""
resp, body = self.get("servers/%s/os-instance-actions" %
str(server_id))
body = json.loads(body)
self.validate_response(schema.list_instance_actions, resp, body)
return resp, body['instanceActions']
def get_instance_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
self.validate_response(schema.get_instance_action, resp, body)
return resp, body['instanceAction']
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server."""
return self.action(server_id, 'forceDelete', None, **kwargs)
def restore_soft_deleted_server(self, server_id, **kwargs):
"""Restore a soft-deleted server."""
return self.action(server_id, 'restore', None, **kwargs)
def reset_network(self, server_id, **kwargs):
"""Resets the Network of a server"""
return self.action(server_id, 'resetNetwork', None, **kwargs)
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server"""
return self.action(server_id, 'injectNetworkInfo', None, **kwargs)
def get_vnc_console(self, server_id, console_type):
"""Get URL of VNC console."""
return self.action(server_id, "os-getVNCConsole",
"console", common_schema.get_vnc_console,
type=console_type)
def create_server_group(self, name, policies):
"""
        Creates a server group.
        name : Name of the server group.
        policies : List of the policies (affinity/anti-affinity).
"""
post_body = {
'name': name,
'policies': policies,
}
post_body = json.dumps({'server_group': post_body})
resp, body = self.post('os-server-groups', post_body)
body = json.loads(body)
self.validate_response(schema.create_get_server_group, resp, body)
return resp, body['server_group']
def delete_server_group(self, server_group_id):
"""Delete the given server-group."""
resp, body = self.delete("os-server-groups/%s" % str(server_group_id))
self.validate_response(schema.delete_server_group, resp, body)
return resp, body
def list_server_groups(self):
"""List the server-groups."""
resp, body = self.get("os-server-groups")
body = json.loads(body)
self.validate_response(schema.list_server_groups, resp, body)
return resp, body['server_groups']
def get_server_group(self, server_group_id):
"""Get the details of given server_group."""
resp, body = self.get("os-server-groups/%s" % str(server_group_id))
body = json.loads(body)
self.validate_response(schema.create_get_server_group, resp, body)
return resp, body['server_group']
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, with_statement
import datetime
import django
import logging
import mock
import pytest
import re
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.signals import got_request_exception
from django.core.handlers.wsgi import WSGIRequest
from django.http import QueryDict
from django.template import TemplateSyntaxError
from django.test import TestCase
from django.test.client import Client as DjangoTestClient, ClientHandler as DjangoTestClientHandler
from django.utils.translation import gettext_lazy
from exam import fixture
try:
from django.urls import reverse
except ImportError:
# For Django version less than 1.10.
from django.core.urlresolvers import reverse
from raven.base import Client
from raven.utils.compat import StringIO, iteritems, PY2, string_types, text_type
from raven.contrib.django.client import DjangoClient, record_sql
from raven.contrib.django.celery import CeleryClient
from raven.contrib.django.handlers import SentryHandler
from raven.contrib.django.models import (
SentryDjangoHandler, client, get_client
)
from raven.contrib.django.middleware.wsgi import Sentry
from raven.contrib.django.templatetags.raven import sentry_public_dsn
from raven.contrib.django.views import is_valid_origin
from raven.transport import HTTPTransport
from raven.utils.serializer import transform
from .views import AppError
#from .models import MyTestModel
DJANGO_15 = django.VERSION >= (1, 5, 0)
DJANGO_18 = django.VERSION >= (1, 8, 0)
DJANGO_110 = django.VERSION >= (1, 10, 0)
MIDDLEWARE_ATTR = 'MIDDLEWARE' if DJANGO_110 else 'MIDDLEWARE_CLASSES'
def make_request():
return WSGIRequest(environ={
'wsgi.input': StringIO(),
'REQUEST_METHOD': 'POST',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'CONTENT_TYPE': 'text/html',
'ACCEPT': 'text/html',
})
class MockClientHandler(DjangoTestClientHandler):
def __call__(self, environ, start_response=[]):
        # pretend that start_response is not required
return super(MockClientHandler, self).__call__(environ)
class MockSentryMiddleware(Sentry):
def __call__(self, environ, start_response=[]):
        # pretend that start_response is not required
return list(super(MockSentryMiddleware, self).__call__(environ, start_response))
class MockClient(DjangoClient):
def __init__(self, *args, **kwargs):
self.events = []
super(MockClient, self).__init__(*args, **kwargs)
def send(self, **kwargs):
self.events.append(kwargs)
def is_enabled(self, **kwargs):
return True
class DisabledMockClient(MockClient):
def is_enabled(self, **kwargs):
return False
class Settings(object):
"""
    Context manager that temporarily overrides Django settings and restores
    the original values on exit.
    >>> with Settings(SENTRY_LOGIN_URL='foo'):  #doctest: +SKIP
    ...     print(settings.SENTRY_LOGIN_URL)
"""
NotDefined = object()
def __init__(self, **overrides):
self.overrides = overrides
self._orig = {}
def __enter__(self):
for k, v in iteritems(self.overrides):
self._orig[k] = getattr(settings, k, self.NotDefined)
setattr(settings, k, v)
def __exit__(self, exc_type, exc_value, traceback):
for k, v in iteritems(self._orig):
if v is self.NotDefined:
delattr(settings, k)
else:
setattr(settings, k, v)
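# Illustrative sketch (editor's addition): how the Settings helper above is used
# in these tests -- temporarily override Django settings inside a ``with`` block
# and have the originals restored (or removed) on exit. The setting name below
# is hypothetical.
def _example_settings_override():
    with Settings(SENTRY_EXAMPLE_FLAG=True):
        assert settings.SENTRY_EXAMPLE_FLAG is True
    # The attribute did not exist beforehand, so it is deleted again on exit.
    assert not hasattr(settings, 'SENTRY_EXAMPLE_FLAG')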
class ClientProxyTest(TestCase):
def test_proxy_responds_as_client(self):
assert get_client() == client
@mock.patch.object(MockClient, 'captureMessage')
def test_basic(self, captureMessage):
client.captureMessage(message='foo')
captureMessage.assert_called_once_with(message='foo')
@pytest.mark.usefixtures("user_instance")
class DjangoClientTest(TestCase):
# Fixture setup/teardown
urls = 'tests.contrib.django.urls'
def setUp(self):
self.raven = get_client()
self.handler = SentryDjangoHandler(self.raven)
self.handler.install()
self.addCleanup(self.handler.uninstall)
def test_basic(self):
self.raven.captureMessage(message='foo')
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'sentry.interfaces.Message' in event
message = event['sentry.interfaces.Message']
assert message['message'] == 'foo'
assert event['level'] == logging.ERROR
assert event['message'] == 'foo'
assert isinstance(event['timestamp'], datetime.datetime)
def test_signal_integration(self):
try:
int(None)
except Exception:
got_request_exception.send(sender=self.__class__, request=None)
else:
self.fail('Expected an exception.')
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'exception' in event
exc = event['exception']['values'][-1]
assert exc['type'] == 'TypeError'
assert exc['value'], "int() argument must be a string or a number == not 'NoneType'"
assert event['level'] == logging.ERROR
assert event['message'], "TypeError: int() argument must be a string or a number == not 'NoneType'"
@pytest.mark.skipif(sys.version_info[:2] == (2, 6), reason='Python 2.6')
def test_view_exception(self):
path = reverse('sentry-raise-exc')
self.assertRaises(Exception, self.client.get, path)
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'exception' in event
exc = event['exception']['values'][-1]
assert exc['type'] == 'Exception'
assert exc['value'] == 'view exception'
assert event['level'] == logging.ERROR
assert event['message'] == 'Exception: view exception'
assert 'request' in event
assert event['request']['url'] == 'http://testserver{}'.format(path)
def test_request_data_unavailable_if_request_is_read(self):
with Settings(**{MIDDLEWARE_ATTR: []}):
path = reverse('sentry-readrequest-raise-exc')
self.assertRaises(
AppError,
self.client.post,
path,
'{"a":"b"}',
content_type='application/json')
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert event['request']['data'] == '<unavailable>'
def test_djangorestframeworkcompatmiddleware_fills_request_data(self):
with Settings(**{MIDDLEWARE_ATTR: [
'raven.contrib.django.middleware.DjangoRestFrameworkCompatMiddleware']}):
path = reverse('sentry-readrequest-raise-exc')
self.assertRaises(
AppError,
self.client.post,
path,
'{"a":"b"}',
content_type='application/json')
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert event['request']['data'] == '{"a":"b"}'
def test_capture_event_with_request_middleware(self):
path = reverse('sentry-trigger-event')
resp = self.client.get(path)
assert resp.status_code == 200
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert event['message'] == 'test'
assert 'request' in event
assert event['request']['url'] == 'http://testserver{}'.format(path)
def test_user_info(self):
with Settings(**{MIDDLEWARE_ATTR: [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware']}):
self.assertRaises(Exception, self.client.get, reverse('sentry-raise-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'user' in event
user_info = event['user']
assert user_info == {'ip_address': '127.0.0.1'}
assert self.client.login(username='admin', password='password')
self.assertRaises(Exception, self.client.get, reverse('sentry-raise-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'user' in event
user_info = event['user']
assert user_info == {
'ip_address': '127.0.0.1',
'username': self.user.username,
'id': self.user.id,
'email': self.user.email,
}
@pytest.mark.skipif(not DJANGO_15, reason='< Django 1.5')
def test_get_user_info_abstract_user(self):
from django.db import models
from django.http import HttpRequest
from django.contrib.auth.models import AbstractBaseUser
class MyUser(AbstractBaseUser):
USERNAME_FIELD = 'username'
username = models.CharField(max_length=32)
email = models.EmailField()
user = MyUser(
username='admin',
email='[email protected]',
id=1,
)
request = HttpRequest()
request.META['REMOTE_ADDR'] = '127.0.0.1'
request.user = user
user_info = self.raven.get_user_info(request)
assert user_info == {
'ip_address': '127.0.0.1',
'username': user.username,
'id': user.id,
'email': user.email,
}
request = HttpRequest()
request.META['REMOTE_ADDR'] = '127.0.0.1'
request.META['HTTP_X_FORWARDED_FOR'] = '1.1.1.1, 2.2.2.2'
request.user = user
user_info = self.raven.get_user_info(request)
assert user_info == {
'ip_address': '1.1.1.1',
'username': user.username,
'id': user.id,
'email': user.email,
}
@pytest.mark.skipif(not DJANGO_110, reason='< Django 1.10')
def test_get_user_info_is_authenticated_property(self):
from django.db import models
from django.http import HttpRequest
from django.contrib.auth.models import AbstractBaseUser
class MyUser(AbstractBaseUser):
USERNAME_FIELD = 'username'
username = models.CharField(max_length=32)
email = models.EmailField()
@property
def is_authenticated(self):
return True
user = MyUser(
username='admin',
email='[email protected]',
id=1,
)
request = HttpRequest()
request.META['REMOTE_ADDR'] = '127.0.0.1'
request.user = user
user_info = self.raven.get_user_info(request)
assert user_info == {
'ip_address': '127.0.0.1',
'username': user.username,
'id': user.id,
'email': user.email,
}
request = HttpRequest()
request.META['REMOTE_ADDR'] = '127.0.0.1'
request.META['HTTP_X_FORWARDED_FOR'] = '1.1.1.1, 2.2.2.2'
request.user = user
user_info = self.raven.get_user_info(request)
assert user_info == {
'ip_address': '1.1.1.1',
'username': user.username,
'id': user.id,
'email': user.email,
}
def test_request_middleware_exception(self):
with Settings(**{MIDDLEWARE_ATTR: ['tests.contrib.django.middleware.BrokenRequestMiddleware']}):
self.assertRaises(ImportError, self.client.get, reverse('sentry-raise-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'exception' in event
exc = event['exception']['values'][-1]
assert exc['type'] == 'ImportError'
assert exc['value'] == 'request'
assert event['level'] == logging.ERROR
assert event['message'] == 'ImportError: request'
def test_response_middlware_exception(self):
if django.VERSION[:2] < (1, 3):
return
with Settings(**{MIDDLEWARE_ATTR: ['tests.contrib.django.middleware.BrokenResponseMiddleware']}):
self.assertRaises(ImportError, self.client.get, reverse('sentry-no-error'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'exception' in event
exc = event['exception']['values'][-1]
assert exc['type'] == 'ImportError'
assert exc['value'] == 'response'
assert event['level'] == logging.ERROR
assert event['message'] == 'ImportError: response'
def test_broken_500_handler_with_middleware(self):
with Settings(BREAK_THAT_500=True, INSTALLED_APPS=['raven.contrib.django']):
client = DjangoTestClient(REMOTE_ADDR='127.0.0.1')
client.handler = MockSentryMiddleware(MockClientHandler())
self.assertRaises(Exception, client.get, reverse('sentry-raise-exc'))
            assert len(self.raven.events) in (2, 4)  # TODO: ash remove duplicate client events
event = self.raven.events.pop(0)
assert 'exception' in event
exc = event['exception']['values'][-1]
assert exc['type'] == 'Exception'
assert exc['value'] == 'view exception'
assert event['level'] == logging.ERROR
assert event['message'] == 'Exception: view exception'
event = self.raven.events.pop(0)
assert 'exception' in event
exc = event['exception']['values'][-1]
assert exc['type'] == 'ValueError'
assert exc['value'] == 'handler500'
assert event['level'] == logging.ERROR
assert event['message'] == 'ValueError: handler500'
def test_view_middleware_exception(self):
with Settings(**{MIDDLEWARE_ATTR: ['tests.contrib.django.middleware.BrokenViewMiddleware']}):
self.assertRaises(ImportError, self.client.get, reverse('sentry-raise-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'exception' in event
exc = event['exception']['values'][-1]
assert exc['type'] == 'ImportError'
assert exc['value'] == 'view'
assert event['level'] == logging.ERROR
assert event['message'] == 'ImportError: view'
@pytest.mark.skipif(DJANGO_18, reason='Django 1.8+ not supported')
def test_template_name_as_view(self):
self.assertRaises(TemplateSyntaxError, self.client.get, reverse('sentry-template-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert event['culprit'] == 'error.html'
# def test_request_in_logging(self):
# resp = self.client.get(reverse('sentry-log-request-exc'))
# assert resp.status_code == 200
# assert len(self.raven.events) == 1
# event = self.raven.events.pop(0)
# assert event['culprit'] == 'tests.contrib.django.views in logging_request_exc'
# assert event['data']['META']['REMOTE_ADDR'] == '127.0.0.1'
# TODO: Python bug #10805
@pytest.mark.skipif(not PY2, reason='Python 2')
def test_record_none_exc_info(self):
# sys.exc_info can return (None, None, None) if no exception is being
# handled anywhere on the stack. See:
# http://docs.python.org/library/sys.html#sys.exc_info
record = logging.LogRecord(
'foo',
logging.INFO,
pathname=None,
lineno=None,
msg='test',
args=(),
exc_info=(None, None, None),
)
handler = SentryHandler()
handler.emit(record)
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert event['message'] == 'test'
def test_404_middleware(self):
with Settings(**{MIDDLEWARE_ATTR: ['raven.contrib.django.middleware.Sentry404CatchMiddleware']}):
resp = self.client.get('/non-existent-page')
assert resp.status_code == 404
assert len(self.raven.events) == 1, [e['message'] for e in self.raven.events]
event = self.raven.events.pop(0)
assert event['level'] == logging.INFO
assert event['logger'] == 'http404'
assert 'request' in event
http = event['request']
assert http['url'] == 'http://testserver/non-existent-page'
assert http['method'] == 'GET'
assert http['query_string'] == ''
assert http['data'] is None
def test_404_middleware_when_disabled(self):
extra_settings = {
MIDDLEWARE_ATTR: ['raven.contrib.django.middleware.Sentry404CatchMiddleware'],
'SENTRY_CLIENT': 'tests.contrib.django.tests.DisabledMockClient',
}
with Settings(**extra_settings):
resp = self.client.get('/non-existent-page')
assert resp.status_code == 404
assert self.raven.events == []
def test_invalid_client(self):
extra_settings = {
'SENTRY_CLIENT': 'raven.contrib.django.DjangoClient', # default
}
# Should return fallback client (MockClient)
client = get_client('nonexistent.and.invalid')
# client should be valid, and the same as with the next call.
assert client is get_client()
with Settings(**extra_settings):
assert isinstance(get_client(), DjangoClient)
def test_transport_specification(self):
extra_settings = {
'SENTRY_TRANSPORT': 'raven.transport.HTTPTransport',
'SENTRY_DSN': 'http://public:[email protected]/1',
}
with Settings(**extra_settings):
client = get_client(reset=True)
assert type(client.remote.get_transport()) is HTTPTransport
def test_response_error_id_middleware(self):
# TODO: test with 500s
with Settings(**{MIDDLEWARE_ATTR: [
'raven.contrib.django.middleware.SentryResponseErrorIdMiddleware',
'raven.contrib.django.middleware.Sentry404CatchMiddleware']}):
resp = self.client.get('/non-existent-page')
assert resp.status_code == 404
headers = dict(resp.items())
assert 'X-Sentry-ID' in headers
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert event['event_id'] == headers['X-Sentry-ID']
def test_get_client(self):
assert get_client() == get_client()
assert get_client('raven.base.Client').__class__ == Client
assert get_client() == self.raven
assert get_client('%s.%s' % (type(self.raven).__module__, type(self.raven).__name__)) == self.raven
assert get_client() == self.raven
def test_raw_post_data_partial_read(self):
v = '{"foo": "bar"}'
request = make_request()
request.environ.update({
'wsgi.input': StringIO(v + '\r\n\r\n'),
'CONTENT_TYPE': 'application/octet-stream',
'CONTENT_LENGTH': len(v),
})
request.read(1)
self.raven.captureMessage(message='foo', request=request)
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'request' in event
http = event['request']
assert http['method'] == 'POST'
assert http['data'] == '<unavailable>'
def test_read_post_data(self):
request = make_request()
request.POST = QueryDict("foo=bar&ham=spam")
request.read(1)
self.raven.captureMessage(message='foo', request=request)
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'request' in event
http = event['request']
assert http['method'] == 'POST'
assert http['data'] == {'foo': 'bar', 'ham': 'spam'}
def test_request_capture(self):
request = make_request()
request.read(1)
self.raven.captureMessage(message='foo', request=request)
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'request' in event
http = event['request']
assert http['method'] == 'POST'
assert http['data'] == '<unavailable>'
assert 'headers' in http
headers = http['headers']
assert 'Content-Type' in headers, headers.keys()
assert headers['Content-Type'] == 'text/html'
env = http['env']
assert 'SERVER_NAME' in env, env.keys()
assert env['SERVER_NAME'] == 'testserver'
assert 'SERVER_PORT' in env, env.keys()
assert env['SERVER_PORT'] == '80'
def test_marks_django_frames_correctly(self):
self.assertRaises(TemplateSyntaxError, self.client.get, reverse('sentry-template-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
frames = event['exception']['values'][-1]['stacktrace']['frames']
for frame in frames:
if frame['module'].startswith('django.'):
assert frame.get('in_app') is False
def test_adds_site_to_tags(self):
self.assertRaises(TemplateSyntaxError, self.client.get, reverse('sentry-template-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
tags = event['tags']
assert 'site' in event['tags']
assert tags['site'] == 'example.com'
def test_adds_site_to_tags_fallback(self):
with Settings(SITE_ID=12345): # nonexistent site, should fallback to SITE_ID
self.assertRaises(TemplateSyntaxError, self.client.get, reverse('sentry-template-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
tags = event['tags']
assert 'site' in event['tags']
assert tags['site'] == 12345
def test_settings_site_overrides_contrib(self):
self.raven.site = 'FOO'
self.assertRaises(TemplateSyntaxError, self.client.get, reverse('sentry-template-exc'))
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
tags = event['tags']
assert 'site' in event['tags']
assert tags['site'] == 'FOO'
@mock.patch.object(WSGIRequest, 'build_absolute_uri')
def test_suspicious_operation_in_build_absolute_uri(self, build_absolute_uri):
build_absolute_uri.side_effect = SuspiciousOperation()
request = make_request()
request.META['HTTP_HOST'] = 'example.com'
result = self.raven.get_data_from_request(request)
build_absolute_uri.assert_called_once_with()
assert 'request' in result
assert result['request']['url'] == 'http://example.com/'
class DjangoTemplateTagTest(TestCase):
@mock.patch('raven.contrib.django.DjangoClient.get_public_dsn')
def test_sentry_public_dsn_no_args(self, get_public_dsn):
sentry_public_dsn()
get_public_dsn.assert_called_once_with(None)
@mock.patch('raven.contrib.django.DjangoClient.get_public_dsn')
def test_sentry_public_dsn_with_https(self, get_public_dsn):
sentry_public_dsn('https')
get_public_dsn.assert_called_once_with('https')
class DjangoLoggingTest(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
self.raven = get_client()
def test_request_kwarg(self):
handler = SentryHandler()
logger = self.logger
logger.handlers = []
logger.addHandler(handler)
logger.error('This is a test error', extra={
'request': WSGIRequest(environ={
'wsgi.input': StringIO(),
'REQUEST_METHOD': 'POST',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'CONTENT_TYPE': 'application/octet-stream',
'ACCEPT': 'application/json',
})
})
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'request' in event
http = event['request']
assert http['method'] == 'POST'
def test_tags(self):
tags = {'tag1': 'test'}
handler = SentryHandler(tags=tags)
logger = self.logger
logger.handlers = []
logger.addHandler(handler)
logger.error('This is a test error')
assert len(self.raven.events) == 1
event = self.raven.events.pop(0)
assert 'tags' in event
# event['tags'] also contains some other data, like 'site'
assert 'tag1' in event['tags']
assert event['tags']['tag1'] == tags['tag1']
class CeleryIsolatedClientTest(TestCase):
def setUp(self):
self.client = CeleryClient(
dsn='sync+http://public:[email protected]/1'
)
@mock.patch('raven.contrib.django.celery.send_raw')
def test_send_encoded(self, send_raw):
self.client.send_encoded('foo')
send_raw.delay.assert_called_once_with('foo')
@mock.patch('raven.contrib.django.celery.send_raw')
def test_without_eager(self, send_raw):
"""
Integration test to ensure it propagates all the way down
and calls delay on the task.
"""
self.client.captureMessage(message='test')
assert send_raw.delay.call_count == 1
class IsValidOriginTestCase(TestCase):
def test_setting_empty(self):
with Settings(SENTRY_ALLOW_ORIGIN=None):
assert not is_valid_origin('http://example.com')
def test_setting_all(self):
with Settings(SENTRY_ALLOW_ORIGIN='*'):
assert is_valid_origin('http://example.com')
def test_setting_uri(self):
with Settings(SENTRY_ALLOW_ORIGIN=['http://example.com']):
assert is_valid_origin('http://example.com')
def test_setting_regexp(self):
        with Settings(SENTRY_ALLOW_ORIGIN=[re.compile(r'https?\://(.*\.)?example\.com')]):
assert is_valid_origin('http://example.com')
class ReportViewTest(TestCase):
urls = 'raven.contrib.django.urls'
def setUp(self):
super(ReportViewTest, self).setUp()
self.path = reverse('raven-report', args=['1'], urlconf=self.urls)
@mock.patch('raven.contrib.django.views.is_valid_origin')
def test_calls_is_valid_origin_with_header(self, is_valid_origin):
with self.settings(ROOT_URLCONF=self.urls):
self.client.post(self.path, HTTP_ORIGIN='http://example.com')
is_valid_origin.assert_called_once_with('http://example.com')
@mock.patch('raven.contrib.django.views.is_valid_origin')
def test_calls_is_valid_origin_with_header_as_get(self, is_valid_origin):
with self.settings(ROOT_URLCONF=self.urls):
self.client.get(self.path, HTTP_ORIGIN='http://example.com')
is_valid_origin.assert_called_once_with('http://example.com')
@mock.patch('raven.contrib.django.views.is_valid_origin', mock.Mock(return_value=False))
def test_fails_on_invalid_origin(self):
with self.settings(ROOT_URLCONF=self.urls):
resp = self.client.post(self.path, HTTP_ORIGIN='http://example.com')
assert resp.status_code == 403
@mock.patch('raven.contrib.django.views.is_valid_origin', mock.Mock(return_value=True))
def test_options_call_sends_headers(self):
with self.settings(ROOT_URLCONF=self.urls):
resp = self.client.options(self.path, HTTP_ORIGIN='http://example.com')
assert resp.status_code == 200
assert resp['Access-Control-Allow-Origin'] == 'http://example.com'
            assert resp['Access-Control-Allow-Methods'] == 'GET, POST, OPTIONS'
@mock.patch('raven.contrib.django.views.is_valid_origin', mock.Mock(return_value=True))
def test_missing_data(self):
with self.settings(ROOT_URLCONF=self.urls):
resp = self.client.post(self.path, HTTP_ORIGIN='http://example.com')
assert resp.status_code == 400
@mock.patch('raven.contrib.django.views.is_valid_origin', mock.Mock(return_value=True))
def test_invalid_data(self):
with self.settings(ROOT_URLCONF=self.urls):
resp = self.client.post(self.path, HTTP_ORIGIN='http://example.com',
data='[1', content_type='application/octet-stream')
assert resp.status_code == 400
@mock.patch('raven.contrib.django.views.is_valid_origin', mock.Mock(return_value=True))
def test_sends_data(self):
with self.settings(ROOT_URLCONF=self.urls):
resp = self.client.post(self.path, HTTP_ORIGIN='http://example.com',
data='{}', content_type='application/octet-stream')
assert resp.status_code == 200
event = client.events.pop(0)
assert event == {'auth_header': None}
@mock.patch('raven.contrib.django.views.is_valid_origin', mock.Mock(return_value=True))
def test_sends_authorization_header(self):
with self.settings(ROOT_URLCONF=self.urls):
resp = self.client.post(self.path, HTTP_ORIGIN='http://example.com',
HTTP_AUTHORIZATION='Sentry foo/bar', data='{}', content_type='application/octet-stream')
assert resp.status_code == 200
event = client.events.pop(0)
assert event == {'auth_header': 'Sentry foo/bar'}
@mock.patch('raven.contrib.django.views.is_valid_origin', mock.Mock(return_value=True))
def test_sends_x_sentry_auth_header(self):
with self.settings(ROOT_URLCONF=self.urls):
resp = self.client.post(self.path, HTTP_ORIGIN='http://example.com',
HTTP_X_SENTRY_AUTH='Sentry foo/bar', data='{}',
content_type='application/octet-stream')
assert resp.status_code == 200
event = client.events.pop(0)
assert event == {'auth_header': 'Sentry foo/bar'}
class PromiseSerializerTestCase(TestCase):
def test_basic(self):
from django.utils.functional import lazy
obj = lazy(lambda: 'bar', text_type)()
res = transform(obj)
expected = "'bar'" if not PY2 else "u'bar'"
assert res == expected
def test_handles_gettext_lazy(self):
from django.utils.functional import lazy
def fake_gettext(to_translate):
return 'Igpay Atinlay'
fake_gettext_lazy = lazy(fake_gettext, text_type)
result = transform(fake_gettext_lazy("something"))
assert isinstance(result, string_types)
expected = "'Igpay Atinlay'" if not PY2 else "u'Igpay Atinlay'"
assert result == expected
def test_real_gettext_lazy(self):
d = {text_type('lazy_translation'): gettext_lazy(text_type('testing'))}
key = "'lazy_translation'" if not PY2 else "u'lazy_translation'"
value = "'testing'" if not PY2 else "u'testing'"
assert transform(d) == {key: value}
class ModelInstanceSerializerTestCase(object):
def test_basic(self, mytest_model):
instance = mytest_model()
result = transform(instance)
assert isinstance(result, string_types)
assert result == '<MyTestModel: MyTestModel object>'
class QuerySetSerializerTestCase(object):
def test_basic(self, mytest_model):
from django.db.models.query import QuerySet
obj = QuerySet(model=mytest_model)
result = transform(obj)
assert isinstance(result, string_types)
assert result == '<QuerySet: model=MyTestModel>'
class SentryExceptionHandlerTest(TestCase):
@fixture
def request(self):
return make_request()
@fixture
def exc_info(self):
return (ValueError, ValueError('lol world'), None)
def setUp(self):
super(SentryExceptionHandlerTest, self).setUp()
self.client = get_client()
self.handler = SentryDjangoHandler(self.client)
@mock.patch.object(MockClient, 'captureException')
@mock.patch('sys.exc_info')
def test_does_capture_exception(self, exc_info, captureException):
exc_info.return_value = self.exc_info
self.handler.exception_handler(request=self.request)
captureException.assert_called_once_with(exc_info=self.exc_info, request=self.request)
@mock.patch.object(MockClient, 'send')
@mock.patch('sys.exc_info')
def test_does_exclude_filtered_types(self, exc_info, mock_send):
exc_info.return_value = self.exc_info
try:
self.client.ignore_exceptions = set(['ValueError'])
self.handler.exception_handler(request=self.request)
finally:
self.client.ignore_exceptions.clear()
assert not mock_send.called
@mock.patch.object(MockClient, 'send')
@mock.patch('sys.exc_info')
def test_ignore_exceptions_with_expression_match(self, exc_info, mock_send):
exc_info.return_value = self.exc_info
try:
if not PY2:
self.client.ignore_exceptions = set(['builtins.*'])
else:
self.client.ignore_exceptions = set(['exceptions.*'])
self.handler.exception_handler(request=self.request)
finally:
self.client.ignore_exceptions.clear()
assert not mock_send.called
@mock.patch.object(MockClient, 'send')
@mock.patch('sys.exc_info')
def test_ignore_exceptions_with_module_match(self, exc_info, mock_send):
exc_info.return_value = self.exc_info
try:
if not PY2:
self.client.ignore_exceptions = set(['builtins.ValueError'])
else:
self.client.ignore_exceptions = set(['exceptions.ValueError'])
self.handler.exception_handler(request=self.request)
finally:
self.client.ignore_exceptions.clear()
assert not mock_send.called
class SQLHookTestCase(TestCase):
def test_wrong_params(self):
query = 'SELECT COUNT(*) FROM mytestmodel WHERE id = %s'
args = ['foobar', 42]
record_sql(None, None, None, None, query, args)
crumbs = get_client().context.breadcrumbs.get_buffer()
self.assertEqual(crumbs[-1]['message'], query)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class ListManagementTermOperations(object):
"""ListManagementTermOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def add_term(
self, list_id, term, language, custom_headers=None, raw=False, **operation_config):
"""Add a term to the term list with list Id equal to list Id passed.
:param list_id: List Id of the image list.
:type list_id: str
:param term: Term to be deleted
:type term: str
:param language: Language of the terms.
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: object or ClientRawResponse if raw=true
:rtype: object or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
"""
# Construct URL
url = '/contentmoderator/lists/v1.0/termlists/{listId}/terms/{term}'
path_format_arguments = {
'baseUrl': self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
'listId': self._serialize.url("list_id", list_id, 'str'),
'term': self._serialize.url("term", term, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [201]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_term(
self, list_id, term, language, custom_headers=None, raw=False, **operation_config):
"""Deletes a term from the list with list Id equal to the list Id passed.
        :param list_id: List Id of the term list.
:type list_id: str
:param term: Term to be deleted
:type term: str
:param language: Language of the terms.
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
"""
# Construct URL
url = '/contentmoderator/lists/v1.0/termlists/{listId}/terms/{term}'
path_format_arguments = {
'baseUrl': self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
'listId': self._serialize.url("list_id", list_id, 'str'),
'term': self._serialize.url("term", term, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [204]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 204:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_all_terms(
self, list_id, language, offset=None, limit=None, custom_headers=None, raw=False, **operation_config):
"""Gets all terms from the list with list Id equal to the list Id passed.
        :param list_id: List Id of the term list.
:type list_id: str
:param language: Language of the terms.
:type language: str
:param offset: The pagination start index.
:type offset: int
:param limit: The max limit.
:type limit: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Terms or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.contentmoderator.models.Terms
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
"""
# Construct URL
url = '/contentmoderator/lists/v1.0/termlists/{listId}/terms'
path_format_arguments = {
'baseUrl': self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
'listId': self._serialize.url("list_id", list_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['language'] = self._serialize.query("language", language, 'str')
if offset is not None:
query_parameters['offset'] = self._serialize.query("offset", offset, 'int')
if limit is not None:
query_parameters['limit'] = self._serialize.query("limit", limit, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Terms', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_all_terms(
self, list_id, language, custom_headers=None, raw=False, **operation_config):
"""Deletes all terms from the list with list Id equal to the list Id
passed.
        :param list_id: List Id of the term list.
:type list_id: str
:param language: Language of the terms.
:type language: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
"""
# Construct URL
url = '/contentmoderator/lists/v1.0/termlists/{listId}/terms'
path_format_arguments = {
'baseUrl': self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
'listId': self._serialize.url("list_id", list_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['language'] = self._serialize.query("language", language, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [204]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 204:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
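# Illustrative usage sketch (editor's addition): exercising the operations defined
# above against an already constructed ListManagementTermOperations instance.
# The list id, term and language values are placeholders.
def _example_term_list_roundtrip(term_ops, list_id='101', language='eng'):
    term_ops.add_term(list_id, 'example-term', language)
    terms = term_ops.get_all_terms(list_id, language)
    term_ops.delete_term(list_id, 'example-term', language)
    return terms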
|
|
# -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import io
import re
import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._compat import BytesIO
from ._compat import implements_iterator
from ._compat import make_literal_wrapper
from ._compat import string_types
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_unicode
from ._compat import try_coerce_native
from ._compat import wsgi_get_bytes
from ._internal import _encode_idna
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
def responder(f):
"""Marks a function as responder. Decorate a function with it and it
will automatically call the return value as WSGI application.
Example::
@responder
def application(environ, start_response):
return Response('Hello World!')
"""
return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
def get_current_url(
environ,
root_only=False,
strip_querystring=False,
host_only=False,
trusted_hosts=None,
):
"""A handy helper function that recreates the full URL as IRI for the
current request or parts of it. Here's an example:
>>> from werkzeug.test import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
    Optionally, it verifies that the host is in a list of trusted hosts.
    If the host is not in that list, it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.
    Note that the string returned might contain unicode characters as the
    representation is an IRI, not a URI. If you need an ASCII-only
representation you can use the :func:`~werkzeug.urls.iri_to_uri`
function:
>>> from werkzeug.urls import iri_to_uri
>>> iri_to_uri(get_current_url(env))
'http://localhost/script/?param=foo'
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
tmp = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)]
cat = tmp.append
if host_only:
return uri_to_iri("".join(tmp) + "/")
cat(url_quote(wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))).rstrip("/"))
cat("/")
if not root_only:
cat(url_quote(wsgi_get_bytes(environ.get("PATH_INFO", "")).lstrip(b"/")))
if not strip_querystring:
qs = get_query_string(environ)
if qs:
cat("?" + qs)
return uri_to_iri("".join(tmp))
def host_is_trusted(hostname, trusted_list):
"""Checks if a host is trusted against a list. This also takes care
of port normalization.
.. versionadded:: 0.9
:param hostname: the hostname to check
:param trusted_list: a list of hostnames to check against. If a
hostname starts with a dot it will match against
all subdomains as well.
"""
if not hostname:
return False
if isinstance(trusted_list, string_types):
trusted_list = [trusted_list]
def _normalize(hostname):
if ":" in hostname:
hostname = hostname.rsplit(":", 1)[0]
return _encode_idna(hostname)
try:
hostname = _normalize(hostname)
except UnicodeError:
return False
for ref in trusted_list:
if ref.startswith("."):
ref = ref[1:]
suffix_match = True
else:
suffix_match = False
try:
ref = _normalize(ref)
except UnicodeError:
return False
if ref == hostname:
return True
if suffix_match and hostname.endswith(b"." + ref):
return True
return False
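# Minimal usage sketch (editor's addition): plain entries must match the host
# exactly, while entries starting with a dot match all subdomains; ports are
# normalized away before comparison. The host names below are illustrative only.
def _example_host_is_trusted():
    assert host_is_trusted("example.com", ["example.com"])
    assert host_is_trusted("api.example.com:8080", [".example.com"])
    assert not host_is_trusted("evil.com", [".example.com"])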
def get_host(environ, trusted_hosts=None):
"""Return the host for the given WSGI environment. This first checks
the ``Host`` header. If it's not present, then ``SERVER_NAME`` and
``SERVER_PORT`` are used. The host will only contain the port if it
is different than the standard port for the protocol.
Optionally, verify that the host is trusted using
:func:`host_is_trusted` and raise a
:exc:`~werkzeug.exceptions.SecurityError` if it is not.
:param environ: The WSGI environment to get the host from.
:param trusted_hosts: A list of trusted hosts.
:return: Host, with port if necessary.
:raise ~werkzeug.exceptions.SecurityError: If the host is not
trusted.
"""
if "HTTP_HOST" in environ:
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
else:
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
if trusted_hosts is not None:
if not host_is_trusted(rv, trusted_hosts):
from .exceptions import SecurityError
raise SecurityError('Host "%s" is not trusted' % rv)
return rv
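# Minimal usage sketch (editor's addition): the host comes from ``HTTP_HOST``
# when present (default ports are stripped), otherwise from ``SERVER_NAME`` and
# ``SERVER_PORT``. The environ dicts below are hand-built examples.
def _example_get_host():
    assert get_host({"wsgi.url_scheme": "http", "HTTP_HOST": "example.com:80"}) == "example.com"
    assert (
        get_host({"wsgi.url_scheme": "https", "SERVER_NAME": "example.com", "SERVER_PORT": "8443"})
        == "example.com:8443"
    )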
def get_content_length(environ):
"""Returns the content length from the WSGI environment as
    an integer. If it's not available or chunked transfer encoding is used,
``None`` is returned.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the content length from.
"""
if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
return None
content_length = environ.get("CONTENT_LENGTH")
if content_length is not None:
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
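# Minimal usage sketch (editor's addition): a declared ``CONTENT_LENGTH`` is
# returned as an int, while chunked transfer encoding or a missing/invalid
# header yields ``None``.
def _example_get_content_length():
    assert get_content_length({"CONTENT_LENGTH": "42"}) == 42
    assert get_content_length({"HTTP_TRANSFER_ENCODING": "chunked"}) is None
    assert get_content_length({}) is None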
def get_input_stream(environ, safe_fallback=True):
"""Returns the input stream from the WSGI environment and wraps it
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
If content length is not set, the stream will be empty for safety reasons.
If the WSGI server supports chunked or infinite streams, it should set
the ``wsgi.input_terminated`` value in the WSGI environ to indicate that.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
:param safe_fallback: use an empty stream as a safe fallback when the
content length is not set. Disabling this allows infinite streams,
which can be a denial-of-service risk.
"""
stream = environ["wsgi.input"]
content_length = get_content_length(environ)
# A wsgi extension that tells us if the input is terminated. In
# that case we return the stream unchanged as we know we can safely
# read it until the end.
if environ.get("wsgi.input_terminated"):
return stream
# If the request doesn't specify a content length, returning the stream is
# potentially dangerous because it could be infinite, malicious or not. If
# safe_fallback is true, return an empty stream instead for safety.
if content_length is None:
return BytesIO() if safe_fallback else stream
# Otherwise limit the stream to the content length
return LimitedStream(stream, content_length)
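# Minimal usage sketch (editor's addition): with a content length the raw input
# is wrapped in a LimitedStream so reads stop at the declared body size; without
# one, the safe fallback is an empty stream.
def _example_get_input_stream():
    environ = {"wsgi.input": BytesIO(b"hello world"), "CONTENT_LENGTH": "5"}
    assert get_input_stream(environ).read() == b"hello"
    environ = {"wsgi.input": BytesIO(b"hello world")}
    assert get_input_stream(environ).read() == b""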
def get_query_string(environ):
"""Returns the `QUERY_STRING` from the WSGI environment. This also takes
care about the WSGI decoding dance on Python 3 environments as a
native string. The string returned will be restricted to ASCII
characters.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the query string from.
"""
qs = wsgi_get_bytes(environ.get("QUERY_STRING", ""))
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
# In that case we want to urllib quote it badly.
return try_coerce_native(url_quote(qs, safe=":&%=+$!*'(),"))
def get_path_info(environ, charset="utf-8", errors="replace"):
"""Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None`, a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path info, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get("PATH_INFO", ""))
return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset="utf-8", errors="replace"):
"""Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None`, a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))
return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset="utf-8", errors="replace"):
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` a bytestring is returned.
    If there are empty segments (``'/foo//bar'``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
"""
path = environ.get("PATH_INFO")
if not path:
return None
script_name = environ.get("SCRIPT_NAME", "")
# shift multiple leading slashes over
old_path = path
path = path.lstrip("/")
if path != old_path:
script_name += "/" * (len(old_path) - len(path))
if "/" not in path:
environ["PATH_INFO"] = ""
environ["SCRIPT_NAME"] = script_name + path
rv = wsgi_get_bytes(path)
else:
segment, path = path.split("/", 1)
environ["PATH_INFO"] = "/" + path
environ["SCRIPT_NAME"] = script_name + segment
rv = wsgi_get_bytes(segment)
return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset="utf-8", errors="replace"):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)
if segments:
return to_unicode(
wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True
)
def extract_path_info(
environ_or_baseurl,
path_or_url,
charset="utf-8",
errors="werkzeug.url_quote",
collapse_http_schemes=True,
):
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
.. versionchanged:: 0.15
The ``errors`` parameter defaults to leaving invalid bytes
quoted instead of replacing them.
.. versionadded:: 0.6
"""
def _normalize_netloc(scheme, netloc):
parts = netloc.split(u"@", 1)[-1].split(u":", 1)
if len(parts) == 2:
netloc, port = parts
if (scheme == u"http" and port == u"80") or (
scheme == u"https" and port == u"443"
):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += u":" + port
return netloc
    # make sure whatever we are working on is an IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    cur_scheme, cur_netloc, cur_path = url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in (u"http", u"https"):
return None
else:
if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip(u"/")
if not cur_path.startswith(base_path):
return None
return u"/" + cur_path[len(base_path) :].lstrip(u"/")
@implements_iterator
class ClosingIterator(object):
"""The WSGI specification requires that all middlewares and gateways
respect the `close` callback of the iterable returned by the application.
Because it is useful to add another close action to a returned iterable
and adding a custom iterable is a boring task this class can be used for
that::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
and finishes the processing if the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(self, iterable, callbacks=None):
iterator = iter(iterable)
self._next = partial(next, iterator)
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterable, "close", None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self):
return self
def __next__(self):
return self._next()
def close(self):
for callback in self._callbacks:
callback()
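# Minimal usage sketch (editor's addition): attach an extra cleanup callback to
# an application's response iterable; the callback runs when the server closes
# the iterator. ``fake_app`` and ``cleanup`` are illustrative names only.
def _example_closing_iterator():
    closed = []
    def fake_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"Hello World!"]
    def cleanup():
        closed.append(True)
    iterator = ClosingIterator(fake_app({}, lambda status, headers: None), cleanup)
    assert list(iterator) == [b"Hello World!"]
    iterator.close()  # normally done by the WSGI server
    assert closed == [True]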
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
    More information about file wrappers is available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get("wsgi.file_wrapper", FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, "close"):
self.file.close()
def seekable(self):
if hasattr(self.file, "seekable"):
return self.file.seekable()
if hasattr(self.file, "seek"):
return True
return False
def seek(self, *args):
if hasattr(self.file, "seek"):
self.file.seek(*args)
def tell(self):
if hasattr(self.file, "tell"):
return self.file.tell()
return None
def __iter__(self):
return self
def __next__(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
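# Minimal usage sketch (editor's addition): FileWrapper turns a file-like object
# into an iterator over fixed-size blocks.
def _example_file_wrapper():
    wrapper = FileWrapper(io.BytesIO(b"abcdef"), buffer_size=4)
    assert list(wrapper) == [b"abcd", b"ef"]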
@implements_iterator
class _RangeWrapper(object):
    # private for now, but should we make it public in the future?
"""This class can be used to convert an iterable object into
an iterable that will only yield a piece of the underlying content.
It yields blocks until the underlying stream range is fully read.
    The yielded blocks will never be larger than the block size of the
    original iterator, but they can be smaller.
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param iterable: an iterable object with a :meth:`__next__` method.
:param start_byte: byte from which read will start.
:param byte_range: how many bytes to read.
"""
def __init__(self, iterable, start_byte=0, byte_range=None):
self.iterable = iter(iterable)
self.byte_range = byte_range
self.start_byte = start_byte
self.end_byte = None
if byte_range is not None:
self.end_byte = self.start_byte + self.byte_range
self.read_length = 0
self.seekable = hasattr(iterable, "seekable") and iterable.seekable()
self.end_reached = False
def __iter__(self):
return self
def _next_chunk(self):
try:
chunk = next(self.iterable)
self.read_length += len(chunk)
return chunk
except StopIteration:
self.end_reached = True
raise
def _first_iteration(self):
chunk = None
if self.seekable:
self.iterable.seek(self.start_byte)
self.read_length = self.iterable.tell()
contextual_read_length = self.read_length
else:
while self.read_length <= self.start_byte:
chunk = self._next_chunk()
if chunk is not None:
chunk = chunk[self.start_byte - self.read_length :]
contextual_read_length = self.start_byte
return chunk, contextual_read_length
def _next(self):
if self.end_reached:
raise StopIteration()
chunk = None
contextual_read_length = self.read_length
if self.read_length == 0:
chunk, contextual_read_length = self._first_iteration()
if chunk is None:
chunk = self._next_chunk()
if self.end_byte is not None and self.read_length >= self.end_byte:
self.end_reached = True
return chunk[: self.end_byte - contextual_read_length]
return chunk
def __next__(self):
chunk = self._next()
if chunk:
return chunk
self.end_reached = True
raise StopIteration()
def close(self):
if hasattr(self.iterable, "close"):
self.iterable.close()
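# Minimal usage sketch (editor's addition): _RangeWrapper restricts iteration to
# a byte range of the underlying stream; here only bytes 2..5 are yielded.
def _example_range_wrapper():
    wrapper = _RangeWrapper(io.BytesIO(b"0123456789"), start_byte=2, byte_range=4)
    assert b"".join(wrapper) == b"2345"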
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, text_type)):
raise TypeError(
"Passed a string or byte object instead of true iterator or stream."
)
if not hasattr(stream, "read"):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False):
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposed
to the :meth:`~file.readline` method that is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
    :param limit: the limit in bytes for the stream. (Usually
                  content length. Not necessary if the `stream`
                  is a :class:`LimitedStream`.)
:param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set, chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          so that the buffer size might be exceeded by a factor
                          of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, "")
if not first_item:
return
s = make_literal_wrapper(first_item)
empty = s("")
cr = s("\r")
lf = s("\n")
crlf = s("\r\n")
_iter = chain((first_item,), _iter)
def _iter_basic_lines():
_join = empty.join
buffer = []
while 1:
new_data = next(_iter, "")
if not new_data:
break
new_buf = []
buf_size = 0
for item in chain(buffer, new_data.splitlines(True)):
new_buf.append(item)
buf_size += len(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
elif cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
yield previous
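# Example (added for illustration; not part of the original module): a minimal
# sketch of consuming a WSGI request body line by line with make_line_iter.
# The helper name and the environ lookups are assumptions for the sketch only.
def _example_iter_request_lines(environ):
    stream = environ["wsgi.input"]
    limit = int(environ.get("CONTENT_LENGTH") or 0)
    for line in make_line_iter(stream, limit=limit):
        # each yielded item is one complete line, line ending included
        yield line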
def make_chunk_iter(
    stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False
):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterable to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually the content
                  length.  Not necessary if the `stream` is otherwise
                  already limited.)
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set, chunks are split if they are longer
                          than the buffer size.  Internally this is
                          implemented so that the buffer size might be
                          exceeded by up to a factor of two.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, "")
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r"(%s)" % re.escape(separator)).split
_join = u"".join
else:
separator = to_bytes(separator)
_split = re.compile(b"(" + re.escape(separator) + b")").split
_join = b"".join
buffer = []
while 1:
new_data = next(_iter, "")
if not new_data:
break
chunks = _split(new_data)
new_buf = []
buf_size = 0
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
buf_size = 0
else:
buf_size += len(item)
new_buf.append(item)
if cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buf_size = len(rv)
buffer = new_buf
if buffer:
yield _join(buffer)
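# Example (added for illustration; not part of the original module): splitting
# an in-memory byte stream on a separator with make_chunk_iter. The helper
# name and sample data are assumptions.
def _example_split_chunks():
    data = io.BytesIO(b"alpha|beta|gamma")
    # yields b"alpha", b"beta", b"gamma"
    return list(make_chunk_iter(data, separator=b"|", limit=16))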
@implements_iterator
class LimitedStream(io.IOBase):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
    .. admonition:: Note on WSGI compliance

       Calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because they pass a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only, or using
       :func:`make_line_iter`, which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream; it must not be higher than what
                  the stream can provide if the stream does not end with
                  `EOF` (like `wsgi.input`).
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from .exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
:param chunk_size: the size for a chunk. It will read the chunk
until the stream is exhausted and throw away
the results.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
    def read(self, size=None):
        """Read up to `size` bytes; if `size` is not provided, everything up
        to the limit is read.

        :param size: the number of bytes to read.
"""
if self._pos >= self.limit:
return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistency with file objects
size = self.limit
to_read = min(self.limit - self._pos, size)
try:
read = self._read(to_read)
except (IOError, ValueError):
return self.on_disconnect()
if to_read and len(read) != to_read:
return self.on_disconnect()
self._pos += len(read)
return read
def readline(self, size=None):
"""Reads one line from the stream."""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None:
size = self.limit - self._pos
else:
size = min(size, self.limit - self._pos)
try:
line = self._readline(size)
except (ValueError, IOError):
return self.on_disconnect()
if size and not line:
return self.on_disconnect()
self._pos += len(line)
return line
def readlines(self, size=None):
"""Reads a file into a list of strings. It calls :meth:`readline`
until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
`readline`.
"""
last_pos = self._pos
result = []
if size is not None:
end = min(self.limit, last_pos + size)
else:
end = self.limit
while 1:
if size is not None:
size -= last_pos - self._pos
if self._pos >= end:
break
result.append(self.readline(size))
if size is not None:
last_pos = self._pos
return result
def tell(self):
"""Returns the position of the stream.
.. versionadded:: 0.9
"""
return self._pos
def __next__(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readable(self):
return True
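# Example (added for illustration; not part of the original module): capping
# how much can be read from a wrapped stream. The sample data is an assumption.
def _example_limited_stream():
    limited = LimitedStream(io.BytesIO(b"hello world -- trailing junk"), limit=11)
    first = limited.read(5)   # b"hello"
    rest = limited.read()     # b" world" -- reading stops at the 11-byte limit
    return first, rest, limited.is_exhausted  # -> (b"hello", b" world", True)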
# DEPRECATED
from .middleware.dispatcher import DispatcherMiddleware as _DispatcherMiddleware
from .middleware.http_proxy import ProxyMiddleware as _ProxyMiddleware
from .middleware.shared_data import SharedDataMiddleware as _SharedDataMiddleware
class ProxyMiddleware(_ProxyMiddleware):
"""
.. deprecated:: 0.15
``werkzeug.wsgi.ProxyMiddleware`` has moved to
:mod:`werkzeug.middleware.http_proxy`. This import will be
removed in 1.0.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"'werkzeug.wsgi.ProxyMiddleware' has moved to 'werkzeug"
".middleware.http_proxy.ProxyMiddleware'. This import is"
" deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
super(ProxyMiddleware, self).__init__(*args, **kwargs)
class SharedDataMiddleware(_SharedDataMiddleware):
"""
.. deprecated:: 0.15
``werkzeug.wsgi.SharedDataMiddleware`` has moved to
:mod:`werkzeug.middleware.shared_data`. This import will be
removed in 1.0.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"'werkzeug.wsgi.SharedDataMiddleware' has moved to"
" 'werkzeug.middleware.shared_data.SharedDataMiddleware'."
" This import is deprecated as of version 0.15 and will be"
" removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
super(SharedDataMiddleware, self).__init__(*args, **kwargs)
class DispatcherMiddleware(_DispatcherMiddleware):
"""
.. deprecated:: 0.15
``werkzeug.wsgi.DispatcherMiddleware`` has moved to
:mod:`werkzeug.middleware.dispatcher`. This import will be
removed in 1.0.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"'werkzeug.wsgi.DispatcherMiddleware' has moved to"
" 'werkzeug.middleware.dispatcher.DispatcherMiddleware'."
" This import is deprecated as of version 0.15 and will be"
" removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
super(DispatcherMiddleware, self).__init__(*args, **kwargs)
|
|
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import struct
from scipy.spatial.distance import hamming
##################################################################################################
# Setting the path for searching modules
##################################################################################################
import os
import inspect
current_directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandparent_directory = os.path.dirname(os.path.dirname(current_directory))
os.sys.path.insert(0, grandparent_directory)
##################################################################################################
from brain.model.model import ModelFitLibrary
from brain.machines.deep_belief_network import DBN
from brain.machines.trbm import TRBM
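# Note (added comment): binary() below packs a float as a big-endian IEEE-754
# single-precision value and returns its 32-bit pattern as a string of '0'/'1'
# characters, e.g. binary(1.0) == '00111111100000000000000000000000'. It relies
# on Python 2 string semantics (iterating over the packed str yields chars).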
def binary(num):
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
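# Note (added comment): distance() below measures the dissimilarity of two
# equal-length float vectors as the mean Hamming distance between the IEEE-754
# bit patterns of corresponding elements; the result lies in [0, 1].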
def distance(x, y):
d = 0.
for i in xrange(len(x)):
a = np.int32(list(binary(x[i])))
b = np.int32(list(binary(y[i])))
d = d + hamming(a,b)
d = d / len(x)
return d
# def distance(x, y):
# return np.sqrt(np.sum(np.square(np.sqrt(x) - np.sqrt(y)))) / np.sqrt(2)
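# Note (added comment): rescale() below maps the absolute values of `arr` into
# [0, 1] by min-max normalisation along the first axis, computing the minima
# and maxima from the data when they are not supplied and returning them so
# that test data can be rescaled with the training-data statistics.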
def rescale(arr, arr_min=None, arr_max=None):
new_arr = np.array(np.abs(arr))
    if arr_min is None:
arr_min = np.min(new_arr, 0)
#arr_min = arr_min - arr_min / 10.
arr_max = np.max(new_arr, 0)
#arr_max = arr_max + arr_max / 10.
ranges = (arr_max - arr_min) + 1e-10
for i in xrange(new_arr.shape[0]):
new_arr[i] = (new_arr[i] - arr_min) / ranges
new_arr = np.clip(new_arr, 0., 1.)
return new_arr, arr_min, arr_max
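# The trial functions below share one anomaly-detection recipe: fit a model on
# "normal" training data, score a held-out normal set, set the alarm threshold
# at mean + 2.5 standard deviations of those scores, and flag candidate data
# whose score exceeds it. A minimal standalone sketch of that thresholding
# step (hypothetical helper, added for illustration only):
def _sketch_threshold(distances_normal, distances_candidate):
    threshold = np.mean(distances_normal) + 2.5 * np.std(distances_normal)
    return threshold, np.asarray(distances_candidate) > threshold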
def test_trial(index, fig1, fig2, title, \
train, retrain, test, \
train_data, retrain_data, test_data, anomalous_data, \
model_manager, curve_function, number_of_model_parameters, sequence_len, number_of_sequences):
#training data
network, arr_min, arr_max = train(train_data, curve_function, model_manager, number_of_model_parameters, sequence_len)
long_term_memory = deepcopy(network)
#test data - uniform data
distances_same_distribution = test(test_data, arr_min, arr_max, curve_function, network, model_manager, number_of_model_parameters, sequence_len)
threshold = np.mean(distances_same_distribution) + 2.5 * np.std(distances_same_distribution)
#test data - zeros
distances_other_distribution = test(anomalous_data, arr_min, arr_max, curve_function, network, model_manager, number_of_model_parameters, sequence_len, np.zeros(number_of_model_parameters))
#train long-term memory
if type(retrain_data) is list:
for i in xrange(len(retrain_data)):
long_term_memory = retrain(retrain_data[i], arr_min, arr_max, long_term_memory, curve_function, model_manager, sequence_len)
else:
long_term_memory = retrain(retrain_data, arr_min, arr_max, long_term_memory, curve_function, model_manager, sequence_len)
#test data - uniform data
distances_same_distribution_lt = test(test_data, arr_min, arr_max, curve_function, long_term_memory, model_manager, number_of_model_parameters, sequence_len)
threshold_lt = np.mean(distances_same_distribution_lt) + 2.5 * np.std(distances_same_distribution_lt)
#test data - zeros
y = np.zeros(len(anomalous_data))
distances_other_distribution_lt = test(anomalous_data, arr_min, arr_max, curve_function, long_term_memory, model_manager, number_of_model_parameters, sequence_len, np.zeros(number_of_model_parameters))
#distance plot
plt.figure(1)
fig1.add_subplot(3,3,index)
plt.plot(np.ones(number_of_sequences) * threshold, 'g')
plt.plot(distances_same_distribution, 'b')
maximum_plot1 = max(distances_same_distribution)
plt.plot(distances_other_distribution, 'r')
maximum_plot2 = max(distances_other_distribution)
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
plt.figure(2)
fig2.add_subplot(3,3,index)
plt.plot(np.ones(number_of_sequences) * threshold_lt, 'g')
plt.plot(distances_same_distribution_lt, 'b')
maximum_plot1 = max(distances_same_distribution_lt)
plt.plot(distances_other_distribution_lt, 'r')
maximum_plot2 = max(distances_other_distribution_lt)
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
def test_feature_trial(index, fig1, fig2, title, train, retrain, train_with_models, test, \
train_data, retrain_data, test_data, anomalous_data, \
model_manager, sequence_len, number_of_sequences, use_bad_memory=False):
#training data
network, arr_min, arr_max = train(train_data, model_manager, sequence_len)
long_term_memory = deepcopy(network)
#test data - uniform data
distances_same_distribution,_ = test(test_data, arr_min, arr_max, network, model_manager, sequence_len)
threshold = np.mean(distances_same_distribution) + 2.5 * np.std(distances_same_distribution)
#test data - zeros
distances_other_distribution, _ = test(anomalous_data, arr_min, arr_max, network, model_manager, sequence_len)
#train long-term memory
if type(retrain_data) is list:
for i in xrange(len(retrain_data)):
long_term_memory = retrain(retrain_data[i], arr_min, arr_max, long_term_memory, None, model_manager, sequence_len, use_bad_memory=use_bad_memory)
else:
long_term_memory = retrain(retrain_data, arr_min, arr_max, long_term_memory, None, model_manager, sequence_len, use_bad_memory=use_bad_memory)
#test data - uniform data
distances_same_distribution_lt,_ = test(test_data, arr_min, arr_max, long_term_memory, model_manager, sequence_len)
threshold_lt = np.mean(distances_same_distribution_lt) + 2.5 * np.std(distances_same_distribution_lt)
#test data - zeros
distances_other_distribution_lt,_ = test(anomalous_data, arr_min, arr_max, long_term_memory, model_manager, sequence_len)
#distance plot
plt.figure(1)
fig1.add_subplot(3,3,index)
plt.plot(np.ones(number_of_sequences) * threshold, 'g')
plt.plot(distances_same_distribution, 'b')
maximum_plot1 = max(distances_same_distribution)
plt.plot(distances_other_distribution, 'r')
maximum_plot2 = max(distances_other_distribution)
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
plt.figure(2)
fig2.add_subplot(3,3,index)
plt.plot(np.ones(number_of_sequences) * threshold_lt, 'g')
plt.plot(distances_same_distribution_lt, 'b')
maximum_plot1 = max(distances_same_distribution_lt)
plt.plot(distances_other_distribution_lt, 'r')
maximum_plot2 = max(distances_other_distribution_lt)
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
def test_sensor_trial(title, \
train, retrain, test, \
train_data, retrain_data, test_data, anomalous_data, \
model_manager, curve_function, number_of_model_parameters, sequence_len, number_of_sequences):
network,arr_min, arr_max = train(train_data, curve_function, model_manager, number_of_model_parameters, sequence_len)
long_term_memory = deepcopy(network)
distances_no_faults = test(test_data,arr_min, arr_max, curve_function, network, model_manager, number_of_model_parameters, sequence_len)
threshold = np.mean(distances_no_faults) + 2.5 * np.std(distances_no_faults)
distances_faults = test(anomalous_data,arr_min, arr_max, curve_function, network, model_manager, number_of_model_parameters, sequence_len)
    if retrain_data is not None:
long_term_memory = retrain(retrain_data, arr_min, arr_max, long_term_memory, curve_function, model_manager, sequence_len)
distances_no_faults_lt = test(test_data,arr_min, arr_max, curve_function, long_term_memory, model_manager, number_of_model_parameters, sequence_len)
threshold_lt = np.mean(distances_no_faults_lt) + 2.5 * np.std(distances_no_faults_lt)
distances_faults_lt = test(anomalous_data,arr_min, arr_max, curve_function, long_term_memory, model_manager, number_of_model_parameters, sequence_len)
#distance plots
plt.figure(1)
plt.plot(np.ones(number_of_sequences) * threshold, 'g')
plt.plot(distances_no_faults, 'b')
plt.plot(distances_faults, 'r')
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
plt.figure(2)
plt.plot(np.ones(number_of_sequences) * threshold_lt, 'g')
plt.plot(distances_no_faults_lt, 'b')
plt.plot(distances_faults_lt, 'r')
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
def test_sensor_feature_trial(title, \
train, retrain, test, \
train_data, retrain_data, test_data, anomalous_data, \
model_manager, sequence_len, number_of_sequences, use_bad_memory):
network, arr_min, arr_max = train(train_data, model_manager, sequence_len)
long_term_memory = deepcopy(network)
distances_no_faults,_ = test(test_data, arr_min, arr_max, network, model_manager, sequence_len)
threshold = np.mean(distances_no_faults) + 2.5 * np.std(distances_no_faults)
distances_faults,_ = test(anomalous_data, arr_min, arr_max, network, model_manager, sequence_len)
    if retrain_data is not None:
long_term_memory = retrain(retrain_data, arr_min, arr_max, long_term_memory, None, model_manager, sequence_len)
distances_no_faults_lt,_ = test(test_data, arr_min, arr_max, long_term_memory, model_manager, sequence_len)
threshold_lt = np.mean(distances_no_faults_lt) + 2.5 * np.std(distances_no_faults_lt)
distances_faults_lt,_ = test(anomalous_data, arr_min, arr_max, long_term_memory, model_manager, sequence_len)
#distance plot
plt.figure(1)
plt.plot(np.ones(number_of_sequences) * threshold, 'g')
plt.plot(distances_no_faults, 'b')
plt.plot(distances_faults, 'r')
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
plt.figure(2)
plt.plot(np.ones(number_of_sequences) * threshold_lt, 'g')
plt.plot(distances_no_faults_lt, 'b')
plt.plot(distances_faults_lt, 'r')
plt.ylim((0, 1))
plt.title(title)
plt.xlabel('t')
plt.ylabel('distance')
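# The generic_* drivers below repeat the same experiment over nine synthetic
# distributions, once on clean data and once on "noisy" data where roughly 5%
# of the samples are outliers of value 10 inserted at random positions. A
# minimal sketch of that noise-injection step (hypothetical helper, added for
# illustration only; not called by the code below):
def _sketch_inject_outliers(values, n_outliers=50, outlier_value=10.):
    noisy = list(values)
    for _ in xrange(n_outliers):
        noisy.insert(np.random.randint(0, len(noisy)), outlier_value)
    return np.array(noisy)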
def generic_test(train, retrain, test, curve_functions, model_manager, number_of_model_parameters, sequence_len, number_of_sequences):
for i in xrange(len(number_of_model_parameters)):
figure = plt.figure(1)
figure_lt = plt.figure(2)
figure.suptitle('Noise-free data: ' + str(number_of_model_parameters[i]) + ' parameters')
figure_lt.suptitle('Noise-free data: ' + str(number_of_model_parameters[i]) + ' parameters')
anomalous_data = np.zeros(100)
#############################
# Uniformly distributed data
#############################
train_data = np.random.rand(1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.rand(1000))
test_data = np.random.rand(100)
test_trial(1, figure, figure_lt, 'Uniform', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#############################
# Normally distributed data
#############################
train_data = np.random.normal(0, 1, 1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.normal(0, 1, 1000))
test_data = np.random.normal(0, 1, 100)
test_trial(2, figure, figure_lt, 'Normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#################################
# Exponentially distributed data
#################################
train_data = np.random.exponential(1, 1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.exponential(1, 1000))
test_data = np.random.exponential(1, 100)
test_trial(3, figure, figure_lt, 'Exponential', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
##########################
# Exponential plus Normal
##########################
train_data = np.random.exponential(1, 1000) + np.random.normal(0, 1, 1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.exponential(1, 1000) + np.random.normal(0, 1, 1000))
test_data = np.random.exponential(1, 100) + np.random.normal(0, 1, 100)
test_trial(4, figure, figure_lt, 'Exponential plus normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
##########################
# Exponential times Normal
##########################
train_data = np.random.exponential(1, 1000) * np.random.normal(0, 1, 1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.exponential(1, 1000) * np.random.normal(0, 1, 1000))
test_data = np.random.exponential(1, 100) * np.random.normal(0, 1, 100)
test_trial(5, figure, figure_lt, 'Exponential times normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#######################
# Uniform times Normal
#######################
train_data = np.random.rand(1000) * np.random.normal(0, 1, 1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.rand(1000) * np.random.normal(0, 1, 1000))
test_data = np.random.rand(100) * np.random.normal(0, 1, 100)
test_trial(6, figure, figure_lt, 'Uniform times normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#############
# Log-normal
#############
train_data = np.random.lognormal(0., 1., 1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.lognormal(0., 1., 1000))
test_data = np.random.lognormal(0., 1., 100)
test_trial(7, figure, figure_lt, 'Log-normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
########################
# Log-normal times Beta
########################
train_data = np.random.lognormal(0., 1., 1000) * np.random.beta(5.,3.,1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.lognormal(0., 1., 1000) * np.random.beta(5.,3.,1000))
test_data = np.random.lognormal(0., 1., 100) * np.random.beta(5.,3.,100)
test_trial(8, figure, figure_lt, 'Log-normal times beta', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
####################
# Normal times Beta
####################
train_data = np.random.normal(4,10,1000) * np.random.beta(5.,3.,1000)
retrain_data = list()
for _ in xrange(10):
retrain_data.append(np.random.normal(4,10,1000) * np.random.beta(5.,3.,1000))
test_data = np.random.normal(4,10,100) * np.random.beta(5.,3.,100)
test_trial(9, figure, figure_lt, 'Normal times beta', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
plt.show()
#############
# Noisy data
#############
for i in xrange(len(number_of_model_parameters)):
figure = plt.figure(1)
figure.suptitle('Noisy data: ' + str(number_of_model_parameters[i]) + ' parameters')
figure_lt = plt.figure(2)
figure_lt.suptitle('Noisy data: ' + str(number_of_model_parameters[i]) + ' parameters')
#############################
# Uniformly distributed data
#############################
train_data = list(np.random.rand(950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.rand(950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.rand(95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(1, figure, figure_lt, 'Uniform', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#############################
# Normally distributed data
#############################
train_data = list(np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(2, figure, figure_lt, 'Normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#################################
# Exponentially distributed data
#################################
train_data = list(np.random.exponential(1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(3, figure, figure_lt, 'Exponential', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
##########################
# Exponential plus Normal
##########################
train_data = list(np.random.exponential(1, 950) + np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950) + np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95) + np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(4, figure, figure_lt, 'Exponential plus normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
##########################
# Exponential times Normal
##########################
train_data = list(np.random.exponential(1, 950) * np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950) * np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95) * np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(5, figure, figure_lt, 'Exponential times normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#######################
# Uniform times Normal
#######################
train_data = list(np.random.rand(950) * np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.rand(950) * np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.rand(95) * np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(6, figure, figure_lt, 'Uniform times normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
#############
# Log-normal
#############
train_data = list(np.random.lognormal(0., 1., 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.lognormal(0., 1., 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.lognormal(0., 1., 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(7, figure, figure_lt, 'Log-normal', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
########################
# Log-normal times beta
########################
train_data = list(np.random.lognormal(0., 1., 950) * np.random.beta(5.,3.,950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.lognormal(0., 1., 950) * np.random.beta(5.,3.,950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.lognormal(0., 1., 95) * np.random.beta(5.,3.,95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(8, figure, figure_lt, 'Log-normal times beta', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
####################
# Normal times beta
####################
train_data = list(np.random.normal(4,10,950) * np.random.beta(5.,3.,950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.normal(4,10,950) * np.random.beta(5.,3.,950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.normal(4,10,95) * np.random.beta(5.,3.,95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_trial(9, figure, figure_lt, 'Normal times beta', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
plt.show()
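# Note (added comment): generic_feature_test below mirrors generic_test but
# appears to work on learned feature representations rather than fitted curve
# parameters -- its trials take no curve_function or parameter count and call
# test_feature_trial instead of test_trial.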
def generic_feature_test(train, retrain, train_with_models, test, model_manager, sequence_len, number_of_sequences):
figure = plt.figure(1)
figure_lt = plt.figure(2)
figure.suptitle('Noise-free data')
figure_lt.suptitle('Noise-free data')
anomalous_data = np.zeros(100)
#############################
# Uniformly distributed data
#############################
train_data = np.random.rand(1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.rand(1000))
test_data = np.random.rand(100)
test_feature_trial(1, figure, figure_lt, 'Uniform', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#############################
# Normally distributed data
#############################
train_data = np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.normal(0, 1, 1000))
test_data = np.random.normal(0, 1, 100)
test_feature_trial(2, figure, figure_lt, 'Normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#################################
# Exponentially distributed data
#################################
train_data = np.random.exponential(1, 1000)
    retrain_data = list()
    for i in xrange(10):
        retrain_data.append(np.random.exponential(1, 1000))
    test_data = np.random.exponential(1, 100)
test_feature_trial(3, figure, figure_lt, 'Exponential', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
##########################
# Exponential plus Normal
##########################
train_data = np.random.exponential(1, 1000) + np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.exponential(1, 1000) + np.random.normal(0, 1, 1000))
test_data = np.random.exponential(1, 100) + np.random.normal(0, 1, 100)
test_feature_trial(4, figure, figure_lt, 'Exponential plus normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
##########################
# Exponential times Normal
##########################
train_data = np.random.exponential(1, 1000) * np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.exponential(1, 1000) * np.random.normal(0, 1, 1000))
test_data = np.random.exponential(1, 100) * np.random.normal(0, 1, 100)
test_feature_trial(5, figure, figure_lt, 'Exponential times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#######################
# Uniform times Normal
#######################
train_data = np.random.rand(1000) * np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.rand(1000) * np.random.normal(0, 1, 1000))
test_data = np.random.rand(100) * np.random.normal(0, 1, 100)
test_feature_trial(6, figure, figure_lt, 'Uniform times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#############
# Log-normal
#############
train_data = np.random.lognormal(0., 1., 1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.lognormal(0., 1., 1000))
test_data = np.random.lognormal(0., 1., 100)
test_feature_trial(7, figure, figure_lt, 'Log-normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
########################
# Log-normal times Beta
########################
train_data = np.random.lognormal(0., 1., 1000) * np.random.beta(5.,3.,1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.lognormal(0., 1., 1000) * np.random.beta(5.,3.,1000))
test_data = np.random.lognormal(0., 1., 100) * np.random.beta(5.,3.,100)
test_feature_trial(8, figure, figure_lt, 'Log-normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
####################
# Normal times Beta
####################
train_data = np.random.normal(4,10,1000) * np.random.beta(5.,3.,1000)
retrain_data = list()
for i in xrange(10):
retrain_data.append(np.random.normal(4,10,1000) * np.random.beta(5.,3.,1000))
test_data = np.random.normal(4,10,100) * np.random.beta(5.,3.,100)
test_feature_trial(9, figure, figure_lt, 'Normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
plt.show()
#############
# Noisy data
#############
figure = plt.figure(1)
figure.suptitle('Noisy data')
figure_lt = plt.figure(2)
figure_lt.suptitle('Noisy data')
#############################
# Uniformly distributed data
#############################
train_data = list(np.random.rand(950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.rand(950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.rand(95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(1, figure, figure_lt, 'Uniform', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#############################
# Normally distributed data
#############################
train_data = list(np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(2, figure, figure_lt, 'Normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#################################
# Exponentially distributed data
#################################
train_data = list(np.random.exponential(1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(3, figure, figure_lt, 'Exponential', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
##########################
# Exponential plus Normal
##########################
train_data = list(np.random.exponential(1, 950) + np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950) + np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95) + np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(4, figure, figure_lt, 'Exponential plus normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
##########################
# Exponential times Normal
##########################
train_data = list(np.random.exponential(1, 950) * np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950) * np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95) * np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(5, figure, figure_lt, 'Exponential times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#######################
# Uniform times Normal
#######################
train_data = list(np.random.rand(950) * np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.rand(950) * np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.rand(95) * np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(6, figure, figure_lt, 'Uniform times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
#############
# Log-normal
#############
train_data = list(np.random.lognormal(0., 1., 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.lognormal(0., 1., 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.lognormal(0., 1., 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(7, figure, figure_lt, 'Log-normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
########################
# Log-normal times beta
########################
train_data = list(np.random.lognormal(0., 1., 950) * np.random.beta(5.,3.,950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.lognormal(0., 1., 950) * np.random.beta(5.,3.,950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.lognormal(0., 1., 95) * np.random.beta(5.,3.,95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(8, figure, figure_lt, 'Log-normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
####################
# Normal times beta
####################
train_data = list(np.random.normal(4,10,950) * np.random.beta(5.,3.,950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.normal(4,10,950) * np.random.beta(5.,3.,950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.normal(4,10,95) * np.random.beta(5.,3.,95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(9, figure, figure_lt, 'Normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences)
plt.show()
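# Note (added comment): generic_feature_bad_memory_test below repeats the
# feature-based experiment, but in the noise-free half every retraining
# sequence ends with a run of 50 zero samples, exercising the use_bad_memory
# flag during the long-term memory updates.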
def generic_feature_bad_memory_test(train, retrain, train_with_models, test, model_manager, sequence_len, number_of_sequences, use_bad_memory):
figure = plt.figure(1)
figure_lt = plt.figure(2)
figure.suptitle('Noise-free data')
figure_lt.suptitle('Noise-free data')
anomalous_data = np.zeros(100)
#############################
# Uniformly distributed data
#############################
train_data = np.random.rand(1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.rand(950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.rand(100)
test_feature_trial(1, figure, figure_lt, 'Uniform', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#############################
# Normally distributed data
#############################
train_data = np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.normal(0, 1, 950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.normal(0, 1, 100)
test_feature_trial(2, figure, figure_lt, 'Normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#################################
# Exponentially distributed data
#################################
train_data = np.random.exponential(1, 1000)
    retrain_data = list()
    for i in xrange(10):
        data = list(np.random.exponential(1, 950))
        for j in xrange(50):
            data.append(0)
        retrain_data.append(np.array(data))
    test_data = np.random.exponential(1, 100)
test_feature_trial(3, figure, figure_lt, 'Exponential', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
##########################
# Exponential plus Normal
##########################
train_data = np.random.exponential(1, 1000) + np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.exponential(1, 950) + np.random.normal(0, 1, 950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.exponential(1, 100) + np.random.normal(0, 1, 100)
test_feature_trial(4, figure, figure_lt, 'Exponential plus normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
##########################
# Exponential times Normal
##########################
train_data = np.random.exponential(1, 1000) * np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.exponential(1, 950) * np.random.normal(0, 1, 950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.exponential(1, 100) * np.random.normal(0, 1, 100)
test_feature_trial(5, figure, figure_lt, 'Exponential times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#######################
# Uniform times Normal
#######################
train_data = np.random.rand(1000) * np.random.normal(0, 1, 1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.rand(950) * np.random.normal(0, 1, 950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.rand(100) * np.random.normal(0, 1, 100)
test_feature_trial(6, figure, figure_lt, 'Uniform times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#############
# Log-normal
#############
train_data = np.random.lognormal(0., 1., 1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.lognormal(0., 1., 950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.lognormal(0., 1., 100)
test_feature_trial(7, figure, figure_lt, 'Log-normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
########################
# Log-normal times Beta
########################
train_data = np.random.lognormal(0., 1., 1000) * np.random.beta(5.,3.,1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.lognormal(0., 1., 950) * np.random.beta(5.,3.,950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.lognormal(0., 1., 100) * np.random.beta(5.,3.,100)
test_feature_trial(8, figure, figure_lt, 'Log-normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
####################
# Normal times Beta
####################
train_data = np.random.normal(4,10,1000) * np.random.beta(5.,3.,1000)
retrain_data = list()
for i in xrange(10):
data = list(np.random.normal(4,10,950) * np.random.beta(5.,3.,950))
for j in xrange(50):
data.append(0)
retrain_data.append(np.array(data))
test_data = np.random.normal(4,10,100) * np.random.beta(5.,3.,100)
test_feature_trial(9, figure, figure_lt, 'Normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
plt.show()
#############
# Noisy data
#############
figure = plt.figure(1)
figure.suptitle('Noisy data')
figure_lt = plt.figure(2)
figure_lt.suptitle('Noisy data')
#############################
# Uniformly distributed data
#############################
train_data = list(np.random.rand(950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.rand(950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.rand(95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(1, figure, figure_lt, 'Uniform', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#############################
# Normally distributed data
#############################
train_data = list(np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(2, figure, figure_lt, 'Normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#################################
# Exponentially distributed data
#################################
train_data = list(np.random.exponential(1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(3, figure, figure_lt, 'Exponential', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
##########################
# Exponential plus Normal
##########################
train_data = list(np.random.exponential(1, 950) + np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950) + np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95) + np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(4, figure, figure_lt, 'Exponential plus normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
##########################
# Exponential times Normal
##########################
train_data = list(np.random.exponential(1, 950) * np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.exponential(1, 950) * np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.exponential(1, 95) * np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(5, figure, figure_lt, 'Exponential times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#######################
# Uniform times Normal
#######################
train_data = list(np.random.rand(950) * np.random.normal(0, 1, 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.rand(950) * np.random.normal(0, 1, 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.rand(95) * np.random.normal(0, 1, 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(6, figure, figure_lt, 'Uniform times normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
#############
# Log-normal
#############
train_data = list(np.random.lognormal(0., 1., 950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.lognormal(0., 1., 950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.lognormal(0., 1., 95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(7, figure, figure_lt, 'Log-normal', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
########################
# Log-normal times beta
########################
train_data = list(np.random.lognormal(0., 1., 950) * np.random.beta(5.,3.,950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.lognormal(0., 1., 950) * np.random.beta(5.,3.,950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.lognormal(0., 1., 95) * np.random.beta(5.,3.,95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(8, figure, figure_lt, 'Log-normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
####################
# Normal times beta
####################
train_data = list(np.random.normal(4,10,950) * np.random.beta(5.,3.,950))
for j in xrange(50):
index = np.random.randint(0, len(train_data))
train_data.insert(index, 10.)
train_data = np.array(train_data)
retrain_data = list()
for j in xrange(10):
retrain_data.append(list(np.random.normal(4,10,950) * np.random.beta(5.,3.,950)))
for _ in xrange(50):
index = np.random.randint(0, len(retrain_data[j]))
retrain_data[j].insert(index, 10.)
retrain_data[j] = np.array(retrain_data[j])
test_data = list(np.random.normal(4,10,95) * np.random.beta(5.,3.,95))
for j in xrange(5):
index = np.random.randint(0, len(test_data))
test_data.insert(index, 10.)
test_data = np.array(test_data)
test_feature_trial(9, figure, figure_lt, 'Normal times beta', train, retrain, train_with_models, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
plt.show()
def generic_sensor_test(train_data, retrain_data, test_data, anomalous_data, train, retrain, test, curve_functions, model_manager, number_of_model_parameters, sequence_len, number_of_sequences):
for i in xrange(len(number_of_model_parameters)):
test_sensor_trial('', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, curve_functions[i], number_of_model_parameters[i], sequence_len, number_of_sequences)
plt.show()
def generic_sensor_feature_test(train_data, retrain_data, test_data, anomalous_data, train, retrain, train_with_models, test, model_manager, sequence_len, number_of_sequences, use_bad_memory):
test_sensor_feature_trial('', train, retrain, test, train_data, retrain_data, test_data, anomalous_data, model_manager, sequence_len, number_of_sequences, use_bad_memory)
plt.show()
|
|
#!/usr/bin/python
__author__ = "Ryan Plyler"
__version__ = 0.2
import sys
import json
import os
########################################################################
# Config
########################################################################
TODO_FILENAME = os.path.join(os.getcwd(), '.todo.list')
########################################################################
# Global Classes: bcolors Status
########################################################################
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
RECV = '\033[33m' # yellow
FAIL = '\033[91m'
ENDC = '\033[0m'
INFO = '\033[37m'
WHITE = '\033[97m'
class Status:
PENDING = "PENDING"
DONE = " DONE"
########################################################################
# Helper Functions: usage() getLineCount() readlines() nextID()
########################################################################
def usage():
print "\nUsage:"
print "\ttodo | List the todos for the current directory"
print "\ttodo show|list | Same as 'todo'"
print "\ttodo new <new todo> | Create a new todo"
print "\ttodo complete|done <todo-id> | Mark a todo as complete"
print "\ttodo remove|rm|delete|del <todo-id> | Remove a todo"
print "\ttodo undo <todo-id> | Undo a 'DONE' todo. Make it pending again."
print "\ttodo purge | Delete all todos and todo savedata for the cwd"
print "\ttodo help | Show this help"
print
def getLineCount():
with open(TODO_FILENAME) as f:
lines = f.readlines()
return len(lines)
def readlines():
with open(TODO_FILENAME) as f:
lines = f.readlines()
linecount = len(lines)
return lines, linecount
def nextID():
"""Get the the number of what the next todo ID should be"""
return getLineCount() + 1
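# Note: todo IDs are implicit line numbers in .todo.list, so removing a todo
# renumbers every todo after it; showTodos() reassigns the displayed IDs from
# the current line positions on every run.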
########################################################################
# Core functionality functions:
# newTodo() removeTodo(id) completeTodo(id) undoTodo(id)
# showTodos()
########################################################################
def newTodo(content):
formatted = bcolors.WHITE + "[" + "%id" + "] " + bcolors.ENDC + Status.PENDING + ": " + content + "\n"
with open(TODO_FILENAME, "a") as f:
f.write(formatted)
print "Added todo #%d" % getLineCount()
def removeTodo(id):
id = int(id)
lineCounter = 1
lines, linecount = readlines()
todoRemoved = False
newFile = open(TODO_FILENAME, 'w')
for line in lines:
# Write all the lines back to the file except the line number of id
if lineCounter != id:
newFile.write(line)
else:
todoRemoved = True
# increment the line counter
lineCounter += 1
newFile.close()
if todoRemoved:
print "Removed todo #%s" % id
else:
print "No todo #%s found" % id
def completeTodo(id):
id = int(id)
lines, linecount = readlines()
todoCompleted = False
newFile = open(TODO_FILENAME, 'w')
lineCounter = 1
for line in lines:
# Write all the lines back to the file, marking the line matching id as DONE
if lineCounter == id:
line = line.replace(Status.PENDING, Status.DONE)
newFile.write(line)
todoCompleted = True
else:
newFile.write(line)
# increment the line counter
lineCounter += 1
newFile.close()
if todoCompleted:
print "Completed todo #%s" % id
else:
print "No todo #%s found." % id
def undoTodo(id):
# oldFile = open(TODO_FILENAME, 'r')
# lines = oldFile.readlines()
# oldFile.close()
# todoCompleted = False
# newFile = open(TODO_FILENAME, 'w')
# idFormmated = "[" + str(id) + "]"
#
# for line in lines:
# if idFormmated in line:
# line = line.replace(Status.DONE, Status.PENDING)
# newFile.write(line)
# todoCompleted = True
# else:
# newFile.write(line)
#
# newFile.close()
# if todoCompleted:
# print "Undid todo #" + id + " now its pending again..."
# else:
# print "No todo #" + id + " found."
id = int(id)
lines, linecount = readlines()
todoCompleted = False
newFile = open(TODO_FILENAME, 'w')
lineCounter = 1
for line in lines:
# Write all the lines back to the file, marking the line matching id as PENDING again
if lineCounter == id:
line = line.replace(Status.DONE, Status.PENDING)
newFile.write(line)
todoCompleted = True
else:
newFile.write(line)
# increment the line counter
lineCounter += 1
newFile.close()
if todoCompleted:
print "Undid todo #%s" % id
else:
print "No todo #%s found." % id
def showTodos():
lineCounter = 1
try:
lines, linecount = readlines()
for line in lines:
# if Status.PENDING in line:
# line = line.replace(Status.PENDING, bcolors.FAIL + Status.PENDING + bcolors.ENDC)
# elif Status.DONE in line:
# line = line.replace(Status.DONE, bcolors.OKGREEN + Status.DONE + bcolors.ENDC)
# sys.stdout.write(line)
# Auto-assign the todo ID based on the line it is on in the todo.list file
line = line.replace("%id", str(lineCounter))
if Status.PENDING in line:
line = line.replace(Status.PENDING, bcolors.FAIL + Status.PENDING + bcolors.ENDC)
elif Status.DONE in line:
line = line.replace(Status.DONE, bcolors.OKGREEN + Status.DONE + bcolors.ENDC)
sys.stdout.write(line)
lineCounter += 1
except IOError:
print "No todos created for this directory yet"
########################################################################
# Parse command line arguments
########################################################################
if len(sys.argv) == 1:
showTodos()
elif sys.argv[1] == "new":
content = " ".join(sys.argv[2:])
newTodo(content)
elif sys.argv[1] == "complete" or sys.argv[1] == "done":
completeTodo(sys.argv[2])
elif sys.argv[1] == "undo":
undoTodo(sys.argv[2])
elif sys.argv[1] == "remove" or sys.argv[1] == "delete" or sys.argv[1] == "del" or sys.argv[1] == "rm":
if len(sys.argv) < 3:
print "You must specify a todo ID to remove."
else:
removeTodo(sys.argv[2])
elif sys.argv[1] == "show" or sys.argv[1] == "list":
showTodos()
elif sys.argv[1] == "help":
usage()
elif sys.argv[1] == "purge":
ans = raw_input("Are you sure you want to delete and remove all traces of todos? (y/n): ")
if ans == 'y':
if os.path.isfile(TODO_FILENAME):
os.remove(str(TODO_FILENAME))
print "Removed todo file"
else:
print "Could not delete todo file"
else:
print "Aborting deletion"
else:
print "Unknown operation: " + sys.argv[1]
usage()
########################################################################
# Cleanup and exit
########################################################################
|
|
from matplotlib import pyplot as plt, patches
from scipy.spatial import distance
from sklearn.manifold import MDS
import numpy as np
import os
import basics
import rater_analysis
import svg_polygons
import Voronoi
import geometry
# Globals
label_font_size = 10 # points
axis_font_size = 8 # points
legend_font_size = 10 # points
figure_width = 5.5 # inches
def plot_all(chain_wide_palette=True, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, save_location=False):
for experiment in range(0, len(basics.chain_codes)):
plot_experiment(experiment+1, chain_wide_palette, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, save_location)
def plot_experiment(experiment, chain_wide_palette=True, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, save_location=False):
# Set directory for saving, and create it if it doesn't exist
if save_location == False:
save_location = basics.desktop_location
save_location += str(experiment) + '/'
if os.path.exists(save_location) == True:
if raw_input(save_location + ' already exists. Do you want to overwrite? (y/n) ') != 'y':
return
else:
os.makedirs(save_location)
for chain in basics.chain_codes[experiment-1]:
print('Chain: ' + chain)
plot_chain(chain, experiment, chain_wide_palette, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, False, save_location)
def plot_chain(chain, experiment=None, chain_wide_palette=True, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, random_seed=False, save_location=False):
# Determine experiment number if none is supplied
if experiment == None:
experiment = basics.determine_experiment_number(chain)
# If one palette has been requested, get all strings from entire chain and create a colour palette
if chain_wide_palette == True:
print('Generating colour palette...')
all_strings = []
for generation in range(0, 11):
all_strings += basics.getWords(experiment, chain, generation, 's')
colour_palette, random_seed = generate_colour_palette(all_strings, use_rgb, spectrum, random_seed)
else:
colour_palette = None
# Set directory for saving, and create it if it doesn't exist
if save_location == False:
save_location = basics.desktop_location
save_location += chain + '_' + str(random_seed) + '/'
if os.path.exists(save_location) == True:
if raw_input(save_location + ' already exists. Do you want to overwrite? (y/n) ') != 'y':
return
else:
os.makedirs(save_location)
# Produce a plot for each generation
print('Generating graphics...')
for generation in range(0, 11):
plot(chain, generation, experiment, colour_palette, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, False, random_seed, save_location)
def plot(chain, generation, experiment=None, colour_palette=None, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, colour_candidates=False, random_seed=False, save_location=False):
# Determine experiment number if none supplied
if experiment == None:
experiment = basics.determine_experiment_number(chain)
# Get strings and triangles for this generation
strings = basics.getWords(experiment, chain, generation, 's')
triangles = basics.getTriangles(experiment, chain, generation, 's')
# Pick a colour palette if none has been supplied
if colour_palette == None:
colour_palette, random_seed = generate_colour_palette(strings, use_rgb, spectrum, random_seed)
chain_palette = False
else:
chain_palette = True
if type(colour_candidates) == int:
candidate_num = '_' + str(random_seed)
else:
candidate_num = ''
# Organize strings and triangles into categories
word_dict = {}
triangle_dict = {}
for i in range(0, len(strings)):
if strings[i] in word_dict.keys():
word_dict[strings[i]].append(i)
triangle_dict[strings[i]].append(triangles[i])
else:
word_dict[strings[i]] = [i]
triangle_dict[strings[i]] = [triangles[i]]
# Set up subplot in top left
plt.subplots(figsize=(figure_width, figure_width/1.375))
ax1 = plt.subplot2grid((11,2), (0,0), rowspan=7)
# Determine the optimum size for the grid of triangle images / grid of legend labels
# (a square number larger than the number of unique strings)
for square in [1, 4, 9, 16, 25, 36, 49]:
if square >= len(word_dict.keys()):
break
grid_size = int(np.sqrt(square))
# Rearrange words so that they'll appear in alphabetical order along rows of the legend
words = rearrange(word_dict.keys(), grid_size)
# Plot MDS coordinates and the Voronoi polygons
for word in words:
indices = word_dict[word]
colour, colour_light = colour_palette[word]
X, Y = triangle_coordinates[indices, 0], triangle_coordinates[indices, 1]
plt.scatter(X, Y, c=colour_light, label=word, marker='o', s=12, linewidth=0, zorder=0)
plt.scatter(X, Y, c=colour, marker='o', s=12, linewidth=0, zorder=2)
if join_contiguous_cells == True:
regional_polys = Voronoi.join_contiguous_polygons(voronoi_polygons[indices])
for poly in regional_polys:
ax1.add_patch(patches.Polygon(poly, facecolor=colour_light, edgecolor='white', linewidth=0.5, zorder=1))
else:
for i in indices:
ax1.add_patch(patches.Polygon(voronoi_polygons[i], facecolor=colour_light, edgecolor='white', linewidth=0.5, zorder=0))
if label_cells == True:
x, y = centroid(voronoi_polygons[i])
ax1.text(x, y, word, {'fontsize':5}, ha='center', va='center')
# Set axis style
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel("MDS dimension 1", fontsize=label_font_size)
plt.ylabel("MDS dimension 2", fontsize=label_font_size)
plt.xticks(fontsize=axis_font_size)
plt.yticks(fontsize=axis_font_size)
# Set up subplot at bottom for legend
ax2 = plt.subplot2grid((11,2), (7,0), colspan=2)
plt.axis('off')
# Produce the legend
handles, labels = ax1.get_legend_handles_labels()
ax2.legend(handles, labels, loc='upper center', bbox_to_anchor=[0.45, 0.5], frameon=False, prop={'size':legend_font_size}, ncol=grid_size, scatterpoints=1, handletextpad=0.01, markerscale=2.5)
# Tighten plot layout
plt.tight_layout(pad=0.2, h_pad=0.0)
# Determine filename and directory if none has been specified
if type(save_location) == bool and save_location == False:
save_location = basics.desktop_location
if chain_palette == True:
filename = save_location + chain + str(generation) + '.svg'
else:
filename = save_location + chain + str(generation) + '_' + str(random_seed) + '.svg'
# Save matplotlib plot as SVG file
plt.savefig(filename)
plt.close()
# Draw the triangle images and splice them into the matplotlib SVG file
triangle_code = draw_triangles(triangle_dict, colour_palette, show_prototypes, grid_size)
splice_in_triangles(filename, triangle_code)
# If multiple colour palette candidates have been requested, run plot() again.
if colour_candidates > 1:
plot(chain, generation, experiment, None, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, colour_candidates-1, False, save_location)
def generate_colour_palette(strings, use_rgb=False, spectrum=[0.0, 1.0], random_seed=False):
# Get list of unique strings
words = list(set(strings))
# If there's only one word, just map that word to a grey colour and return, since
# it won't make sense to arrange the words in colour space.
if len(words) == 1:
return {words[0] : ('#B1B0CB', '#D8D8E5')}, random_seed  # return the seed too, since callers unpack (palette, seed)
# Create distance matrix giving normalized Levenshtein distances between the words
string_distances = np.array(basics.stringDistances(words), dtype=float)
string_distance_matrix = distance.squareform(string_distances, 'tomatrix')
if type(random_seed) != int:
# Pick a random number for the MDS algorithm
random_seed = np.random.randint(1, 1000000)
hex_colour_values = []
if use_rgb == True:
# Run distance matrix through MDS to determine the position of each word in 3-dimensional space
string_mds = MDS(dissimilarity='precomputed', n_components=3, n_init=25, max_iter=2000, random_state=random_seed)
string_coordinates = string_mds.fit_transform(string_distance_matrix)
# Scale the dimensions of the space over the interval [0, 255] to create an RGB colour space.
# The spectrum argument determines how much of the colour space will be used, allowing you to
# avoid very dark and very light colours.
for dim in range(0, 3):
minimum = string_coordinates[:, dim].min()
difference = string_coordinates[:, dim].max() - minimum
string_coordinates[:, dim] = (((string_coordinates[:, dim] - minimum) / difference) * (255 * (spectrum[1] - spectrum[0]))) + (255 * spectrum[0])
# Convert RGB values to hexadecimal triplets (the light version is for the Voronoi cells)
for r, g, b in string_coordinates:
hex_colour = rgb_to_hex((r, g, b))
hex_colour_light = rgb_to_hex(lighten((r, g, b)))
hex_colour_values.append((hex_colour, hex_colour_light))
else:
# Run distance matrix through MDS to determine the position of each word in 2-dimensional space
string_mds = MDS(dissimilarity='precomputed', n_components=2, n_init=25, max_iter=2000, random_state=random_seed)
string_coordinates = string_mds.fit_transform(string_distance_matrix)
# Convert Cartesian coordinates to polar coordinates
polar_coordinates = np.array([polarize(point) for point in string_coordinates])
# Rescale the saturation coordinates in the specified spectrum
minimum = polar_coordinates[:, 1].min()
difference = polar_coordinates[:, 1].max() - minimum
polar_coordinates[:, 1] = (((polar_coordinates[:, 1] - minimum) / difference) * (spectrum[1] - spectrum[0])) + (spectrum[0])
# Convert HSV values to hexadecimal triplets via RGB, keeping V (brightness) constant
# The light version is for the Voronoi cells
for h, s in polar_coordinates:
hex_colour = rgb_to_hex(hsv_to_rgb(h, s, 0.8))
hex_colour_light = rgb_to_hex(hsv_to_rgb(h, s, 1.0))
hex_colour_values.append((hex_colour, hex_colour_light))
#print('Correspondence: %s' % correspondence_correlation(string_distances, string_coordinates))
#print('Stress-1: %s' % stress_1(string_mds.stress_, string_distances))
# Return the colour palette and the random seed
return dict(zip(words, hex_colour_values)), random_seed
def draw_triangles(triangles, colour_palette, show_prototypes, grid_size):
# Alphabetize words so they can be plotted alphabetically
words = sorted(triangles.keys())
# Set up a Canvas object and clear it (clearing appears to be required before drawing, though it is not obvious why)
canvas = svg_polygons.Canvas(figure_width*72, (figure_width/1.375)*72)
canvas.clear()
# Determine the size of each triangle cell, giving 5 points of cell spacing
point_size = (171.2 / grid_size) - 5.0
# Determine scaling factor by which all triangles will need to be scaled
scale_factor = point_size / 500.0
# Determine the radius of the orienting spots and the width of the strokes
radius = 8.0 * scale_factor
stroke = max([0.5, 2.0 * scale_factor])
# Start at cell 0,0
x_position = 0
y_position = 0
# For each of the words...
for word in words:
# Determine the offset and colour, and draw the bounding box to the canvas
offset = np.array([(figure_width*72*0.575) + (x_position * point_size) + (x_position * 5.0), 6.45 + (y_position * point_size) + (y_position * 5.0)])
colour, colour_light = colour_palette[word]
canvas.add_box(offset, point_size, point_size)
# For each triangle labelled by this word...
for triangle in triangles[word]:
# Translate and scale the triangle, and draw it to the canvas
trans_triangle = (triangle * scale_factor) + offset
canvas.add_polygon(trans_triangle, border_colour=colour, stroke_width=stroke)
canvas.add_circle(trans_triangle[0], radius, border_colour=colour, fill_colour=colour)
# If there's more than one triangle in the set, produce a prototype and draw it to the canvas
if len(triangles[word]) > 1 and show_prototypes == True:
prototype = make_prototype(triangles[word], False)
trans_prototype = (prototype * scale_factor) + offset
canvas.add_polygon(trans_prototype, border_colour=colour, fill_colour=colour_light, stroke_width=stroke)
# Increment the x and y positions
if x_position < grid_size-1:
x_position += 1
else:
x_position = 0
y_position += 1
# Turn the canvas objects into SVG code
canvas.write_everything()
# Return the SVG code for the canvas
return canvas.canvas
def make_prototype(triangles, spot_based=True):
trans_triangles = []
for t in triangles:
# Centralize the triangle in the plane
t += np.array([250.0, 250.0]) - geometry.centroid(t)
# If a non-spot-based prototype is requested, swap the vertices around so that vertex 1 is
# the pointiest one.
if spot_based == False:
angles = [geometry.angle(t,1), geometry.angle(t,2), geometry.angle(t,3)]
min_angle = angles.index(min(angles))
if min_angle == 0: t = np.array([t[0], t[1], t[2]])
elif min_angle == 1: t = np.array([t[1], t[2], t[0]])
elif min_angle == 2: t = np.array([t[2], t[0], t[1]])
# Rotate the triangle around its centroid so that vertex 1 points North
t = geometry.rotate(t)
# Ensure that vertex 2 is to the left of vertex 3 to prevent cancelling out
if t[1,0] > t[2,0]:
t = np.array([t[0], t[2], t[1]])
trans_triangles.append(t)
# Reformat as Numpy array and take the mean of the coordinates to form the prototype
trans_triangles = np.asarray(trans_triangles, dtype=float)
prototype = trans_triangles.mean(axis=0)
# Shift the prototype such that its bounding box is vertically centralized in the plane
prototype[:, 1] += ((500.0 - (max([prototype[1,1], prototype[2,1]]) - prototype[0,1])) / 2.0) - prototype[0,1]
return prototype
# Rearrange a list of words so that when displayed in a Matplotlib legend, they will be
# alphabetical along the rows, rather than down the columns.
def rearrange(words, grid_size):
words = sorted(words)
words_rearranged = []
for i in range(grid_size):
for j in range(grid_size):
try:
words_rearranged.append(words[(j*grid_size)+i])
except IndexError:
break
return words_rearranged
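# Worked example (hypothetical words): with grid_size = 2 and
# words = ['b', 'd', 'a', 'c'], sorted() yields ['a', 'b', 'c', 'd'] and the
# loops emit ['a', 'c', 'b', 'd']; a two-column legend filled down its columns
# then reads 'a' 'b' on the first row and 'c' 'd' on the second.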
# Opens an SVG file and splices in some extra SVG code at the end
def splice_in_triangles(filename, triangle_code):
f = open(filename, 'r')
graph_code = f.read()
f.close()
final_code = graph_code.replace('</svg>', triangle_code + '\n\n</svg>')
f = open(filename, 'w')
f.write(final_code)
f.close()
# Convert RGB value to hexadecimal triplet
def rgb_to_hex(rgb):
return '#' + ''.join(map(chr, map(int, map(round, rgb)))).encode('hex')
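# Example: rgb_to_hex((255, 0, 0)) gives '#ff0000'. Note that str.encode('hex')
# only exists on Python 2; a Python 3 port would need something like
# '#%02x%02x%02x' % tuple(map(int, map(round, rgb))) instead.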
# Convert hue [0,2pi], saturation [0,1], and brightness [0,1] into RGB
def hsv_to_rgb(h, s, v):
if s == 0.0: return v*255, v*255, v*255 # saturation is 0, so return an achromatic grey (white when v == 1)
h /= 2 * np.pi # scale hue (expressed in radians) in [0,1]
i = int(h*6.)
f = (h*6.)-i
p, q, t = v*(1.-s), v*(1.-s*f), v*(1.-s*(1.-f))
i %= 6
if i == 0: return v*255, t*255, p*255
elif i == 1: return q*255, v*255, p*255
elif i == 2: return p*255, v*255, t*255
elif i == 3: return p*255, q*255, v*255
elif i == 4: return t*255, p*255, v*255
return v*255, p*255, q*255
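# Sanity checks (illustrative): hsv_to_rgb(0.0, 1.0, 1.0) returns (255, 0, 0),
# i.e. pure red, and any hue with s == 0.0 and v == 1.0 returns (255, 255, 255)
# via the achromatic shortcut above.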
# Lighten a colour by blending in 50% white
def lighten(rgb):
return light(rgb[0]), light(rgb[1]), light(rgb[2])
def light(val):
return int(round(val + ((255 - val) * 0.5)))
# Return the centroid of an arbitrary polygon
# https://en.wikipedia.org/wiki/Centroid#Centroid_of_polygon
def centroid(polygon):
n = len(polygon)
a_sum, x_sum, y_sum = 0.0, 0.0, 0.0
for i in range(0, n):
if i == n - 1: j = 0
else: j = i + 1
p = (polygon[i][0] * polygon[j][1]) - (polygon[j][0] * polygon[i][1])
a_sum += p
x_sum += (polygon[i][0] + polygon[j][0]) * p
y_sum += (polygon[i][1] + polygon[j][1]) * p
f = 1.0 / (6.0 * (0.5 * a_sum))
return f * x_sum, f * y_sum
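# Worked example (hypothetical polygon): for the unit square
# [[0,0], [1,0], [1,1], [0,1]] the signed-area accumulator a_sum is 2.0,
# f = 1/6, and the function returns (0.5, 0.5) as expected.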
# Calculate the Euclidean distance in n-dimensional space
def ED(a, b):
return np.sqrt(sum([(a[i]-b[i])**2 for i in range(0, len(a))]))
# Convert Cartesian coordinates to polar coordinates
def polarize(xy):
# Angular coordinate
phi = np.arctan2(xy[1], xy[0])
if phi < 0.0:
phi += 2.0 * np.pi
# Radial coordinate
r = np.sqrt(xy[0]**2 + xy[1]**2)
return phi, r
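# Example: polarize((0.0, 1.0)) returns (pi/2, 1.0); negative arctan2 angles are
# wrapped into [0, 2*pi) so the hue later derived from phi is always non-negative.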
# Calculate the correspondence correlation - how well do the distances in
# MDS space correlate with the original distances
def correspondence_correlation(distances, mds_coordinates):
n = len(mds_coordinates)
mds_distances = [ED(mds_coordinates[i], mds_coordinates[j]) for i in range(n) for j in range(i+1, n)]
return np.corrcoef(distances, mds_distances)[0,1]
# Calculate stress-1
def stress_1(raw_stress, distances):
return np.sqrt(raw_stress / sum(distances ** 2))
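# Note: stress_1() implements Kruskal's Stress-1, the square root of the raw
# stress normalised by the sum of squared input dissimilarities; values closer
# to 0 indicate a better MDS fit.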
# Get dissimilarity ratings and format as square distance matrix
triangle_distances = rater_analysis.reliable_distance_array
triangle_distance_matrix = distance.squareform(triangle_distances, 'tomatrix')
# Run ratings through MDS to get coordinates in 2-dimensional space
triangle_mds = MDS(dissimilarity="precomputed", n_components=2, n_init=25, max_iter=2000, random_state=10)
triangle_coordinates = triangle_mds.fit_transform(triangle_distance_matrix)
# Scale each dimension over the interval [-0.9, 0.9] for a tidy plot
for dim in range(0, triangle_coordinates.shape[1]):
minimum = triangle_coordinates[:, dim].min()
difference = triangle_coordinates[:, dim].max() - minimum
triangle_coordinates[:, dim] = (((triangle_coordinates[:, dim] - minimum) / difference) * 1.8) - 0.9
# Compute the Voronoi polygons for these MDS coordinates
voronoi_polygons = Voronoi.polygons(triangle_coordinates, [[-1,-1], [-1,1], [1,1], [1,-1]])
# Print MDS goodness-of-fit stats
#print('Correspondence: %s' % correspondence_correlation(triangle_distances, triangle_coordinates))
#print('Stress-1: %s' % stress_1(triangle_mds.stress_, triangle_distances))
|
|
from game import Mode
from dmd import TextLayer, GroupedLayer
class ServiceModeSkeleton(Mode):
"""Service Mode List base class."""
def __init__(self, game, priority, font):
super(ServiceModeSkeleton, self).__init__(game, priority)
self.name = ""
self.title_layer = TextLayer(1, 1, font, "left")
self.item_layer = TextLayer(128/2, 12, font, "center")
self.instruction_layer = TextLayer(1, 25, font, "left")
self.layer = GroupedLayer(128, 32, [self.title_layer, self.item_layer, self.instruction_layer])
self.no_exit_switch = game.machine_type == 'sternWhitestar'
def mode_started(self):
self.title_layer.set_text(str(self.name))
self.game.sound.play('service_enter')
def mode_stopped(self):
self.game.sound.play('service_exit')
def disable(self):
pass
def sw_down_active(self, sw):
if self.game.switches.enter.is_active():
self.game.modes.remove(self)
return True
def sw_exit_active(self, sw):
self.game.modes.remove(self)
return True
class ServiceModeList(ServiceModeSkeleton):
"""Service Mode List base class."""
def __init__(self, game, priority, font):
super(ServiceModeList, self).__init__(game, priority, font)
self.items = []
def mode_started(self):
super(ServiceModeList, self).mode_started()
self.iterator = 0
self.change_item()
def change_item(self):
ctr = 0
for item in self.items:
if (ctr == self.iterator):
self.item = item
ctr += 1
self.max = ctr - 1
self.item_layer.set_text(self.item.name)
def sw_up_active(self,sw):
if self.game.switches.enter.is_inactive():
self.item.disable()
if (self.iterator < self.max):
self.iterator += 1
self.game.sound.play('service_next')
self.change_item()
return True
def sw_down_active(self,sw):
if self.game.switches.enter.is_inactive():
self.item.disable()
if (self.iterator > 0):
self.iterator -= 1
self.game.sound.play('service_previous')
self.change_item()
elif self.no_exit_switch:
self.exit()
return True
def sw_enter_active(self,sw):
self.game.modes.add(self.item)
return True
def exit(self):
self.item.disable()
self.game.modes.remove(self)
return True
class ServiceMode(ServiceModeList):
"""Service Mode."""
def __init__(self, game, priority, font, extra_tests=[]):
super(ServiceMode, self).__init__(game, priority,font)
#self.title_layer.set_text('Service Mode')
self.name = 'Service Mode'
self.tests = Tests(self.game, self.priority+1, font, extra_tests)
self.items = [self.tests]
if len(self.game.settings) > 0:
self.settings = Settings(self.game, self.priority+1, font, 'Settings', self.game.settings)
self.items.append(self.settings)
# if len(self.game.game_data) > 0:
# self.statistics = Statistics(self.game, self.priority+1, font, 'Statistics', self.game.game_data)
# self.items.append(self.statistics)
class Tests(ServiceModeList):
"""Service Mode."""
def __init__(self, game, priority, font, extra_tests=[]):
super(Tests, self).__init__(game, priority,font)
#self.title_layer.set_text('Tests')
self.name = 'Tests'
self.lamp_test = LampTest(self.game, self.priority+1, font)
self.coil_test = CoilTest(self.game, self.priority+1, font)
self.switch_test = SwitchTest(self.game, self.priority+1, font)
self.items = [self.switch_test, self.lamp_test, self.coil_test]
for test in extra_tests:
self.items.append(test)
class LampTest(ServiceModeList):
"""Lamp Test"""
def __init__(self, game, priority, font):
super(LampTest, self).__init__(game, priority,font)
self.name = "Lamp Test"
self.items = self.game.lamps
def change_item(self):
super(LampTest, self).change_item()
self.item.schedule(schedule=0x00ff00ff, cycle_seconds=0, now=True)
def sw_enter_active(self,sw):
return True
class CoilTest(ServiceModeList):
"""Coil Test"""
def __init__(self, game, priority, font):
super(CoilTest, self).__init__(game, priority, font)
self.name = "Coil Test"
self.title_layer.set_text('Coil Test - Enter btn: mode')
self.instruction_layer.set_text('Pulse with start button')
self.items = self.game.coils
def mode_started(self):
super(CoilTest, self).mode_started()
self.action = 'manual'
if self.game.lamps.has_key('startButton'): self.game.lamps.startButton.schedule(schedule=0xff00ff00, cycle_seconds=0, now=False)
self.delay(name='auto', event_type=None, delay=2.0, handler=self.process_auto)
def process_auto(self):
if (self.action == 'auto'):
self.item.pulse(20)
self.delay(name='auto', event_type=None, delay=2.0, handler=self.process_auto)
def sw_enter_active(self,sw):
if (self.action == 'manual'):
self.action = 'auto'
if self.game.lamps.has_key('startButton'): self.game.lamps.startButton.disable()
self.instruction_layer.set_text('Auto pulse')
elif (self.action == 'auto'):
self.action = 'manual'
if self.game.lamps.has_key('startButton'): self.game.lamps.startButton.schedule(schedule=0xff00ff00, cycle_seconds=0, now=False)
self.instruction_layer.set_text('Pulse with start button')
return True
def sw_startButton_active(self,sw):
if (self.action == 'manual'):
self.item.pulse(20)
return True
class SwitchTest(ServiceModeSkeleton):
"""Switch Test"""
def __init__(self, game, priority, font):
super(SwitchTest, self).__init__(game, priority,font)
self.name = "Switch Test"
for switch in self.game.switches:
if self.game.machine_type == 'sternWhitestar':
add_handler = 1
elif switch != self.game.switches.exit:
add_handler = 1
else:
add_handler = 0
if add_handler:
self.add_switch_handler(name=switch.name, event_type='inactive', delay=None, handler=self.switch_handler)
self.add_switch_handler(name=switch.name, event_type='active', delay=None, handler=self.switch_handler)
def switch_handler(self, sw):
if (sw.state):
self.game.sound.play('service_switch_edge')
self.item_layer.set_text(sw.name + ' - ' + str(sw.state))
return True
def sw_enter_active(self,sw):
return True
class Statistics(ServiceModeList):
"""Service Mode."""
def __init__(self, game, priority, font, name, itemlist):
super(Statistics, self).__init__(game, priority,font)
#self.title_layer.set_text('Settings')
self.name = name
self.items = []
for section in itemlist:
self.items.append( StatsDisplay( self.game, priority + 1, font, str(section),itemlist[section] ))
class StatsDisplay(ServiceModeList):
"""Coil Test"""
def __init__(self, game, priority, font, name, itemlist):
super(StatsDisplay, self).__init__(game, priority, font)
self.name = name
self.value_layer = TextLayer(128/2, 22, font, "center")
self.items = []
for item in sorted(itemlist.iterkeys()):
if type(itemlist[item])==type({}):
self.items.append( HighScoreItem(str(item), itemlist[item]['name'], itemlist[item]['score']) )
else:
self.items.append( StatsItem(str(item), itemlist[item]) )
self.layer = GroupedLayer(128, 32, [self.title_layer, self.item_layer, self.value_layer])
def mode_started(self):
super(StatsDisplay, self).mode_started()
def change_item(self):
super(StatsDisplay, self).change_item()
if not hasattr(self.item, 'score'):
self.item.score = 'None'
if self.item.score == 'None':
self.value_layer.set_text(str(self.item.value))
else:
self.value_layer.set_text(self.item.value + ": " + str(self.item.score))
def sw_enter_active(self, sw):
return True
class StatsItem:
"""Service Mode."""
def __init__(self, name, value):
self.name = name
self.value = value
def disable(self):
pass
class HighScoreItem:
"""Service Mode."""
def __init__(self, name, value, score):
self.name = name
self.value = value
self.score = score
def disable(self):
pass
class Settings(ServiceModeList):
"""Service Mode."""
def __init__(self, game, priority, font, name, itemlist):
super(Settings, self).__init__(game, priority,font)
#self.title_layer.set_text('Settings')
self.name = name
self.items = []
self.font = font
for section in sorted(itemlist.iterkeys()):
self.items.append( SettingsEditor( self.game, priority + 1, font, str(section),itemlist[section] ))
class SettingsEditor(ServiceModeList):
"""Service Mode."""
def __init__(self, game, priority, font, name, itemlist):
super(SettingsEditor, self).__init__(game, priority, font)
self.title_layer = TextLayer(1, 1, font, "left")
self.item_layer = TextLayer(128/2, 12, font, "center")
self.instruction_layer = TextLayer(1, 25, font, "left")
self.no_exit_switch = game.machine_type == 'sternWhitestar'
#self.title_layer.set_text('Settings')
self.name = name
self.items = []
self.value_layer = TextLayer(128/2, 19, font, "center")
self.layer = GroupedLayer(128, 32, [self.title_layer, self.item_layer, self.value_layer, self.instruction_layer])
for item in sorted(itemlist.iterkeys()):
#self.items.append( EditItem(str(item), itemlist[item]['options'], itemlist[item]['value'] ) )
if 'increments' in itemlist[item]:
num_options = (itemlist[item]['options'][1]-itemlist[item]['options'][0]) / itemlist[item]['increments']
option_list = []
for i in range(0,num_options):
option_list.append(itemlist[item]['options'][0] + (i * itemlist[item]['increments']))
self.items.append( EditItem(str(item), option_list, self.game.user_settings[self.name][item]) )
else:
self.items.append( EditItem(str(item), itemlist[item]['options'], self.game.user_settings[self.name][item]) )
self.state = 'nav'
self.stop_blinking = True
self.item = self.items[0]
self.value_layer.set_text(str(self.item.value))
self.option_index = self.item.options.index(self.item.value)
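# Illustration (hypothetical settings entry): an item defined as
# {'options': [0, 10], 'increments': 2} expands to the option list
# [0, 2, 4, 6, 8]; the upper bound itself is not generated because only
# (10 - 0) / 2 = 5 values are produced, starting from the lower bound.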
def mode_started(self):
super(SettingsEditor, self).mode_started()
def mode_stopped(self):
self.game.sound.play('service_exit')
def sw_enter_active(self, sw):
if not self.no_exit_switch:
self.process_enter()
return True
def process_enter(self):
if self.state == 'nav':
self.state = 'edit'
self.blink = True
self.stop_blinking = False
self.delay(name='blink', event_type=None, delay=.3, handler=self.blinker)
else:
self.state = 'nav'
self.instruction_layer.set_text("Change saved")
self.delay(name='change_complete', event_type=None, delay=1, handler=self.change_complete)
self.game.sound.play('service_save')
self.game.user_settings[self.name][self.item.name]=self.item.value
self.stop_blinking = True
self.game.save_settings()
def sw_exit_active(self, sw):
self.process_exit()
return True
def process_exit(self):
if self.state == 'nav':
self.game.modes.remove(self)
else:
self.state = 'nav'
self.value_layer.set_text(str(self.item.value))
self.stop_blinking = True
self.game.sound.play('service_cancel')
self.instruction_layer.set_text("Change cancelled")
self.delay(name='change_complete', event_type=None, delay=1, handler=self.change_complete)
def sw_up_active(self, sw):
if self.game.switches.enter.is_inactive():
self.process_up()
else:
self.process_enter()
return True
def process_up(self):
if self.state == 'nav':
self.item.disable()
if (self.iterator < self.max):
self.iterator += 1
self.game.sound.play('service_next')
self.change_item()
else:
if self.option_index < (len(self.item.options) - 1):
self.option_index += 1
self.item.value = self.item.options[self.option_index]
self.value_layer.set_text(str(self.item.value))
def sw_down_active(self, sw):
if self.game.switches.enter.is_inactive():
self.process_down()
else:
self.process_exit()
return True
def process_down(self):
if self.state == 'nav':
self.item.disable()
if (self.iterator > 0):
self.iterator -= 1
self.game.sound.play('service_previous')
self.change_item()
else:
if self.option_index > 0:
self.option_index -= 1
self.item.value = self.item.options[self.option_index]
self.value_layer.set_text(str(self.item.value))
def change_item(self):
ctr = 0
for item in self.items:
if ctr == self.iterator:
self.item = item
ctr += 1
self.max = ctr - 1
self.item_layer.set_text(self.item.name)
self.value_layer.set_text(str(self.item.value))
self.option_index = self.item.options.index(self.item.value)
def disable(self):
pass
def blinker(self):
if self.blink:
self.value_layer.set_text(str(self.item.value))
self.blink = False
else:
self.value_layer.set_text("")
self.blink = True
if not self.stop_blinking:
self.delay(name='blink', event_type=None, delay=.3, handler=self.blinker)
else:
self.value_layer.set_text(str(self.item.value))
def change_complete(self):
self.instruction_layer.set_text("")
class EditItem:
"""Service Mode."""
def __init__(self, name, options, value):
self.name = name
self.options = options
self.value = value
def disable(self):
pass
|
|
from common.tests import ViewTestCase
from common import api
from common import clean
from common import exception
from common import util
class LoginTest(ViewTestCase):
def test_login_when_signed_out(self):
r = self.login_and_get(None, '/login')
self.assertContains(r, "Forgot your password?")
self.assertContains(r, "Sign Up Now")
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_when_signed_in(self):
r = self.login_and_get('popular', '/login')
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_redirect_to(self):
r = self.login_and_get('popular', '/login', {'redirect_to': '/channel'})
r = self.assertRedirectsPrefix(r, '/channel')
self.assertTemplateUsed(r, 'channel/templates/index.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login(self):
log = 'popular'
pwd = self.passwords[clean.nick(log)]
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_with_confirmed_email(self):
log = 'hotness'
pwd = self.passwords[clean.nick(log)]
confirmed_email = '[email protected]'
r = self.client.post('/login', {'log': confirmed_email, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/hotness/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_bad_password(self):
log = 'popular'
pwd = 'BAD PASSWORD'
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username or password')
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_bad_user(self):
log = 'BAD USER'
pwd = 'BAD PASSWORD'
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username or password')
self.assertTemplateUsed(r, 'login/templates/login.html')
def test_login_user_cleanup(self):
log = 'broken'
pwd = self.passwords[clean.nick(log)]
actor_ref_pre = api.actor_get(api.ROOT, log)
self.assert_(not actor_ref_pre.normalized_nick)
self.assertRaises(exception.ApiException,
api.stream_get_presence,
api.ROOT,
log)
self.assertRaises(exception.ApiException,
api.stream_get_comment,
api.ROOT,
log)
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/broken/overview')
actor_ref_post = api.actor_get(api.ROOT, log)
self.assert_(actor_ref_post.normalized_nick)
self.assert_(api.stream_get_presence(api.ROOT, log))
self.assert_(api.stream_get_comment(api.ROOT, log))
def test_login_deleted(self):
log = 'popular'
pwd = self.passwords[clean.nick(log)]
r = self.client.post('/login', {'log': log, 'pwd': pwd})
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(r, 'actor/templates/overview.html')
self.assertTemplateUsed(r, 'common/templates/flash.html')
api.actor_remove(api.ROOT, 'popular')
r = self.client.post('/login', {'log': log, 'pwd': pwd})
self.assert_error_contains(r, 'Invalid username')
self.assertTemplateUsed(r, 'login/templates/login.html')
# Test cases and expected outcomes:
# 'annoying', 'girlfriend' do not have an email associated
# 'hermit' has an unconfirmed email
class LoginForgotTest(ViewTestCase):
##### Forgot password tests:
def test_login_forgot_already_logged_in(self):
r = self.login_and_get('popular', '/login/forgot')
# User gets sent back to the home page. Unfortunately, since this is
# 'prefix', it will match a redirect anywhere. :(
r = self.assertRedirectsPrefix(r, '/', target_status_code=302)
# For this reason, test the second redirect:
r = self.assertRedirectsPrefix(r, '/user/popular/overview')
def test_login_forgot(self):
r = self.client.get('/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
def test_login_forgot_nick_popular(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
def test_login_reset(self):
self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular',
})
email = api.email_get_actor(api.ROOT, 'popular')
activation_ref = api.activation_get(api.ROOT,
email,
'password_lost',
email)
self.assert_(activation_ref)
hash = util.hash_generic(activation_ref.code)
r = self.client.get('/login/reset', {'email' : email, 'hash' : hash})
self.assertContains(r, 'Your password has been reset')
# once it's used, the activation link cannot be used again
r = self.client.get('/login/reset', {'email' : email, 'hash' : hash})
self.assertRedirectsPrefix(r, '/error', target_status_code=200)
# User enters 'popular', 'popular' has a confirmed email.
# - Send notification to that email.
def test_nick_confirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'popular',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters 'hermit', 'hermit' has an unconfirmed email
# - Send notification to that email.
def test_nick_unconfirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'hermit',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# TODO(termie): stub
# User enters 'popular', 'popular' has an unconfirmed email (shared with other
# users)
# - Send notification to that email.
def test_nick_multiple_unconfirmed(self):
pass
# User enters 'annoying', 'annoying' does not have an email
# - Tough shit.
def test_nick_no_email(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'annoying',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'does not have an email')
# User enters a user that doesn't exist
# - Tough shit.
def test_unknown_nick(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'idontexist',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'not found')
# User enters '[email protected]', a confirmed email for 'popular'
# - Send notification to that email.
def test_email_confirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : '[email protected]',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# User enters '[email protected]', an unconfirmed email for 'hermit'
# - Send notification to that email
def test_email_unconfirmed(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : '[email protected]',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
# TODO(termie): stub
# User enters '[email protected]', an unconfirmed email for 'popular', 'unpopular'
# - Tough shit.
def test_email_multiple_unconfirmed(self):
pass
# User enters '[email protected]', which doesn't map to anything
# - Tough shit.
def test_email_notfound(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : '[email protected]',
})
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'does not match any accounts')
class LoginResetTest(ViewTestCase):
#def test_mixed_case(self):
# activation_ref = api.activation_create(api.ROOT, '[email protected]', 'password_lost', '[email protected]')
# code = util.hash_generic(activation_ref)
def test_login_forgot_nick_mixed_case(self):
r = self.client.post('/login/forgot',
{
'_nonce': util.create_nonce(None, 'login_forgot'),
'login_forgot' : '',
'nick_or_email' : 'CapitalPunishment',
})
r = self.assertRedirectsPrefix(r, '/login/forgot')
self.assertTemplateUsed(r, 'login/templates/forgot.html')
self.assertContains(r, 'New Password Emailed')
self.assertTemplateUsed(r, 'common/templates/flash.html')
class LogoutTest(ViewTestCase):
# Once user is logged out, we should not display the "Signed in as XXX" msg.
# See issue 336 for details
def test_logout_does_not_remain_signed_in(self):
r = self.login_and_get('popular', '/login')
self.assertRedirectsPrefix(r, '/user/popular/overview')
r = self.client.get('/logout')
self.assertTemplateUsed(r, 'login/templates/logout.html')
self.assertNotContains(r, "Signed in as")
|
|
# Copyright (c) 2019, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/spherical_functions/blob/master/LICENSE>
"""Evaluating Wigner D matrices, spin-weighted spherical harmonics, and related.
This module contains code for evaluating the Wigner 3j symbols, the Wigner D
matrices, scalar spherical harmonics, and spin-weighted spherical harmonics.
The code is wrapped by numba where possible, allowing the results to be
delivered at speeds approaching or exceeding speeds attained by pure C code.
"""
from ._version import __version__
__all__ = ['Wigner3j', 'Wigner_D_element', 'Wigner_D_matrices', 'SWSH', 'SWSH_grid',
'factorial', 'binomial_coefficient', 'ladder_operator_coefficient']
import functools
import numpy as np
import numba as nb
from math import factorial
import os.path
jit = njit = functools.partial(nb.njit, cache=True)
jitclass = nb.experimental.jitclass
# Module constants
ell_max = 32 # More than 29, and you get roundoff building quickly
epsilon = 1.e-15
error_on_bad_indices = True
# The coefficient files
# binomial_coefficients.npy
# ladder_operator_coefficients.npy
# Wigner_coefficients.npy
# were originally produced with the code in `_generate_coefficients.py`.
# Factorial
factorials = np.array([float(factorial(i)) for i in range(171)])
@njit('f8(i8)')
def factorial(i):
return factorials[i]
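# Note: the lookup table stops at 170! because 171! already overflows a 64-bit
# float; arguments outside 0..170 fall outside the precomputed table.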
# Binomial coefficients
_binomial_coefficients = np.load(os.path.join(os.path.dirname(__file__), 'binomial_coefficients.npy'))
@njit('f8(i8,i8)')
def binomial_coefficient(n, k):
return _binomial_coefficients[(n * (n + 1)) // 2 + k]
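# Note: the flat index n*(n+1)//2 + k walks Pascal's triangle row by row, so,
# assuming the precomputed file stores the rows in that order,
# binomial_coefficient(4, 2) reads the entry for "4 choose 2", i.e. 6.0.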
# Ladder-operator coefficients
_ladder_operator_coefficients = np.load(os.path.join(os.path.dirname(__file__), 'ladder_operator_coefficients.npy'))
@njit('f8(i8,i8)')
def _ladder_operator_coefficient(twoell, twom):
return _ladder_operator_coefficients[((twoell + 2) * twoell + twom) // 2]
@njit('f8(f8,f8)')
def ladder_operator_coefficient(ell, m):
return _ladder_operator_coefficient(round(2*ell), round(2*m))
# Coefficients used in constructing the Wigner D matrices
_Wigner_coefficients = np.load(os.path.join(os.path.dirname(__file__), 'Wigner_coefficients.npy'))
@njit('i8(i8,i8,i8)')
def _Wigner_index(twoell, twomp, twom):
return twoell*((2*twoell + 3)*twoell + 1) // 6 + (twoell + twomp)//2 * (twoell + 1) + (twoell + twom)//2
@njit('f8(i8,i8,i8)')
def _Wigner_coefficient(twoell, twomp, twom):
return _Wigner_coefficients[_Wigner_index(twoell, twomp, twom)]
@njit('f8(f8,f8,f8)')
def Wigner_coefficient(ell, mp, m):
return _Wigner_coefficient(round(2*ell), round(2*mp), round(2*m))
def LM_range(ell_min, ell_max):
"""Array of (ell,m) indices in standard order
This function returns an array of essentially
[[ell,m] for ell in range(ell_min, ell_max+1)
for m in range(-ell,ell+1)]
This is, for example, the order assumed for mode data in the `waveforms`
module.
"""
# # Sympy commands to calculate the total size:
# from sympy import symbols, summation
# ell_min,ell,ell_max = symbols('ell_min,ell,ell_max', integer=True)
# summation((2*ell + 1), (ell, ell_min, ell_max))
LM = np.empty((ell_max * (ell_max + 2) - ell_min ** 2 + 1, 2), dtype=int)
_LM_range(ell_min, ell_max, LM)
return LM
@njit('void(i8,i8,i8[:,:])')
def _LM_range(ell_min, ell_max, LM):
i = 0
for ell in range(ell_min, ell_max + 1):
for m in range(-ell, ell + 1):
LM[i, 0] = ell
LM[i, 1] = m
i += 1
@njit('i8(i8,i8,i8)')
def LM_index(ell, m, ell_min):
"""Array index for given (ell,m) mode
Assuming an array of
[[ell,m] for ell in range(ell_min, ell_max+1)
for m in range(-ell,ell+1)]
this function returns the index of the (ell,m) element. (Note that
ell_max doesn't actually come into this calculation, so it is not taken
as an argument to the function.)
This can be calculated in sympy as
from sympy import symbols, summation
ell,m,ell_min, = symbols('ell,m,ell_min,', integer=True)
summation(2*ell + 1, (ell, ell_min, ell-1)) + (ell+m)
"""
return ell * (ell + 1) - ell_min ** 2 + m
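# Worked example: with ell_min = 0 the modes are ordered
# (0,0), (1,-1), (1,0), (1,1), (2,-2), (2,-1), ..., so
# LM_index(2, -1, 0) == 2*(2+1) - 0**2 + (-1) == 5, the position of (2,-1).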
@njit('i8(i8,i8)')
def LM_total_size(ell_min, ell_max):
"""Total array size of (ell,m) components
Assuming an array of
[[ell,m] for ell in range(ell_min, ell_max+1)
for m in range(-ell,ell+1)]
this function returns the total size of that array.
This can be calculated in sympy as
from sympy import symbols, summation
ell,ell_min,ell_max = symbols('ell,ell_min,ell_max', integer=True)
summation(2*ell + 1, (ell, ell_min, ell_max))
"""
return ell_max * (ell_max + 2) - ell_min ** 2 + 1
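# Example: LM_total_size(0, 2) == 2*(2+2) - 0**2 + 1 == 9, i.e. 1 + 3 + 5 modes
# for ell = 0, 1, 2.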
def LM_deduce_ell_max(size, ell_min=0):
ell_max = int(np.sqrt(size + ell_min**2) - 1)
if ell_max * (ell_max + 2) - ell_min ** 2 + 1 != size:
raise ValueError(f"The input size {size} does not correspond to a possible array of modes with ell_min {ell_min}.")
return ell_max
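# Example: LM_deduce_ell_max(9) returns 2, since 9 modes with ell_min = 0 match
# ell_max = 2 exactly; a size such as 10 fits no ell_max and raises ValueError.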
def LMpM_range(ell_min, ell_max):
"""Array of (ell,mp,m) indices in standard order
This function returns an array of essentially
[[ell,mp,m] for ell in range(ell_min, ell_max+1)
for mp in range(-ell,ell+1)
for m in range(-ell,ell+1)]
This is, for instance, the array of indices of the Wigner D matrices
constructed by this module.
"""
# # Sympy commands to calculate the total size:
# from sympy import symbols, summation
# ell_min,ell,ell_max = symbols('ell_min,ell,ell_max', integer=True)
# summation((2*ell + 1)**2, (ell, ell_min, ell_max))
if abs(round(ell_max)-ell_max) > 1e-10 or abs(round(ell_min)-ell_min) > 1e-10:
error = ("LMpM_range is only intended for integer values of ell.\n"
+ "Input values ell_min={0} and ell_max={1} are not valid.\n".format(ell_min, ell_max)
+ "Try `LMpM_range_half_integer` instead.")
raise ValueError(error)
LMpM = np.empty(((ell_max * (11 + ell_max * (12 + 4 * ell_max)) + ell_min * (1 - 4 * ell_min ** 2) + 3) // 3, 3),
dtype=int)
_LMpM_range(ell_min, ell_max, LMpM)
return LMpM
@njit('void(i8,i8,i8[:,:])')
def _LMpM_range(ell_min, ell_max, LMpM):
i = 0
for ell in range(ell_min, ell_max + 1):
for mp in range(-ell, ell + 1):
for m in range(-ell, ell + 1):
LMpM[i, 0] = ell
LMpM[i, 1] = mp
LMpM[i, 2] = m
i += 1
def LMpM_range_half_integer(ell_min, ell_max):
"""Array of (ell,mp,m) indices in standard order, including half-integer values
This function returns an array of essentially
[[twoell/2, twomp/2, twom/2]
for twoell in range(2*ell_min, 2*ell_max+1)
for twomp in range(-twoell, twoell+1, 2)
for twom in range(-twoell, twoell+1, 2)]
See also: LMpM_range
"""
# # Sympy commands to calculate the total size:
# from sympy import symbols, summation
# twoell_min,twoell,twoell_max = symbols('twoell_min,twoell,twoell_max', integer=True)
# summation((twoell + 1)**2, (twoell, twoell_min, twoell_max))
LMpM = np.empty((int(((8*ell_max + 18)*ell_max + 13)*ell_max + 3 - ((8 * ell_min + 6) * ell_min + 1)*ell_min) // 3,
3), dtype=float)
_LMpM_range_half_integer(round(2*ell_min), round(2*ell_max), LMpM)
return LMpM
@njit('void(i8,i8,f8[:,:])')
def _LMpM_range_half_integer(twoell_min, twoell_max, LMpM):
i = 0
for twoell in range(twoell_min, twoell_max + 1):
for twomp in range(-twoell, twoell + 1, 2):
for twom in range(-twoell, twoell + 1, 2):
LMpM[i, 0] = twoell / 2
LMpM[i, 1] = twomp / 2
LMpM[i, 2] = twom / 2
i += 1
@njit('i8(i8,i8,i8,i8)')
def LMpM_index(ell, mp, m, ell_min):
"""Array index for given (ell,mp,m) mode
Assuming an array (e.g., Wigner D matrices) in the order
[[ell,mp,m] for ell in range(ell_min, ell_max+1)
for mp in range(-ell,ell+1)
for m in range(-ell,ell+1)]
this function returns the index of the (ell,mp,m) element. (Note that
ell_max doesn't actually come into this calculation, so it is not taken
as an argument to the function.)
This can be calculated in sympy as
from sympy import symbols, summation
ell,mp,m,ell_min, = symbols('ell,mp,m,ell_min,', integer=True)
summation((2*ell + 1)**2, (ell, ell_min, ell-1)) + (2*ell+1)*(ell+mp) + (ell+m)
"""
# raw output is: 4*ell**3/3 + 2*ell**2 + 2*ell*mp + 5*ell/3 - 4*ell_min**3/3 + ell_min/3 + m + mp
# We rearrange that to act more nicely
return (((4 * ell + 6) * ell + 6 * mp + 5) * ell + ell_min * (1 - 4 * ell_min ** 2) + 3 * (m + mp)) // 3
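# Worked example: with ell_min = 0 the entries run
# (0,0,0), (1,-1,-1), (1,-1,0), (1,-1,1), (1,0,-1), (1,0,0), ..., so
# LMpM_index(1, 0, 0, 0) == (((4 + 6)*1 + 0 + 5)*1 + 0 + 0) // 3 == 5.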
@njit('i8(i8, i8)')
def LMpM_total_size(ell_min, ell_max):
"""Total array size of Wigner D matrix
Assuming an array (e.g., Wigner D matrices) in the order
[[ell,mp,m] for ell in range(ell_min, ell_max+1)
for mp in range(-ell,ell+1)
for m in range(-ell,ell+1)]
this function returns the total size of that array.
This can be calculated in sympy as
from sympy import symbols, summation
ell,ell_min,ell_max = symbols('ell,ell_min,ell_max', integer=True)
summation((2*ell + 1)**2, (ell, ell_min, ell_max))
"""
# raw output is: 4*ell_max**3/3 + 4*ell_max**2 + 11*ell_max/3 - 4*ell_min**3/3 + ell_min/3 + 1
# We rearrange that to act more nicely
return (((4 * ell_max + 12) * ell_max + 11) * ell_max + (-4 * ell_min ** 2 + 1) * ell_min + 3) // 3
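# Example: LMpM_total_size(0, 1) == 30 // 3 == 10, i.e. 1 + 9 (ell,mp,m) triples
# for ell = 0 and ell = 1.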
def theta_phi(n_theta, n_phi):
"""Construct (theta, phi) grid
This grid is in the order expected by spinsfast
Parameters
==========
n_theta: int
Number of points in the theta direction
n_phi: int
Number of points in the phi direction
Returns
=======
theta_phi_grid: ndarray
Array of pairs of floats giving the respective [theta, phi] pairs. The shape of this array
is (n_theta, n_phi, 2).
"""
return np.array([[[theta, phi]
for phi in np.linspace(0.0, 2*np.pi, num=n_phi, endpoint=False)]
for theta in np.linspace(0.0, np.pi, num=n_theta, endpoint=True)])
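# Example: theta_phi(3, 4) has shape (3, 4, 2); theta runs over [0, pi]
# including both endpoints while phi covers [0, 2*pi) excluding 2*pi,
# matching the spinsfast ordering mentioned above.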
from .WignerD import (Wigner_D_element, _Wigner_D_element,
Wigner_D_matrices, _Wigner_D_matrices,
_linear_matrix_index, _linear_matrix_diagonal_index,
_linear_matrix_offset, _total_size_D_matrices)
from .SWSH import SWSH, SWSH_grid, _SWSH # sYlm, Ylm
from .SWSH_modes import Modes
from .SWSH_grids import Grid
from .mode_conversions import (constant_as_ell_0_mode, constant_from_ell_0_mode,
vector_as_ell_1_modes, vector_from_ell_1_modes,
eth_GHP, ethbar_GHP, eth_NP, ethbar_NP,
ethbar_inverse_NP)
from .multiplication import multiply
from .recursions import complex_powers, Wigner3jCalculator, Wigner3j, clebsch_gordan
|