"""
=========
Link Item
=========
"""
import math
from PyQt4.QtGui import (
QGraphicsItem, QGraphicsEllipseItem, QGraphicsPathItem, QGraphicsObject,
QGraphicsTextItem, QGraphicsDropShadowEffect, QPen, QBrush, QColor,
QPainterPath, QTransform
)
from PyQt4.QtCore import Qt, QPointF, QEvent
from .nodeitem import SHADOW_COLOR
from .utils import stroke_path
class LinkCurveItem(QGraphicsPathItem):
"""
Link curve item. The main component of a :class:`LinkItem`.
"""
def __init__(self, parent):
QGraphicsPathItem.__init__(self, parent)
if not isinstance(parent, LinkItem):
raise TypeError("'LinkItem' expected")
self.setAcceptedMouseButtons(Qt.NoButton)
self.__canvasLink = parent
self.setAcceptHoverEvents(True)
self.shadow = QGraphicsDropShadowEffect(
blurRadius=10, color=QColor(SHADOW_COLOR),
offset=QPointF(0, 0)
)
self.normalPen = QPen(QBrush(QColor("#9CACB4")), 2.0)
self.hoverPen = QPen(QBrush(QColor("#7D7D7D")), 2.1)
self.setPen(self.normalPen)
self.setGraphicsEffect(self.shadow)
self.shadow.setEnabled(False)
self.__hover = False
self.__enabled = True
self.__shape = None
def linkItem(self):
"""
Return the :class:`LinkItem` instance this curve belongs to.
"""
return self.__canvasLink
def setHoverState(self, state):
self.prepareGeometryChange()
self.__shape = None
self.__hover = state
self.__update()
def setLinkEnabled(self, state):
self.prepareGeometryChange()
self.__shape = None
self.__enabled = state
self.__update()
def isLinkEnabled(self):
return self.__enabled
def setCurvePenSet(self, pen, hoverPen):
self.prepareGeometryChange()
if pen is not None:
self.normalPen = pen
if hoverPen is not None:
self.hoverPen = hoverPen
self.__shape = None
self.__update()
def shape(self):
if self.__shape is None:
path = self.path()
pen = QPen(QBrush(Qt.black),
max(self.pen().widthF(), 20),
Qt.SolidLine)
self.__shape = stroke_path(path, pen)
return self.__shape
def setPath(self, path):
self.__shape = None
QGraphicsPathItem.setPath(self, path)
def __update(self):
shadow_enabled = self.__hover
if self.shadow.isEnabled() != shadow_enabled:
self.shadow.setEnabled(shadow_enabled)
link_enabled = self.__enabled
if link_enabled:
pen_style = Qt.SolidLine
else:
pen_style = Qt.DashLine
if self.__hover:
pen = self.hoverPen
else:
pen = self.normalPen
pen.setStyle(pen_style)
self.setPen(pen)
class LinkAnchorIndicator(QGraphicsEllipseItem):
"""
A visual indicator of the link anchor point at both ends
of the :class:`LinkItem`.
"""
def __init__(self, *args):
QGraphicsEllipseItem.__init__(self, *args)
self.setRect(-3, -3, 6, 6)
self.setPen(QPen(Qt.NoPen))
self.normalBrush = QBrush(QColor("#9CACB4"))
self.hoverBrush = QBrush(QColor("#7D7D7D"))
self.setBrush(self.normalBrush)
self.__hover = False
def setHoverState(self, state):
"""The hover state is set by the LinkItem.
"""
self.__hover = state
if state:
self.setBrush(self.hoverBrush)
else:
self.setBrush(self.normalBrush)
class LinkItem(QGraphicsObject):
"""
A Link item in the canvas that connects two :class:`.NodeItem`\s in the
canvas.
The link curve connects two `Anchor` items (see :func:`setSourceItem`
and :func:`setSinkItem`). Once the anchors are set the curve
automatically adjusts its end points whenever the anchors move.
An optional source/sink text item can be displayed above the curve's
central point (:func:`setSourceName`, :func:`setSinkName`)
"""
#: Z value of the item
Z_VALUE = 0
def __init__(self, *args):
self.__boundingRect = None
QGraphicsObject.__init__(self, *args)
self.setFlag(QGraphicsItem.ItemHasNoContents, True)
self.setAcceptedMouseButtons(Qt.RightButton | Qt.LeftButton)
self.setAcceptHoverEvents(True)
self.setZValue(self.Z_VALUE)
self.sourceItem = None
self.sourceAnchor = None
self.sinkItem = None
self.sinkAnchor = None
self.curveItem = LinkCurveItem(self)
self.sourceIndicator = LinkAnchorIndicator(self)
self.sinkIndicator = LinkAnchorIndicator(self)
self.sourceIndicator.hide()
self.sinkIndicator.hide()
self.linkTextItem = QGraphicsTextItem(self)
self.__sourceName = ""
self.__sinkName = ""
self.__dynamic = False
self.__dynamicEnabled = False
self.hover = False
self.prepareGeometryChange()
self.__boundingRect = None
def setSourceItem(self, item, anchor=None):
"""
Set the source `item` (:class:`.NodeItem`). Use `anchor`
(:class:`.AnchorPoint`) as the curve start point (if ``None`` a new
output anchor will be created using ``item.newOutputAnchor()``).
Setting item to ``None`` and a valid anchor is a valid operation
(for instance while mouse dragging one end of the link).
"""
if item is not None and anchor is not None:
if anchor not in item.outputAnchors():
raise ValueError("Anchor must be belong to the item")
if self.sourceItem != item:
if self.sourceAnchor:
# Remove a previous source item and the corresponding anchor
self.sourceAnchor.scenePositionChanged.disconnect(
self._sourcePosChanged
)
if self.sourceItem is not None:
self.sourceItem.removeOutputAnchor(self.sourceAnchor)
self.sourceItem = self.sourceAnchor = None
self.sourceItem = item
if item is not None and anchor is None:
# Create a new output anchor for the item if none is provided.
anchor = item.newOutputAnchor()
# Update the visibility of the start point indicator.
self.sourceIndicator.setVisible(bool(item))
if anchor != self.sourceAnchor:
if self.sourceAnchor is not None:
self.sourceAnchor.scenePositionChanged.disconnect(
self._sourcePosChanged
)
self.sourceAnchor = anchor
if self.sourceAnchor is not None:
self.sourceAnchor.scenePositionChanged.connect(
self._sourcePosChanged
)
self.__updateCurve()
def setSinkItem(self, item, anchor=None):
"""
Set the sink `item` (:class:`.NodeItem`). Use `anchor`
(:class:`.AnchorPoint`) as the curve end point (if ``None`` a new
input anchor will be created using ``item.newInputAnchor()``).
Setting item to ``None`` and a valid anchor is a valid operation
(for instance while mouse dragging one end of the link).
"""
if item is not None and anchor is not None:
if anchor not in item.inputAnchors():
raise ValueError("Anchor must be belong to the item")
if self.sinkItem != item:
if self.sinkAnchor:
# Remove a previous sink item and the corresponding anchor
self.sinkAnchor.scenePositionChanged.disconnect(
self._sinkPosChanged
)
if self.sinkItem is not None:
self.sinkItem.removeInputAnchor(self.sinkAnchor)
self.sinkItem = self.sinkAnchor = None
self.sinkItem = item
if item is not None and anchor is None:
# Create a new input anchor for the item if none is provided.
anchor = item.newInputAnchor()
# Update the visibility of the end point indicator.
self.sinkIndicator.setVisible(bool(item))
if self.sinkAnchor != anchor:
if self.sinkAnchor is not None:
self.sinkAnchor.scenePositionChanged.disconnect(
self._sinkPosChanged
)
self.sinkAnchor = anchor
if self.sinkAnchor is not None:
self.sinkAnchor.scenePositionChanged.connect(
self._sinkPosChanged
)
self.__updateCurve()
def setFont(self, font):
"""
Set the font for the channel names text item.
"""
if font != self.font():
self.linkTextItem.setFont(font)
self.__updateText()
def font(self):
"""
Return the font for the channel names text.
"""
return self.linkTextItem.font()
def setChannelNamesVisible(self, visible):
"""
Set the visibility of the channel name text.
"""
self.linkTextItem.setVisible(visible)
def setSourceName(self, name):
"""
Set the name of the source (used in channel name text).
"""
if self.__sourceName != name:
self.__sourceName = name
self.__updateText()
def sourceName(self):
"""
Return the source name.
"""
return self.__sourceName
def setSinkName(self, name):
"""
Set the name of the sink (used in channel name text).
"""
if self.__sinkName != name:
self.__sinkName = name
self.__updateText()
def sinkName(self):
"""
Return the sink name.
"""
return self.__sinkName
def _sinkPosChanged(self, *arg):
self.__updateCurve()
def _sourcePosChanged(self, *arg):
self.__updateCurve()
def __updateCurve(self):
self.prepareGeometryChange()
self.__boundingRect = None
if self.sourceAnchor and self.sinkAnchor:
source_pos = self.sourceAnchor.anchorScenePos()
sink_pos = self.sinkAnchor.anchorScenePos()
source_pos = self.curveItem.mapFromScene(source_pos)
sink_pos = self.curveItem.mapFromScene(sink_pos)
# Adaptive offset for the curve control points to avoid a
# cusp when the two points have the same y coordinate
# and are close together
delta = source_pos - sink_pos
dist = math.sqrt(delta.x() ** 2 + delta.y() ** 2)
cp_offset = min(dist / 2.0, 60.0)
# TODO: make the curve tangent orthogonal to the anchors path.
path = QPainterPath()
path.moveTo(source_pos)
path.cubicTo(source_pos + QPointF(cp_offset, 0),
sink_pos - QPointF(cp_offset, 0),
sink_pos)
self.curveItem.setPath(path)
self.sourceIndicator.setPos(source_pos)
self.sinkIndicator.setPos(sink_pos)
self.__updateText()
else:
self.setHoverState(False)
self.curveItem.setPath(QPainterPath())
def __updateText(self):
self.prepareGeometryChange()
self.__boundingRect = None
if self.__sourceName or self.__sinkName:
if self.__sourceName != self.__sinkName:
text = "{0} \u2192 {1}".format(self.__sourceName,
self.__sinkName)
else:
# If the names are the same, show only one.
# Is this right? If the sink has two input channels of the
# same type, having the name on the link would help elucidate
# the scheme.
text = self.__sourceName
else:
text = ""
self.linkTextItem.setPlainText(text)
path = self.curveItem.path()
if not path.isEmpty():
center = path.pointAtPercent(0.5)
angle = path.angleAtPercent(0.5)
brect = self.linkTextItem.boundingRect()
transform = QTransform()
transform.translate(center.x(), center.y())
transform.rotate(-angle)
# Center and move above the curve path.
transform.translate(-brect.width() / 2, -brect.height())
self.linkTextItem.setTransform(transform)
def removeLink(self):
self.setSinkItem(None)
self.setSourceItem(None)
self.__updateCurve()
def setHoverState(self, state):
if self.hover != state:
self.prepareGeometryChange()
self.__boundingRect = None
self.hover = state
self.sinkIndicator.setHoverState(state)
self.sourceIndicator.setHoverState(state)
self.curveItem.setHoverState(state)
def hoverEnterEvent(self, event):
# Hover enter event happens when the mouse enters any child object
# but we only want to show the 'hovered' shadow when the mouse
# is over the 'curveItem', so we install self as an event filter
# on the LinkCurveItem and listen to its hover events.
self.curveItem.installSceneEventFilter(self)
return QGraphicsObject.hoverEnterEvent(self, event)
def hoverLeaveEvent(self, event):
# Remove the event filter to prevent unnecessary work in
# scene event filter when not needed
self.curveItem.removeSceneEventFilter(self)
return QGraphicsObject.hoverLeaveEvent(self, event)
def sceneEventFilter(self, obj, event):
if obj is self.curveItem:
if event.type() == QEvent.GraphicsSceneHoverEnter:
self.setHoverState(True)
elif event.type() == QEvent.GraphicsSceneHoverLeave:
self.setHoverState(False)
return QGraphicsObject.sceneEventFilter(self, obj, event)
def boundingRect(self):
if self.__boundingRect is None:
self.__boundingRect = self.childrenBoundingRect()
return self.__boundingRect
def shape(self):
return self.curveItem.shape()
def setEnabled(self, enabled):
"""
Reimplemented from :class:`QGraphicsObject`
Set link enabled state. When disabled the link is rendered with a
dashed line.
"""
# This getter/setter pair overrides a property from the base class.
# They should be renamed to e.g. setLinkEnabled/linkEnabled.
self.curveItem.setLinkEnabled(enabled)
def isEnabled(self):
return self.curveItem.isLinkEnabled()
def setDynamicEnabled(self, enabled):
"""
Set the link's dynamic enabled state.
If the link is `dynamic` it will be rendered in green when the
dynamic enabled state is on and in red when it is off.
"""
if self.__dynamicEnabled != enabled:
self.__dynamicEnabled = enabled
if self.__dynamic:
self.__updatePen()
def isDynamicEnabled(self):
"""
Return the link's dynamic enabled state.
"""
return self.__dynamicEnabled
def setDynamic(self, dynamic):
"""
Mark the link as dynamic (i.e. it responds to
:func:`setDynamicEnabled`).
"""
if self.__dynamic != dynamic:
self.__dynamic = dynamic
self.__updatePen()
def isDynamic(self):
"""
Is the link dynamic.
"""
return self.__dynamic
def __updatePen(self):
self.prepareGeometryChange()
self.__boundingRect = None
if self.__dynamic:
if self.__dynamicEnabled:
color = QColor(0, 150, 0, 150)
else:
color = QColor(150, 0, 0, 150)
normal = QPen(QBrush(color), 2.0)
hover = QPen(QBrush(color.darker(120)), 2.1)
else:
normal = QPen(QBrush(QColor("#9CACB4")), 2.0)
hover = QPen(QBrush(QColor("#7D7D7D")), 2.1)
self.curveItem.setCurvePenSet(normal, hover)
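# Illustrative sketch (not part of the original module): the adaptive
# control-point offset computed in LinkItem.__updateCurve is just half the
# anchor distance, capped at 60, which keeps the cubic Bezier from forming
# a cusp when the two anchors are close together. The point values used
# below are hypothetical.
import math

def _control_point_offset(source_pos, sink_pos, max_offset=60.0):
    dx = source_pos[0] - sink_pos[0]
    dy = source_pos[1] - sink_pos[1]
    return min(math.sqrt(dx ** 2 + dy ** 2) / 2.0, max_offset)

print(_control_point_offset((0, 0), (40, 0)))    # 20.0 -- close anchors, small offset
print(_control_point_offset((0, 0), (400, 0)))   # 60.0 -- far apart, capped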
| {
"content_hash": "920da022618ed3579e92e5b3dbe53a71",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 78,
"avg_line_length": 31.61003861003861,
"alnum_prop": 0.5809820447050201,
"repo_name": "qPCR4vir/orange3",
"id": "b6e1f65719bce60d2e6e3776216ca27f029ad1a1",
"size": "16374",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Orange/canvas/canvas/items/linkitem.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12007"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20281"
},
{
"name": "Python",
"bytes": "4205054"
},
{
"name": "Shell",
"bytes": "48335"
}
],
"symlink_target": ""
} |
import sqlite3
from airflow.hooks.dbapi_hook import DbApiHook
class SqliteHook(DbApiHook):
"""
Interact with SQLite.
"""
conn_name_attr = 'sqlite_conn_id'
default_conn_name = 'sqlite_default'
supports_autocommit = False
def get_conn(self):
"""
Returns a sqlite connection object
"""
conn = self.get_connection(self.sqlite_conn_id)
conn = sqlite3.connect(conn.host)
return conn
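# Minimal sketch (not part of the original hook): get_conn() above resolves the
# Airflow connection and passes its host straight to the standard library, i.e.
# it boils down to sqlite3.connect(host). Shown here with an in-memory database
# standing in for a real connection's host.
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("SELECT sqlite_version()")
print(cur.fetchone())
conn.close()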
| {
"content_hash": "2679589396b56ec3cce49f0c88393a42",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 55,
"avg_line_length": 20.863636363636363,
"alnum_prop": 0.6252723311546841,
"repo_name": "RealImpactAnalytics/airflow",
"id": "7f6c655029b69a8c93e0195d5fe36e350910ffe6",
"size": "1273",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/hooks/sqlite_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "270710"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3765458"
},
{
"name": "Shell",
"bytes": "46923"
}
],
"symlink_target": ""
} |
# [START retail_v2beta_generated_ControlService_UpdateControl_async]
from google.cloud import retail_v2beta
async def sample_update_control():
# Create a client
client = retail_v2beta.ControlServiceAsyncClient()
# Initialize request argument(s)
control = retail_v2beta.Control()
control.facet_spec.facet_key.key = "key_value"
control.display_name = "display_name_value"
control.solution_types = "SOLUTION_TYPE_SEARCH"
request = retail_v2beta.UpdateControlRequest(
control=control,
)
# Make the request
response = await client.update_control(request=request)
# Handle the response
print(response)
# [END retail_v2beta_generated_ControlService_UpdateControl_async]
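# Hedged note (not part of the generated sample): the coroutine above still has
# to be driven by an event loop. Assuming valid Google Cloud credentials and an
# existing control resource, it could be run with:
#
#     import asyncio
#     asyncio.run(sample_update_control())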
| {
"content_hash": "1aa6d90e935540a1ff08751e5eb713a2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 27.541666666666668,
"alnum_prop": 0.7170953101361573,
"repo_name": "googleapis/python-retail",
"id": "8c17bb44a179a6acde31633379e5f271217f7e61",
"size": "2050",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/retail_v2beta_generated_control_service_update_control_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
"""
fs.memoryfs
===========
A filesystem that exists in memory only, which makes it extremely fast but non-permanent.
If you open a file from a `memoryfs` you will get back a StringIO object from the standard library.
"""
import datetime
import stat
from fs.path import iteratepath, pathsplit, normpath
from fs.base import *
from fs.errors import *
from fs import _thread_synchronize_default
from fs.filelike import StringIO
from fs import iotools
from os import SEEK_END
import threading
import six
from six import b
def _check_mode(mode, mode_chars):
for c in mode_chars:
if c not in mode:
return False
return True
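# For example: _check_mode("rb+", "r") -> True; _check_mode("w", "r+") -> False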
class MemoryFile(object):
def seek_and_lock(f):
def deco(self, *args, **kwargs):
try:
self._lock.acquire()
self.mem_file.seek(self.pos)
ret = f(self, *args, **kwargs)
self.pos = self.mem_file.tell()
return ret
finally:
self._lock.release()
return deco
def __init__(self, path, memory_fs, mem_file, mode, lock):
self.closed = False
self.path = path
self.memory_fs = memory_fs
self.mem_file = mem_file
self.mode = mode
self._lock = lock
self.pos = 0
if _check_mode(mode, 'a'):
lock.acquire()
try:
self.mem_file.seek(0, SEEK_END)
self.pos = self.mem_file.tell()
finally:
lock.release()
elif _check_mode(mode, 'w'):
lock.acquire()
try:
self.mem_file.seek(0)
self.mem_file.truncate()
finally:
lock.release()
assert self.mem_file is not None, "self.mem_file should have a value"
def __str__(self):
return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)
def __repr__(self):
return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
def __unicode__(self):
return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
def __del__(self):
if not self.closed:
self.close()
def flush(self):
pass
def __iter__(self):
if 'r' not in self.mode and '+' not in self.mode:
raise IOError("File not open for reading")
self.mem_file.seek(self.pos)
for line in self.mem_file:
yield line
@seek_and_lock
def next(self):
if 'r' not in self.mode and '+' not in self.mode:
raise IOError("File not open for reading")
return self.mem_file.next()
@seek_and_lock
def readline(self, *args, **kwargs):
if 'r' not in self.mode and '+' not in self.mode:
raise IOError("File not open for reading")
return self.mem_file.readline(*args, **kwargs)
def close(self):
do_close = False
self._lock.acquire()
try:
do_close = not self.closed and self.mem_file is not None
if do_close:
self.closed = True
finally:
self._lock.release()
if do_close:
self.memory_fs._on_close_memory_file(self, self.path)
@seek_and_lock
def read(self, size=None):
if 'r' not in self.mode and '+' not in self.mode:
raise IOError("File not open for reading")
if size is None:
size = -1
return self.mem_file.read(size)
@seek_and_lock
def seek(self, *args, **kwargs):
return self.mem_file.seek(*args, **kwargs)
@seek_and_lock
def tell(self):
return self.pos
@seek_and_lock
def truncate(self, *args, **kwargs):
if 'r' in self.mode and '+' not in self.mode:
raise IOError("File not open for writing")
return self.mem_file.truncate(*args, **kwargs)
#@seek_and_lock
def write(self, data):
if 'r' in self.mode and '+' not in self.mode:
raise IOError("File not open for writing")
self.memory_fs._on_modify_memory_file(self.path)
self._lock.acquire()
try:
self.mem_file.seek(self.pos)
self.mem_file.write(data)
self.pos = self.mem_file.tell()
finally:
self._lock.release()
@seek_and_lock
def writelines(self, *args, **kwargs):
return self.mem_file.writelines(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
return False
class DirEntry(object):
def sync(f):
def deco(self, *args, **kwargs):
if self.lock is not None:
try:
self.lock.acquire()
return f(self, *args, **kwargs)
finally:
self.lock.release()
else:
return f(self, *args, **kwargs)
return deco
def __init__(self, type, name, contents=None):
assert type in ("dir", "file"), "Type must be dir or file!"
self.type = type
self.name = name
if contents is None and type == "dir":
contents = {}
self.open_files = []
self.contents = contents
self.mem_file = None
self.created_time = datetime.datetime.now()
self.modified_time = self.created_time
self.accessed_time = self.created_time
self.xattrs = {}
self.lock = None
if self.type == 'file':
self.mem_file = StringIO()
self.lock = threading.RLock()
def get_value(self):
self.lock.acquire()
try:
return self.mem_file.getvalue()
finally:
self.lock.release()
data = property(get_value)
def desc_contents(self):
if self.isfile():
return "<file %s>" % self.name
elif self.isdir():
return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())
def isdir(self):
return self.type == "dir"
def isfile(self):
return self.type == "file"
def __str__(self):
return "%s: %s" % (self.name, self.desc_contents())
@sync
def __getstate__(self):
state = self.__dict__.copy()
state.pop('lock')
if self.mem_file is not None:
state['mem_file'] = self.data
return state
def __setstate__(self, state):
self.__dict__.update(state)
if self.type == 'file':
self.lock = threading.RLock()
else:
self.lock = None
if self.mem_file is not None:
data = self.mem_file
self.mem_file = StringIO()
self.mem_file.write(data)
class MemoryFS(FS):
"""An in-memory filesystem.
"""
_meta = {'thread_safe': True,
'network': False,
'virtual': False,
'read_only': False,
'unicode_paths': True,
'case_insensitive_paths': False,
'atomic.move': False,
'atomic.copy': False,
'atomic.makedir': True,
'atomic.rename': True,
'atomic.setcontents': False}
def _make_dir_entry(self, *args, **kwargs):
return self.dir_entry_factory(*args, **kwargs)
def __init__(self, file_factory=None):
super(MemoryFS, self).__init__(thread_synchronize=_thread_synchronize_default)
self.dir_entry_factory = DirEntry
self.file_factory = file_factory or MemoryFile
if not callable(self.file_factory):
raise ValueError("file_factory should be callable")
self.root = self._make_dir_entry('dir', 'root')
def __str__(self):
return "<MemoryFS>"
def __repr__(self):
return "MemoryFS()"
def __unicode__(self):
return "<MemoryFS>"
@synchronize
def _get_dir_entry(self, dirpath):
dirpath = normpath(dirpath)
current_dir = self.root
for path_component in iteratepath(dirpath):
if current_dir.contents is None:
return None
dir_entry = current_dir.contents.get(path_component, None)
if dir_entry is None:
return None
current_dir = dir_entry
return current_dir
@synchronize
def _dir_entry(self, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
return dir_entry
@synchronize
def desc(self, path):
if self.isdir(path):
return "Memory dir"
elif self.isfile(path):
return "Memory file object"
else:
return "No description available"
@synchronize
def isdir(self, path):
path = normpath(path)
if path in ('', '/'):
return True
dir_item = self._get_dir_entry(path)
if dir_item is None:
return False
return dir_item.isdir()
@synchronize
def isfile(self, path):
path = normpath(path)
if path in ('', '/'):
return False
dir_item = self._get_dir_entry(path)
if dir_item is None:
return False
return dir_item.isfile()
@synchronize
def exists(self, path):
path = normpath(path)
if path in ('', '/'):
return True
return self._get_dir_entry(path) is not None
@synchronize
def makedir(self, dirname, recursive=False, allow_recreate=False):
if not dirname and not allow_recreate:
raise PathError(dirname)
fullpath = normpath(dirname)
if fullpath in ('', '/'):
if allow_recreate:
return
raise DestinationExistsError(dirname)
dirpath, dirname = pathsplit(dirname.rstrip('/'))
if recursive:
parent_dir = self._get_dir_entry(dirpath)
if parent_dir is not None:
if parent_dir.isfile():
raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
else:
if not allow_recreate:
if dirname in parent_dir.contents:
raise DestinationExistsError(dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
current_dir = self.root
for path_component in iteratepath(dirpath)[:-1]:
dir_item = current_dir.contents.get(path_component, None)
if dir_item is None:
break
if not dir_item.isdir():
raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
current_dir = dir_item
current_dir = self.root
for path_component in iteratepath(dirpath):
dir_item = current_dir.contents.get(path_component, None)
if dir_item is None:
new_dir = self._make_dir_entry("dir", path_component)
current_dir.contents[path_component] = new_dir
current_dir = new_dir
else:
current_dir = dir_item
parent_dir = current_dir
else:
parent_dir = self._get_dir_entry(dirpath)
if parent_dir is None:
raise ParentDirectoryMissingError(dirname, msg="Could not make dir, as parent dir does not exist: %(path)s")
dir_item = parent_dir.contents.get(dirname, None)
if dir_item is not None:
if dir_item.isdir():
if not allow_recreate:
raise DestinationExistsError(dirname)
else:
raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
if dir_item is None:
parent_dir.contents[dirname] = self._make_dir_entry("dir", dirname)
#@synchronize
#def _orphan_files(self, file_dir_entry):
# for f in file_dir_entry.open_files[:]:
# f.close()
@synchronize
@iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
path = normpath(path)
filepath, filename = pathsplit(path)
parent_dir_entry = self._get_dir_entry(filepath)
if parent_dir_entry is None or not parent_dir_entry.isdir():
raise ResourceNotFoundError(path)
if 'r' in mode or 'a' in mode:
if filename not in parent_dir_entry.contents:
raise ResourceNotFoundError(path)
file_dir_entry = parent_dir_entry.contents[filename]
if file_dir_entry.isdir():
raise ResourceInvalidError(path)
file_dir_entry.accessed_time = datetime.datetime.now()
mem_file = self.file_factory(path, self, file_dir_entry.mem_file, mode, file_dir_entry.lock)
file_dir_entry.open_files.append(mem_file)
return mem_file
elif 'w' in mode:
if filename not in parent_dir_entry.contents:
file_dir_entry = self._make_dir_entry("file", filename)
parent_dir_entry.contents[filename] = file_dir_entry
else:
file_dir_entry = parent_dir_entry.contents[filename]
file_dir_entry.accessed_time = datetime.datetime.now()
mem_file = self.file_factory(path, self, file_dir_entry.mem_file, mode, file_dir_entry.lock)
file_dir_entry.open_files.append(mem_file)
return mem_file
if parent_dir_entry is None:
raise ResourceNotFoundError(path)
@synchronize
def remove(self, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
if dir_entry.isdir():
raise ResourceInvalidError(path, msg="That's a directory, not a file: %(path)s")
pathname, dirname = pathsplit(path)
parent_dir = self._get_dir_entry(pathname)
del parent_dir.contents[dirname]
@synchronize
def removedir(self, path, recursive=False, force=False):
path = normpath(path)
if path in ('', '/'):
raise RemoveRootError(path)
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
if not dir_entry.isdir():
raise ResourceInvalidError(path, msg="Can't remove resource, its not a directory: %(path)s" )
if dir_entry.contents and not force:
raise DirectoryNotEmptyError(path)
if recursive:
rpathname = path
while rpathname:
rpathname, dirname = pathsplit(rpathname)
parent_dir = self._get_dir_entry(rpathname)
if not dirname:
raise RemoveRootError(path)
del parent_dir.contents[dirname]
# stop recursing if the directory has other contents
if parent_dir.contents:
break
else:
pathname, dirname = pathsplit(path)
parent_dir = self._get_dir_entry(pathname)
if not dirname:
raise RemoveRootError(path)
del parent_dir.contents[dirname]
@synchronize
def rename(self, src, dst):
src = normpath(src)
dst = normpath(dst)
src_dir, src_name = pathsplit(src)
src_entry = self._get_dir_entry(src)
if src_entry is None:
raise ResourceNotFoundError(src)
open_files = src_entry.open_files[:]
for f in open_files:
f.flush()
f.path = dst
dst_dir,dst_name = pathsplit(dst)
dst_entry = self._get_dir_entry(dst)
if dst_entry is not None:
raise DestinationExistsError(dst)
src_dir_entry = self._get_dir_entry(src_dir)
src_xattrs = src_dir_entry.xattrs.copy()
dst_dir_entry = self._get_dir_entry(dst_dir)
if dst_dir_entry is None:
raise ParentDirectoryMissingError(dst)
dst_dir_entry.contents[dst_name] = src_dir_entry.contents[src_name]
dst_dir_entry.contents[dst_name].name = dst_name
dst_dir_entry.xattrs.update(src_xattrs)
del src_dir_entry.contents[src_name]
@synchronize
def settimes(self, path, accessed_time=None, modified_time=None):
now = datetime.datetime.now()
if accessed_time is None:
accessed_time = now
if modified_time is None:
modified_time = now
dir_entry = self._get_dir_entry(path)
if dir_entry is not None:
dir_entry.accessed_time = accessed_time
dir_entry.modified_time = modified_time
return True
return False
@synchronize
def _on_close_memory_file(self, open_file, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is not None:
dir_entry.open_files.remove(open_file)
@synchronize
def _on_modify_memory_file(self, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is not None:
dir_entry.modified_time = datetime.datetime.now()
@synchronize
def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
if dir_entry.isfile():
raise ResourceInvalidError(path, msg="not a directory: %(path)s")
paths = dir_entry.contents.keys()
for (i,p) in enumerate(paths):
if not isinstance(p,unicode):
paths[i] = unicode(p)
return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
@synchronize
def getinfo(self, path):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
info = {}
info['created_time'] = dir_entry.created_time
info['modified_time'] = dir_entry.modified_time
info['accessed_time'] = dir_entry.accessed_time
if dir_entry.isdir():
info['st_mode'] = 0755 | stat.S_IFDIR
else:
info['size'] = len(dir_entry.data or b(''))
info['st_mode'] = 0666 | stat.S_IFREG
return info
@synchronize
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).copydir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).movedir(src, dst, overwrite, ignore_errors=ignore_errors, chunk_size=chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def copy(self, src, dst, overwrite=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).copy(src, dst, overwrite, chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def move(self, src, dst, overwrite=False, chunk_size=1024*64):
src_dir_entry = self._get_dir_entry(src)
if src_dir_entry is None:
raise ResourceNotFoundError(src)
src_xattrs = src_dir_entry.xattrs.copy()
super(MemoryFS, self).move(src, dst, overwrite, chunk_size)
dst_dir_entry = self._get_dir_entry(dst)
if dst_dir_entry is not None:
dst_dir_entry.xattrs.update(src_xattrs)
@synchronize
def getcontents(self, path, mode="rb", encoding=None, errors=None, newline=None):
dir_entry = self._get_dir_entry(path)
if dir_entry is None:
raise ResourceNotFoundError(path)
if not dir_entry.isfile():
raise ResourceInvalidError(path, msg="not a file: %(path)s")
data = dir_entry.data or b('')
if 'b' not in mode:
return iotools.decode_binary(data, encoding=encoding, errors=errors, newline=newline)
return data
@synchronize
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=1024*64):
if isinstance(data, six.binary_type):
if not self.exists(path):
self.open(path, 'wb').close()
dir_entry = self._get_dir_entry(path)
if not dir_entry.isfile():
raise ResourceInvalidError(path, msg="not a file: %(path)s")
new_mem_file = StringIO()
new_mem_file.write(data)
dir_entry.mem_file = new_mem_file
return len(data)
return super(MemoryFS, self).setcontents(path, data=data, encoding=encoding, errors=errors, chunk_size=chunk_size)
# if isinstance(data, six.text_type):
# return super(MemoryFS, self).setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
# if not self.exists(path):
# self.open(path, 'wb').close()
# dir_entry = self._get_dir_entry(path)
# if not dir_entry.isfile():
# raise ResourceInvalidError('Not a directory %(path)s', path)
# new_mem_file = StringIO()
# new_mem_file.write(data)
# dir_entry.mem_file = new_mem_file
@synchronize
def setxattr(self, path, key, value):
dir_entry = self._dir_entry(path)
key = unicode(key)
dir_entry.xattrs[key] = value
@synchronize
def getxattr(self, path, key, default=None):
key = unicode(key)
dir_entry = self._dir_entry(path)
return dir_entry.xattrs.get(key, default)
@synchronize
def delxattr(self, path, key):
dir_entry = self._dir_entry(path)
try:
del dir_entry.xattrs[key]
except KeyError:
pass
@synchronize
def listxattrs(self, path):
dir_entry = self._dir_entry(path)
return dir_entry.xattrs.keys()
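# Usage sketch (not part of the original module). It exercises the MemoryFS API
# defined above and assumes the legacy pyfilesystem ``fs`` 0.x package, to which
# this file belongs, is importable.
if __name__ == "__main__":
    mem = MemoryFS()
    mem.makedir("docs")
    mem.setcontents("docs/hello.txt", b("hello world"))
    print(mem.listdir("docs"))                  # [u'hello.txt']
    print(mem.getcontents("docs/hello.txt"))    # 'hello world'
    mem.close()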
| {
"content_hash": "6bb534978f2303d1a0ece7c55381a4d0",
"timestamp": "",
"source": "github",
"line_count": 693,
"max_line_length": 155,
"avg_line_length": 34.277056277056275,
"alnum_prop": 0.5526648143470574,
"repo_name": "ctismer/pyfilesystem",
"id": "9a87db3b1451c81129df8ad9f4d9e2882411714a",
"size": "23777",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fs/memoryfs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3263"
},
{
"name": "Python",
"bytes": "1058012"
},
{
"name": "Shell",
"bytes": "3083"
}
],
"symlink_target": ""
} |
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.conf import settings
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.tasks import update_search
log = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-p',
dest='project',
default='',
help='Project to index'),
)
def handle(self, *args, **options):
"""Build/index all versions or a single project's version"""
project = options['project']
queryset = Version.objects.all()
if project:
queryset = queryset.filter(project__slug=project)
if not queryset.exists():
raise CommandError(
'No project with slug: {slug}'.format(slug=project))
log.info("Building all versions for %s" % project)
elif getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = queryset.filter(slug=LATEST)
for version in queryset:
log.info("Reindexing %s" % version)
try:
commit = version.project.vcs_repo(version.slug).commit
except:
# This will happen on prod
commit = None
try:
update_search(version.pk, commit,
delete_non_commit_files=False)
except Exception:
log.error('Reindex failed for %s' % version, exc_info=True)
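# Hedged usage sketch (not part of the original command): with the project's
# Django settings loaded it can be invoked programmatically; the 'someproject'
# slug below is hypothetical.
#
#     from django.core.management import call_command
#     call_command('reindex_elasticsearch', project='someproject')
#
# or from a shell:
#
#     python manage.py reindex_elasticsearch -p someproject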
| {
"content_hash": "0ddb5c2c93e0369d60e6f01f57120a9c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 33.01960784313726,
"alnum_prop": 0.5926365795724465,
"repo_name": "tddv/readthedocs.org",
"id": "d7b8176e37697c64248d33fcf187a13bffbbd677",
"size": "1684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/core/management/commands/reindex_elasticsearch.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "84305"
},
{
"name": "HTML",
"bytes": "236112"
},
{
"name": "JavaScript",
"bytes": "445655"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1146612"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mblog.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "b55d1b9c3c0d01ed35c4790a25abc47c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.7079646017699115,
"repo_name": "LouisLinY/mblog",
"id": "0c00222c6d9fb49b45e324917547be9f5bb82eed",
"size": "248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "6838"
}
],
"symlink_target": ""
} |
import argparse
import os
import logging
import time
from importlib import import_module
try:
from concurrent import futures
except ImportError:
import futures
from ghost import Ghost
from ghost.bindings import (
QPainter,
QPrinter,
QtCore,
)
import tornado.web
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.concurrent import run_on_executor
from PyPDF2 import (
PdfFileReader,
PdfFileWriter,
)
import nbformat
from jupyter_core.paths import jupyter_path
# the port on which to serve the fake server
PORT = 9999
# a notional default viewport...
VIEWPORT = (1200, 900)
# the version of the notebook format to use... some autodetect would be nice
IPYNB_VERSION = 4
class CaptureServer(HTTPServer):
""" A tornado server that handles serving up static HTTP assets. When the
assets are ready, `capture` is called.
This should be subclassed to provide specific behavior: see
nbpresent.exporters.pdf_capture (from which this was refactored)
"""
executor = futures.ThreadPoolExecutor(max_workers=1)
pdf_name = "notebook.pdf"
ipynb_name = "notebook.ipynb"
embed_ipynb = True
@run_on_executor
def capture(self):
""" The main control flow for the capture process.
"""
self.ghost = self.init_ghost()
self.session = self.init_session()
self.session.open("http://localhost:{}/index.html".format(PORT))
try:
self.page_ready()
except Exception as err:
print(err)
self.print_to_pdf(self.in_static(self.pdf_name))
self.post_process()
raise KeyboardInterrupt()
def print_to_pdf(self, filename):
""" Saves page as a pdf file.
See qt4 QPrinter documentation for more detailed explanations
of options.
:param filename: The destination path.
"""
# TODO: read these from notebook metadata? args?
paper_size = (8.5, 11.0)
paper_margins = (0, 0, 0, 0)
paper_units = QPrinter.Inch
resolution = 1200
printer = QPrinter(QPrinter.HighResolution)
printer.setColorMode(QPrinter.Color)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setPageMargins(*(paper_margins + (paper_units,)))
printer.setPaperSize(QtCore.QSizeF(*paper_size), paper_units)
printer.setResolution(resolution)
printer.setFullPage(True)
printer.setOutputFileName(filename)
# get some sizes for calculations
nb_width, nb_height = self.selector_size("#notebook")
# make the screen really long to fit the notebook
self.session.page.setViewportSize(
QtCore.QSize(VIEWPORT[0], nb_height + 40)
)
body_width, body_height = self.selector_size("body")
# calculate the native size
ratio = paper_size[0] / body_width
# make the page really long to fit the notebook
printer.setPaperSize(
QtCore.QSizeF(paper_size[0], nb_height * ratio),
paper_units)
painter = QPainter(printer)
# this is a dark art
painter.scale(8, 8)
self.session.main_frame.render(painter)
painter.end()
def selector_size(self, selector):
""" get the screen size of an element
"""
size, resources = self.session.evaluate(
"""(function(){
var el = document.querySelector("%s");
return [el.clientWidth, el.clientHeight];
})();""" % selector)
return size
def in_static(self, *bits):
""" return a path added to the current static path
"""
return os.path.join(self.static_path, *bits)
def init_ghost(self):
""" Create ghost instance... could be used to customize ghost/qt
behavior
"""
return Ghost(
log_level=logging.DEBUG
)
def init_session(self):
""" Create a ghost session
"""
return self.ghost.start(
# display=True,
# TODO: read this off config
viewport_size=VIEWPORT,
show_scrollbars=False,
)
def page_ready(self):
""" A delay to allow for all static assets to be loaded. Some still
seem to sneak through, thus the additional, hacky 3 second delay.
On a slow connection, this could *still* create problems.
"""
self.session.wait_for_page_loaded()
time.sleep(3)
def post_process(self):
""" After the PDF has been created, allow for manipulating the document.
The default is to embed the ipynb in the PDF.
"""
if self.embed_ipynb:
unmeta = PdfFileReader(self.in_static(self.pdf_name), "rb")
meta = PdfFileWriter()
meta.appendPagesFromReader(unmeta)
with open(self.in_static(self.ipynb_name), "rb") as fp:
meta.addAttachment(self.ipynb_name, fp.read())
with open(self.in_static(self.pdf_name), "wb") as fp:
meta.write(fp)
def pdf_capture(static_path, capture_server_class=None):
""" Starts a tornado server which serves all of the jupyter path locations
as well as the working directory
"""
settings = {
"static_path": static_path
}
handlers = [
(r"/(.*)", tornado.web.StaticFileHandler, {
"path": settings['static_path']
})
]
# add the jupyter static paths
for path in jupyter_path():
handlers += [
(r"/static/(.*)", tornado.web.StaticFileHandler, {
"path": os.path.join(path, "static")
})
]
app = tornado.web.Application(handlers, **settings)
if capture_server_class is None:
server = CaptureServer(app)
else:
_module, _klass = capture_server_class.split(":")
server = getattr(import_module(_module), _klass)(app)
# can't pass this to the constructor for some reason...
server.static_path = static_path
# add the parsed, normalized notebook
with open(os.path.join(static_path, "notebook.ipynb")) as fp:
server.notebook = nbformat.read(fp, IPYNB_VERSION)
ioloop = IOLoop()
# server.capture will be called when the ioloop is bored for the first time
ioloop.add_callback(server.capture)
# connect to a port
server.listen(PORT)
try:
# run forever
ioloop.start()
except KeyboardInterrupt:
# this is probably not the best way to escape, but works for now
print("Successfully created PDF")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDF from a directory of notebook assets")
parser.add_argument(
"static_path",
help="The directory to generate: must contain an index.html"
)
parser.add_argument(
"--capture-server-class",
help="Alternate server class with entry_point notation, e.g."
"some.module:ServerClass")
pdf_capture(**parser.parse_args().__dict__)
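# Hedged usage sketch (not part of the original module): pdf_capture() expects a
# directory holding the exported index.html plus notebook.ipynb, and relies on
# the Ghost.py / PyQt and tornado dependencies imported above. The path below is
# hypothetical.
#
#     pdf_capture("path/to/exported_assets")
#
# or, equivalently, from a shell:
#
#     python pdf_capture.py path/to/exported_assets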
| {
"content_hash": "700dec7fbd701fab42dfbeabef601c06",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 80,
"avg_line_length": 28.903225806451612,
"alnum_prop": 0.61328125,
"repo_name": "Anaconda-Server/nbbrowserpdf",
"id": "083e7a836a88f1efa957ae996994939bafdc5d71",
"size": "7168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nbbrowserpdf/exporters/pdf_capture.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "JavaScript",
"bytes": "464"
},
{
"name": "Python",
"bytes": "15243"
},
{
"name": "Shell",
"bytes": "176"
},
{
"name": "Smarty",
"bytes": "2092"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import template
from django.utils.translation import ungettext, ugettext as _
from django.utils.timezone import is_aware, utc
from django.template import defaultfilters
from datetime import date, datetime
register = template.Library()
@register.filter
def danatime(value, arg='n/j/Y'):
if not isinstance(value, date): # datetime is a subclass of date
return value
now = datetime.now(utc if is_aware(value) else None)
if value > now:
return value
delta = now - value
if delta.days < 1:
# Fewer than 24 hours ago (potentially different days)
if delta.seconds < 1:
# Fewer than 1 second ago
return _('now')
elif delta.seconds < 60:
# 1 or multiple seconds
# \u00a0 is a nonbreaking space
return ungettext(
'a second ago', '%(count)d\u00a0seconds ago', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
# 1 or multiple minutes
count = delta.seconds // 60
return ungettext(
'a minute ago', '%(count)s\u00a0minutes ago', count
) % {'count': count}
elif delta.seconds // (60 * 60) <= 6:
# 1 to 6 hours
count = delta.seconds // (60 * 60)
return ungettext(
'an hour ago', '%(count)d\u00a0hours ago', count
) % {'count': count}
elif value.date() == datetime.now().date():
# 6 to 24 hours ago on the same day
return defaultfilters.time(value)
else:
# 6 to 24 hours ago yesterday
return _('%s yesterday') % defaultfilters.time(value)
elif value.date().year == datetime.now().year:
# Same year
return defaultfilters.date(value, 'MONTH_DAY_FORMAT')
return defaultfilters.date(value, arg)
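# Hedged usage sketch (not part of the original tag library). In a template the
# filter is loaded and applied to a date/datetime value:
#
#     {% load danatime %}
#     {{ comment.created_at|danatime }}
#
# With Django settings configured it can also be called directly; the values
# below are hypothetical:
#
#     from datetime import timedelta
#     danatime(datetime.now() - timedelta(seconds=30))   # u'30\xa0seconds ago'
#     danatime(datetime.now() - timedelta(hours=3))      # u'3\xa0hours ago'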
| {
"content_hash": "d3440cc4f3e14c0b8201fc2a962a8a91",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 36.148148148148145,
"alnum_prop": 0.5783811475409836,
"repo_name": "danasilver/django-dana-time",
"id": "5fa06bebf15eb5746e64964e0cc16ddfb8ff2e4f",
"size": "1952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "danatime/templatetags/danatime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8290"
}
],
"symlink_target": ""
} |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse
from django.forms import forms, SelectDateWidget
from django.forms.fields import DateTimeField
from django.forms.models import ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from website.models import User, Document, participants_from_doc, createCreationNotification, createDeletionNotification, \
createInfoCreationNotification
from website.view.account import UserIsAdminMixin, user_is_admin
# model
from website.models import Resolution
# forms
class NewResolutionForm(ModelForm):
control_date = DateTimeField(widget=SelectDateWidget(), required=False)
class Meta:
model = Resolution
exclude = ('created_date', 'doc_id',)
class ResolutionForm(ModelForm):
class Meta:
model = Resolution
exclude = ()
# views
class CreateResolutionView(LoginRequiredMixin, CreateView):
form_class = NewResolutionForm
template_name = 'resolution/edit.html'
def get_success_url(self):
return reverse('document-edit', kwargs={'pk': self.kwargs.get('doc_id')})
def get_context_data(self, **kwargs):
context = super(CreateResolutionView, self).get_context_data(**kwargs)
context['action'] = reverse('resolution-new', kwargs=self.kwargs)
context['is_admin'] = user_is_admin(self.request.user)
context['active_menu'] = 'resolutions'
context['doc_id'] = self.kwargs.get('doc_id')
return context
def form_valid(self, form):
doc_id = self.kwargs.get('doc_id', None)
doc = get_object_or_404(Document, pk=doc_id)
resolution = form.save(commit=False)
resolution.doc_id = doc
resolution.created_date = now
resolution.save()
users = participants_from_doc(doc)
for user in users:
createInfoCreationNotification(user, unicode('Резолюция к документу <b>%s</b> создана','utf-8') % (unicode(doc.name)))
return HttpResponseRedirect(self.get_success_url())
class UpdateResolutionView(LoginRequiredMixin, UpdateView):
model = Resolution
template_name = 'resolution/edit.html'
fields = ('__all__')
def get_success_url(self):
return reverse('document-edit', kwargs={'pk': self.kwargs.get('doc_id')})
def get_context_data(self, **kwargs):
context = super(UpdateResolutionView, self).get_context_data(**kwargs)
context['action'] = reverse('resolution-edit', kwargs={'pk': self.get_object().id})
context['is_admin'] = user_is_admin(self.request.user)
context['active_menu'] = 'resolutions'
return context
class DeleteResolutionView(LoginRequiredMixin, DeleteView):
model = Resolution
template_name = 'resolution/delete.html'
def get_success_url(self):
return reverse('document-edit', kwargs={'pk': self.kwargs.get('doc_id')})
def get_context_data(self, **kwargs):
context = super(DeleteResolutionView, self).get_context_data(**kwargs)
context['is_admin'] = user_is_admin(self.request.user)
context['active_menu'] = 'resolutions'
return context
def delete(self, request, *args, **kwargs):
doc_id = self.kwargs.get('doc_id', None)
doc = get_object_or_404(Document, pk=doc_id)
users = participants_from_doc(doc)
url = super(DeleteResolutionView, self).delete(request, args, kwargs)
for user in users:
createDeletionNotification(user, unicode('Резолюция к <b>%s</b> удалена','utf-8') % (unicode(doc.name)))
return url
| {
"content_hash": "1cb48e579fa7845e43904c7ae39dd62e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 130,
"avg_line_length": 38.36734693877551,
"alnum_prop": 0.6867021276595745,
"repo_name": "abulbakha/sed",
"id": "a0db89f98815c38938a4ca2a5a141cba6dfbe383",
"size": "3827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/view/resolution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3684"
},
{
"name": "HTML",
"bytes": "113583"
},
{
"name": "JavaScript",
"bytes": "6673"
},
{
"name": "Python",
"bytes": "58638"
}
],
"symlink_target": ""
} |
import sys, os, urllib
sys.path.insert(1, "../../")
import h2o
def separator(ip, port):
path = "smalldata/jira/hexdev_29.csv"
fhex = h2o.import_file(h2o.locate(path), sep=",")
fhex.summary()
fhex_col_summary = h2o.H2OConnection.get_json("Frames/" + urllib.quote(fhex._id) + "/summary")["frames"][0]["columns"]
fhex_missing_count = sum([e["missing_count"] for e in fhex_col_summary])
assert fhex_missing_count == 0
fhex_wrong_separator = h2o.import_file(h2o.locate(path), sep=";")
fhex_wrong_separator.summary()
fhex_wrong_separator_col_summary = h2o.H2OConnection.get_json("Frames/" + urllib.quote(fhex_wrong_separator._id) + "/summary")["frames"][0]["columns"]
fhex_wrong_separator_missing_count = sum([e["missing_count"] for e in fhex_wrong_separator_col_summary])
assert fhex_wrong_separator_missing_count == fhex_wrong_separator._nrows*fhex_wrong_separator._ncols
try:
h2o.import_file(h2o.locate(path), sep="--")
except ValueError:
pass
else:
assert False
if __name__ == "__main__":
h2o.run_test(sys.argv, separator)
| {
"content_hash": "f4ca83b2aa2697c6a14e04314740979c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 155,
"avg_line_length": 37.13333333333333,
"alnum_prop": 0.6570915619389587,
"repo_name": "printedheart/h2o-3",
"id": "97627d9c61a347d4c5f1755741a5ccabecf33792",
"size": "1356",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/testdir_jira/pyunit_hexdev_29_separator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "147257"
},
{
"name": "Java",
"bytes": "5417378"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34005"
},
{
"name": "Python",
"bytes": "2098211"
},
{
"name": "R",
"bytes": "1831996"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "47017"
},
{
"name": "TeX",
"bytes": "588475"
}
],
"symlink_target": ""
} |
"""Public API functions and helpers for declarative."""
from ...schema import Table, MetaData
from ...orm import synonym as _orm_synonym, \
comparable_property,\
interfaces, properties, attributes
from ...orm.util import polymorphic_union
from ...orm.base import _mapper_or_none
from ...util import OrderedDict, hybridmethod, hybridproperty
from ... import util
from ... import exc
import weakref
from .base import _as_declarative, \
_declarative_constructor,\
_DeferredMapperConfig, _add_attribute
from .clsregistry import _class_resolver
def instrument_declarative(cls, registry, metadata):
"""Given a class, configure the class declaratively,
using the given registry, which can be any dictionary, and
MetaData object.
"""
if '_decl_class_registry' in cls.__dict__:
raise exc.InvalidRequestError(
"Class %r already has been "
"instrumented declaratively" % cls)
cls._decl_class_registry = registry
cls.metadata = metadata
_as_declarative(cls, cls.__name__, cls.__dict__)
def has_inherited_table(cls):
"""Given a class, return True if any of the classes it inherits from has a
mapped table, otherwise return False.
"""
for class_ in cls.__mro__[1:]:
if getattr(class_, '__table__', None) is not None:
return True
return False
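# Hedged sketch (not part of this module): has_inherited_table() answers True
# only for classes whose bases already carry a mapped __table__, e.g.:
#
#     from sqlalchemy import Column, ForeignKey, Integer
#     from sqlalchemy.ext.declarative import declarative_base, has_inherited_table
#
#     Base = declarative_base()
#
#     class Parent(Base):
#         __tablename__ = 'parent'
#         id = Column(Integer, primary_key=True)
#
#     class Child(Parent):
#         __tablename__ = 'child'
#         id = Column(ForeignKey('parent.id'), primary_key=True)
#
#     has_inherited_table(Parent)   # False
#     has_inherited_table(Child)    # True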
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if '_decl_class_registry' not in cls.__dict__:
_as_declarative(cls, classname, cls.__dict__)
type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
_add_attribute(cls, key, value)
def synonym_for(name, map_column=False):
"""Decorator, make a Python @property a query synonym for a column.
A decorator version of :func:`~sqlalchemy.orm.synonym`. The function being
decorated is the 'descriptor', otherwise passes its arguments through to
synonym()::
@synonym_for('col')
@property
def prop(self):
return 'special sauce'
The regular ``synonym()`` is also usable directly in a declarative setting
and may be convenient for read/write properties::
prop = synonym('col', descriptor=property(_read_prop, _write_prop))
"""
def decorate(fn):
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
def comparable_using(comparator_factory):
"""Decorator, allow a Python @property to be used in query criteria.
This is a decorator front end to
:func:`~sqlalchemy.orm.comparable_property` that passes
through the comparator_factory and the function being decorated::
@comparable_using(MyComparatorType)
@property
def prop(self):
return 'special sauce'
The regular ``comparable_property()`` is also usable directly in a
declarative setting and may be convenient for read/write properties::
prop = comparable_property(MyComparatorType)
"""
def decorate(fn):
return comparable_property(comparator_factory, fn)
return decorate
class declared_attr(interfaces._MappedAttribute, property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
@declared_attr turns the attribute into a scalar-like
property that can be invoked from the uninstantiated class.
Declarative treats attributes specifically marked with
@declared_attr as returning a construct that is specific
to mapping or declarative table configuration. The name
of the attribute is that of what the non-dynamic version
of the attribute would be.
@declared_attr is more often than not applicable to mixins,
to define relationships that are to be applied to different
implementors of the class::
class ProvidesUser(object):
"A mixin that adds a 'user' relationship to classes."
@declared_attr
def user(self):
return relationship("User")
It also can be applied to mapped classes, such as to provide
a "polymorphic" scheme for inheritance::
class Employee(Base):
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
return {"polymorphic_identity":cls.__name__}
.. versionchanged:: 0.8 :class:`.declared_attr` can be used with
non-ORM or extension attributes, such as user-defined attributes
or :func:`.association_proxy` objects, which will be assigned
to the class at class construction time.
"""
def __init__(self, fget, cascading=False):
super(declared_attr, self).__init__(fget)
self.__doc__ = fget.__doc__
self._cascading = cascading
def __get__(desc, self, cls):
reg = cls.__dict__.get('_sa_declared_attr_reg', None)
if reg is None:
manager = attributes.manager_of_class(cls)
if manager is None:
util.warn(
"Unmanaged access of declarative attribute %s from "
"non-mapped class %s" %
(desc.fget.__name__, cls.__name__))
return desc.fget(cls)
if reg is None:
return desc.fget(cls)
elif desc in reg:
return reg[desc]
else:
reg[desc] = obj = desc.fget(cls)
return obj
@hybridmethod
def _stateful(cls, **kw):
return _stateful_declared_attr(**kw)
@hybridproperty
def cascading(cls):
"""Mark a :class:`.declared_attr` as cascading.
This is a special-use modifier which indicates that a column
or MapperProperty-based declared attribute should be configured
distinctly per mapped subclass, within a mapped-inheritance scenario.
Below, both MyClass as well as MySubClass will have a distinct
``id`` Column object established::
class HasSomeAttribute(object):
@declared_attr.cascading
def some_id(cls):
if has_inherited_table(cls):
return Column(
ForeignKey('myclass.id'), primary_key=True)
else:
return Column(Integer, primary_key=True)
class MyClass(HasSomeAttribute, Base):
""
# ...
class MySubClass(MyClass):
""
# ...
The behavior of the above configuration is that ``MySubClass``
will refer to both its own ``id`` column as well as that of
``MyClass`` underneath the attribute named ``some_id``.
.. seealso::
:ref:`declarative_inheritance`
:ref:`mixin_inheritance_columns`
"""
return cls._stateful(cascading=True)
class _stateful_declared_attr(declared_attr):
def __init__(self, **kw):
self.kw = kw
def _stateful(self, **kw):
new_kw = self.kw.copy()
new_kw.update(kw)
return _stateful_declared_attr(**new_kw)
def __call__(self, fn):
return declared_attr(fn, **self.kw)
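# --- Illustrative sketch (editor's addition, not part of the original source) ---
# A self-contained @declared_attr mixin example against an in-memory SQLite
# engine; the Address/HasAddress/Customer names are invented for illustration.
def _example_declared_attr_mixin():
    from sqlalchemy import Column, Integer, ForeignKey, create_engine
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declarative_base, declared_attr

    Base = declarative_base()

    class Address(Base):
        __tablename__ = 'address'
        id = Column(Integer, primary_key=True)

    class HasAddress(object):
        # Each mapped subclass gets its own ForeignKey column; a plain class
        # attribute would share one Column object between all subclasses.
        @declared_attr
        def address_id(cls):
            return Column(Integer, ForeignKey('address.id'))

        @declared_attr
        def address(cls):
            return relationship("Address")

    class Customer(HasAddress, Base):
        __tablename__ = 'customer'
        id = Column(Integer, primary_key=True)

    Base.metadata.create_all(create_engine("sqlite://"))
    return Customer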
def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
name='Base', constructor=_declarative_constructor,
class_registry=None,
metaclass=DeclarativeMeta):
"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces
appropriate :class:`~sqlalchemy.schema.Table` objects and makes
the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
information provided declaratively in the class and any subclasses
of the class.
:param bind: An optional
:class:`~sqlalchemy.engine.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData`
instance.
:param metadata:
An optional :class:`~sqlalchemy.schema.MetaData` instance. All
:class:`~sqlalchemy.schema.Table` objects implicitly declared by
subclasses of the base will share this MetaData. A MetaData instance
will be created if none is provided. The
:class:`~sqlalchemy.schema.MetaData` instance will be available via the
`metadata` attribute of the generated declarative base class.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
be used to map subclasses to their Tables.
:param cls:
Defaults to :class:`object`. A type to use as the base for the generated
declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param constructor:
Defaults to
:func:`~sqlalchemy.ext.declarative._declarative_constructor`, an
__init__ implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param class_registry: optional dictionary that will serve as the
registry of class names-> mapped classes when string names
are used to identify classes inside of :func:`.relationship`
and others. Allows two or more declarative base classes
to share the same registry of class names for simplified
inter-base relationships.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
.. seealso::
:func:`.as_declarative`
"""
lcl_metadata = metadata or MetaData()
if bind:
lcl_metadata.bind = bind
if class_registry is None:
class_registry = weakref.WeakValueDictionary()
bases = not isinstance(cls, tuple) and (cls,) or cls
class_dict = dict(_decl_class_registry=class_registry,
metadata=lcl_metadata)
if constructor:
class_dict['__init__'] = constructor
if mapper:
class_dict['__mapper_cls__'] = mapper
return metaclass(name, bases, class_dict)
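# --- Illustrative sketch (editor's addition, not part of the original source) ---
# Minimal end-to-end use of declarative_base() against an in-memory SQLite
# engine; the User class and its columns are invented for illustration.
def _example_declarative_base():
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base(name='Base')

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    # The generated __init__ (_declarative_constructor) assigns **kwargs to
    # mapped attributes, so keyword construction works out of the box.
    session = sessionmaker(bind=engine)()
    session.add(User(name="alice"))
    session.commit()
    return session.query(User).count()     # -> 1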
def as_declarative(**kw):
"""
Class decorator for :func:`.declarative_base`.
Provides a syntactical shortcut to the ``cls`` argument
sent to :func:`.declarative_base`, allowing the base class
to be converted in-place to a "declarative" base::
from sqlalchemy.ext.declarative import as_declarative
@as_declarative()
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyMappedClass(Base):
# ...
All keyword arguments passed to :func:`.as_declarative` are passed
along to :func:`.declarative_base`.
.. versionadded:: 0.8.3
.. seealso::
:func:`.declarative_base`
"""
def decorate(cls):
kw['cls'] = cls
kw['name'] = cls.__name__
return declarative_base(**kw)
return decorate
class ConcreteBase(object):
"""A helper class for 'concrete' declarative mappings.
:class:`.ConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :meth:`.after_configured` event.
:class:`.ConcreteBase` produces a mapped
table for the class itself. Compare to :class:`.AbstractConcreteBase`,
which does not.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'concrete':True}
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
"""
@classmethod
def _create_polymorphic_union(cls, mappers):
return polymorphic_union(OrderedDict(
(mp.polymorphic_identity, mp.local_table)
for mp in mappers
), 'type', 'pjoin')
@classmethod
def __declare_first__(cls):
m = cls.__mapper__
if m.with_polymorphic:
return
mappers = list(m.self_and_descendants)
pjoin = cls._create_polymorphic_union(mappers)
m._set_with_polymorphic(("*", pjoin))
m._set_polymorphic_on(pjoin.c.type)
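# --- Illustrative sketch (editor's addition, not part of the original source) ---
# End-to-end ConcreteBase usage against an in-memory SQLite engine, following
# the Employee/Manager shape from the docstring above; explicit primary-key
# values are used so rows from the two concrete tables do not collide.
def _example_concrete_base():
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.ext.declarative import declarative_base, ConcreteBase

    Base = declarative_base()

    class Employee(ConcreteBase, Base):
        __tablename__ = 'employee'
        employee_id = Column(Integer, primary_key=True)
        name = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'employee', 'concrete': True}

    class Manager(Employee):
        __tablename__ = 'manager'
        employee_id = Column(Integer, primary_key=True)
        name = Column(String(50))
        manager_data = Column(String(40))
        __mapper_args__ = {'polymorphic_identity': 'manager', 'concrete': True}

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([Employee(employee_id=1, name="emp"),
                     Manager(employee_id=2, name="mgr", manager_data="x")])
    session.commit()
    # Querying the base selects from the polymorphic_union of both tables and
    # returns Employee and Manager instances together.
    return session.query(Employee).all()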
class AbstractConcreteBase(ConcreteBase):
"""A helper class for 'concrete' declarative mappings.
:class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :meth:`.after_configured` event.
:class:`.AbstractConcreteBase` does produce a mapped class
for the base class, however it is not persisted to any table; it
    is instead mapped directly to the "polymorphic" selectable
and is only used for selecting. Compare to :class:`.ConcreteBase`,
which does create a persisted table for the base class.
Example::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
The abstract base class is handled by declarative in a special way;
at class configuration time, it behaves like a declarative mixin
or an ``__abstract__`` base class. Once classes are configured
and mappings are produced, it then gets mapped itself, but
    after all of its descendants. This is a unique system of mapping
not found in any other SQLAlchemy system.
Using this approach, we can specify columns and properties
that will take place on mapped subclasses, in the way that
we normally do as in :ref:`declarative_mixins`::
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
class Employee(AbstractConcreteBase, Base):
employee_id = Column(Integer, primary_key=True)
@declared_attr
def company_id(cls):
return Column(ForeignKey('company.id'))
@declared_attr
def company(cls):
return relationship("Company")
class Manager(Employee):
__tablename__ = 'manager'
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
When we make use of our mappings however, both ``Manager`` and
``Employee`` will have an independently usable ``.company`` attribute::
session.query(Employee).filter(Employee.company.has(id=5))
.. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase`
have been reworked to support relationships established directly
on the abstract base, without any special configurational steps.
"""
__no_table__ = True
@classmethod
def __declare_first__(cls):
cls._sa_decl_prepare_nocascade()
@classmethod
def _sa_decl_prepare_nocascade(cls):
if getattr(cls, '__mapper__', None):
return
to_map = _DeferredMapperConfig.config_for_cls(cls)
# can't rely on 'self_and_descendants' here
# since technically an immediate subclass
# might not be mapped, but a subclass
# may be.
mappers = []
stack = list(cls.__subclasses__())
while stack:
klass = stack.pop()
stack.extend(klass.__subclasses__())
mn = _mapper_or_none(klass)
if mn is not None:
mappers.append(mn)
pjoin = cls._create_polymorphic_union(mappers)
to_map.local_table = pjoin
m_args = to_map.mapper_args_fn or dict
def mapper_args():
args = m_args()
args['polymorphic_on'] = pjoin.c.type
return args
to_map.mapper_args_fn = mapper_args
m = to_map.map()
for scls in cls.__subclasses__():
sm = _mapper_or_none(scls)
if sm and sm.concrete and cls in scls.__bases__:
sm._set_concrete_base(m)
class DeferredReflection(object):
"""A helper class for construction of mappings based on
a deferred reflection step.
Normally, declarative can be used with reflection by
setting a :class:`.Table` object using autoload=True
as the ``__table__`` attribute on a declarative class.
The caveat is that the :class:`.Table` must be fully
reflected, or at the very least have a primary key column,
at the point at which a normal declarative mapping is
constructed, meaning the :class:`.Engine` must be available
at class declaration time.
The :class:`.DeferredReflection` mixin moves the construction
of mappers to be at a later point, after a specific
method is called which first reflects all :class:`.Table`
objects created so far. Classes can define it as such::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import DeferredReflection
Base = declarative_base()
class MyClass(DeferredReflection, Base):
__tablename__ = 'mytable'
Above, ``MyClass`` is not yet mapped. After a series of
classes have been defined in the above fashion, all tables
can be reflected and mappings created using
:meth:`.prepare`::
engine = create_engine("someengine://...")
DeferredReflection.prepare(engine)
The :class:`.DeferredReflection` mixin can be applied to individual
classes, used as the base for the declarative base itself,
or used in a custom abstract class. Using an abstract base
    allows only a subset of classes to be prepared for a
particular prepare step, which is necessary for applications
that use more than one engine. For example, if an application
has two engines, you might use two bases, and prepare each
separately, e.g.::
class ReflectedOne(DeferredReflection, Base):
__abstract__ = True
class ReflectedTwo(DeferredReflection, Base):
__abstract__ = True
class MyClass(ReflectedOne):
__tablename__ = 'mytable'
class MyOtherClass(ReflectedOne):
__tablename__ = 'myothertable'
class YetAnotherClass(ReflectedTwo):
__tablename__ = 'yetanothertable'
# ... etc.
Above, the class hierarchies for ``ReflectedOne`` and
``ReflectedTwo`` can be configured separately::
ReflectedOne.prepare(engine_one)
ReflectedTwo.prepare(engine_two)
.. versionadded:: 0.8
"""
@classmethod
def prepare(cls, engine):
"""Reflect all :class:`.Table` objects for all current
:class:`.DeferredReflection` subclasses"""
to_map = _DeferredMapperConfig.classes_for_base(cls)
for thingy in to_map:
cls._sa_decl_prepare(thingy.local_table, engine)
thingy.map()
mapper = thingy.cls.__mapper__
metadata = mapper.class_.metadata
for rel in mapper._props.values():
if isinstance(rel, properties.RelationshipProperty) and \
rel.secondary is not None:
if isinstance(rel.secondary, Table):
cls._reflect_table(rel.secondary, engine)
elif isinstance(rel.secondary, _class_resolver):
rel.secondary._resolvers += (
cls._sa_deferred_table_resolver(engine, metadata),
)
@classmethod
def _sa_deferred_table_resolver(cls, engine, metadata):
def _resolve(key):
t1 = Table(key, metadata)
cls._reflect_table(t1, engine)
return t1
return _resolve
@classmethod
def _sa_decl_prepare(cls, local_table, engine):
# autoload Table, which is already
# present in the metadata. This
# will fill in db-loaded columns
# into the existing Table object.
if local_table is not None:
cls._reflect_table(local_table, engine)
@classmethod
def _reflect_table(cls, table, engine):
Table(table.name,
table.metadata,
extend_existing=True,
autoload_replace=False,
autoload=True,
autoload_with=engine,
schema=table.schema)
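# --- Illustrative sketch (editor's addition, not part of the original source) ---
# DeferredReflection against an in-memory SQLite database: the table is created
# up front through a separate MetaData, the mapped class only names it, and
# prepare() reflects the columns; the 'widget' table/class names are invented.
def _example_deferred_reflection():
    from sqlalchemy import MetaData, Table, Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base, DeferredReflection

    engine = create_engine("sqlite://")
    setup = MetaData()
    Table('widget', setup,
          Column('id', Integer, primary_key=True),
          Column('name', String(50)))
    setup.create_all(engine)

    Base = declarative_base()

    class Widget(DeferredReflection, Base):
        __tablename__ = 'widget'     # no columns declared; they are reflected

    # Widget is not mapped until prepare() reflects the table and maps it.
    DeferredReflection.prepare(engine)
    return Widget.__table__.columns.keys()     # -> ['id', 'name']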
| {
"content_hash": "a3285ed0eda288568c541d4e992205e9",
"timestamp": "",
"source": "github",
"line_count": 654,
"max_line_length": 79,
"avg_line_length": 34.0045871559633,
"alnum_prop": 0.6191375511488826,
"repo_name": "mollstam/UnrealPy",
"id": "3d46bd4cb291b08c164673f04cc72fc3f7981962",
"size": "22481",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/SQLAlchemy-1.0.6/lib/sqlalchemy/ext/declarative/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886305"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925518"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from flask import current_app, request, Response, json
class JsonResponse(Response):
""" Response from a JSON API view """
def __init__(self, response, status=None, headers=None, **kwargs):
""" Init a JSON response
:param response: Response data
:type response: *
:param status: Status code
:type status: int|None
:param headers: Additional headers
:type headers: dict|None
"""
# Store response
self._response_data = self.preprocess_response_data(response)
# PrettyPrint?
try:
indent = 2 if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and not request.is_xhr else None
except RuntimeError: # "RuntimeError: working outside of application context"
indent = None
# Init super
super(JsonResponse, self).__init__(
json.dumps(self._response_data, indent=indent),
headers=headers, status=status, mimetype='application/json',
direct_passthrough=True, **kwargs)
def preprocess_response_data(self, response):
""" Preprocess the response data.
Override this method to have custom handling of the response
:param response: Return value from the view function
:type response: *
:return: Preprocessed value
"""
return response
def get_json(self):
""" Get the response data object (preprocessed) """
return self._response_data
def __getitem__(self, item):
""" Proxy method to get items from the underlying object """
return self._response_data[item]
def normalize_response_value(rv):
""" Normalize the response value into a 3-tuple (rv, status, headers)
:type rv: tuple|*
:returns: tuple(rv, status, headers)
:rtype: tuple(Response|JsonResponse|*, int|None, dict|None)
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
return rv, status, headers
def make_json_response(rv):
""" Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse
"""
# Tuple of (response, status, headers)
rv, status, headers = normalize_response_value(rv)
# JsonResponse
if isinstance(rv, JsonResponse):
return rv
# Data
return JsonResponse(rv, status, headers)
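# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Calling make_json_response() directly, outside any request context; the
# payload, status and header are invented.  Inside a real Flask view you would
# return the same (data, status, headers) tuple and wrap it identically.
def _example_make_json_response():
    resp = make_json_response(({'pong': True}, 201, {'X-Demo': 'yes'}))
    assert resp.status_code == 201
    assert resp.mimetype == 'application/json'
    # JsonResponse keeps the preprocessed data around for inspection.
    assert resp.get_json() == {'pong': True}
    return resp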
| {
"content_hash": "eb3651dc5a2deea6e73a183daf0aeced",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 108,
"avg_line_length": 31.772151898734176,
"alnum_prop": 0.6247011952191235,
"repo_name": "kolypto/py-flask-jsontools",
"id": "93485f084e7b633357a099d5217db0026bb39c49",
"size": "2510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_jsontools/response.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "754"
},
{
"name": "Python",
"bytes": "29269"
}
],
"symlink_target": ""
} |
"""
This module contains the main user-facing abstractions. The main entry
point is the `GraphDatabase` class which can be used to obtain `Driver`
instances that are in turn used for managing sessions.
"""
from __future__ import division
from collections import deque
from .bolt import connect, Response, RUN, PULL_ALL
from .compat import integer, string, urlparse
from .constants import DEFAULT_PORT, ENCRYPTED_DEFAULT, TRUST_DEFAULT, TRUST_SIGNED_CERTIFICATES
from .exceptions import CypherError, ProtocolError, ResultError
from .ssl_compat import SSL_AVAILABLE, SSLContext, PROTOCOL_SSLv23, OP_NO_SSLv2, CERT_REQUIRED
from .summary import ResultSummary
from .types import hydrated
DEFAULT_MAX_POOL_SIZE = 50
class AuthToken(object):
""" Container for auth information
"""
def __init__(self, scheme, principal, credentials):
self.scheme = scheme
self.principal = principal
self.credentials = credentials
class GraphDatabase(object):
""" The :class:`.GraphDatabase` class provides access to all graph
database functionality. This is primarily used to construct a driver
instance, using the :meth:`.driver` method.
"""
@staticmethod
def driver(url, **config):
""" Acquire a :class:`.Driver` instance for the given URL and
configuration:
>>> from neo4j.v1 import GraphDatabase
>>> driver = GraphDatabase.driver("bolt://localhost")
"""
return Driver(url, **config)
class Driver(object):
""" Accessor for a specific graph database resource.
"""
def __init__(self, address, **config):
if "://" in address:
parsed = urlparse(address)
if parsed.scheme == "bolt":
host = parsed.hostname
port = parsed.port or DEFAULT_PORT
else:
raise ProtocolError("Only the 'bolt' URI scheme is supported [%s]" % address)
elif ":" in address:
host, port = address.split(":")
port = int(port)
else:
host = address
port = DEFAULT_PORT
self.address = (host, port)
self.config = config
self.max_pool_size = config.get("max_pool_size", DEFAULT_MAX_POOL_SIZE)
self.session_pool = deque()
try:
self.encrypted = encrypted = config["encrypted"]
except KeyError:
_warn_about_insecure_default()
self.encrypted = encrypted = ENCRYPTED_DEFAULT
self.trust = trust = config.get("trust", TRUST_DEFAULT)
if encrypted:
if not SSL_AVAILABLE:
raise RuntimeError("Bolt over TLS is only available in Python 2.7.9+ and Python 3.3+")
ssl_context = SSLContext(PROTOCOL_SSLv23)
ssl_context.options |= OP_NO_SSLv2
if trust >= TRUST_SIGNED_CERTIFICATES:
ssl_context.verify_mode = CERT_REQUIRED
ssl_context.set_default_verify_paths()
self.ssl_context = ssl_context
else:
self.ssl_context = None
def session(self):
""" Create a new session based on the graph database details
specified within this driver:
>>> from neo4j.v1 import GraphDatabase
>>> driver = GraphDatabase.driver("bolt://localhost")
>>> session = driver.session()
"""
session = None
connected = False
while not connected:
try:
session = self.session_pool.pop()
except IndexError:
connection = connect(self.address, self.ssl_context, **self.config)
session = Session(self, connection)
connected = True
else:
if session.healthy:
#session.connection.reset()
connected = session.healthy
return session
def recycle(self, session):
""" Accept a session for recycling, if healthy.
:param session:
:return:
"""
pool = self.session_pool
for s in list(pool): # freezing the pool into a list for iteration allows pool mutation inside the loop
if not s.healthy:
pool.remove(s)
if session.healthy and len(pool) < self.max_pool_size and session not in pool:
pool.appendleft(session)
class StatementResult(object):
""" A handler for the result of Cypher statement execution.
"""
#: The statement text that was executed to produce this result.
statement = None
#: Dictionary of parameters passed with the statement.
parameters = None
def __init__(self, connection, run_response, pull_all_response):
super(StatementResult, self).__init__()
# The Connection instance behind this result.
self.connection = connection
# The keys for the records in the result stream. These are
# lazily populated on request.
self._keys = None
# Buffer for incoming records to be queued before yielding. If
# the result is used immediately, this buffer will be ignored.
self._buffer = deque()
# The result summary (populated after the records have been
# fully consumed).
self._summary = None
# Flag to indicate whether the entire stream has been consumed
# from the network (but not necessarily yielded).
self._consumed = False
def on_header(metadata):
# Called on receipt of the result header.
self._keys = metadata["fields"]
def on_record(values):
# Called on receipt of each result record.
self._buffer.append(values)
def on_footer(metadata):
# Called on receipt of the result footer.
self._summary = ResultSummary(self.statement, self.parameters, **metadata)
self._consumed = True
def on_failure(metadata):
# Called on execution failure.
self._consumed = True
raise CypherError(metadata)
run_response.on_success = on_header
run_response.on_failure = on_failure
pull_all_response.on_record = on_record
pull_all_response.on_success = on_footer
pull_all_response.on_failure = on_failure
def __iter__(self):
while self._buffer:
values = self._buffer.popleft()
yield Record(self.keys(), tuple(map(hydrated, values)))
while not self._consumed:
self.connection.fetch()
while self._buffer:
values = self._buffer.popleft()
yield Record(self.keys(), tuple(map(hydrated, values)))
def keys(self):
""" Return the keys for the records.
"""
# Fetch messages until we have the header or a failure
while self._keys is None and not self._consumed:
self.connection.fetch()
return tuple(self._keys)
def buffer(self):
if self.connection and not self.connection.closed:
while not self._consumed:
self.connection.fetch()
self.connection = None
def consume(self):
""" Consume the remainder of this result and return the
summary.
"""
if self.connection and not self.connection.closed:
list(self)
self.connection = None
return self._summary
def single(self):
""" Return the next record, failing if none or more than one remain.
"""
records = list(self)
num_records = len(records)
if num_records == 0:
raise ResultError("Cannot retrieve a single record, because this result is empty.")
elif num_records != 1:
raise ResultError("Expected a result with a single record, but this result contains at least one more.")
else:
return records[0]
def peek(self):
""" Return the next record without advancing the cursor. Fails
if no records remain.
"""
if self._buffer:
values = self._buffer[0]
return Record(self.keys(), tuple(map(hydrated, values)))
while not self._buffer and not self._consumed:
self.connection.fetch()
if self._buffer:
values = self._buffer[0]
return Record(self.keys(), tuple(map(hydrated, values)))
raise ResultError("End of stream")
class Session(object):
""" Logical session carried out over an established TCP connection.
Sessions should generally be constructed using the :meth:`.Driver.session`
method.
"""
def __init__(self, driver, connection):
self.driver = driver
self.connection = connection
self.transaction = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def healthy(self):
""" Return ``True`` if this session is healthy, ``False`` if
unhealthy and ``None`` if closed.
"""
return self.connection.healthy
def run(self, statement, parameters=None):
""" Run a parameterised Cypher statement.
:param statement: Cypher statement to execute
:param parameters: dictionary of parameters
:return: Cypher result
:rtype: :class:`.StatementResult`
"""
if self.transaction:
raise ProtocolError("Statements cannot be run directly on a session with an open transaction;"
" either run from within the transaction or use a different session.")
return run(self.connection, statement, parameters)
def close(self):
""" Recycle this session through the driver it came from.
"""
if self.connection and not self.connection.closed:
self.connection.fetch_all()
if self.transaction:
self.transaction.close()
self.driver.recycle(self)
def begin_transaction(self):
""" Create a new :class:`.Transaction` within this session.
:return: new :class:`.Transaction` instance.
"""
if self.transaction:
raise ProtocolError("You cannot begin a transaction on a session with an open transaction;"
" either run from within the transaction or use a different session.")
def clear_transaction():
self.transaction = None
self.transaction = Transaction(self.connection, on_close=clear_transaction)
return self.transaction
class Transaction(object):
""" Container for multiple Cypher queries to be executed within
a single context. Transactions can be used within a :py:const:`with`
block where the value of :attr:`.success` will determine whether
the transaction is committed or rolled back on :meth:`.Transaction.close`::
        with session.begin_transaction() as tx:
pass
"""
#: When closed, the transaction will be committed if marked as successful
#: and rolled back otherwise. This attribute can be set in user code
#: multiple times before a transaction completes with only the final
#: value taking effect.
success = False
#: Indicator to show whether the transaction has been closed, either
#: with commit or rollback.
closed = False
def __init__(self, connection, on_close):
self.connection = connection
self.on_close = on_close
run(self.connection, "BEGIN")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
self.success = False
self.close()
def run(self, statement, parameters=None):
""" Run a Cypher statement within the context of this transaction.
:param statement: Cypher statement
:param parameters: dictionary of parameters
:return: result object
"""
assert not self.closed
return run(self.connection, statement, parameters)
def commit(self):
""" Mark this transaction as successful and close in order to
trigger a COMMIT.
"""
self.success = True
self.close()
def rollback(self):
""" Mark this transaction as unsuccessful and close in order to
trigger a ROLLBACK.
"""
self.success = False
self.close()
def close(self):
""" Close this transaction, triggering either a COMMIT or a ROLLBACK.
"""
assert not self.closed
if self.success:
run(self.connection, "COMMIT")
else:
run(self.connection, "ROLLBACK")
self.closed = True
self.on_close()
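# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Transaction as a context manager.  Assumes a Neo4j server reachable at
# bolt://localhost with authentication disabled; the Cypher text and parameter
# names are invented.
def _example_transaction():
    driver = GraphDatabase.driver("bolt://localhost")
    session = driver.session()
    with session.begin_transaction() as tx:
        tx.run("CREATE (p:Person {name: {name}})", {"name": "Alice"})
        # Without marking success, close() issues a ROLLBACK instead of COMMIT.
        tx.success = True
    session.close()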
class Record(object):
""" Record is an ordered collection of fields.
A Record object is used for storing result values along with field names.
Fields can be accessed by numeric or named index (``record[0]`` or
``record["field"]``).
"""
def __init__(self, keys, values):
self._keys = tuple(keys)
self._values = tuple(values)
def keys(self):
""" Return the keys (key names) of the record
"""
return self._keys
def values(self):
""" Return the values of the record
"""
return self._values
def items(self):
""" Return the fields of the record as a list of key and value tuples
"""
return zip(self._keys, self._values)
def index(self, key):
""" Return the index of the given key
"""
try:
return self._keys.index(key)
except ValueError:
raise KeyError(key)
def __record__(self):
return self
def __contains__(self, key):
return self._keys.__contains__(key)
def __iter__(self):
return iter(self._keys)
def copy(self):
return Record(self._keys, self._values)
def __getitem__(self, item):
if isinstance(item, string):
return self._values[self.index(item)]
elif isinstance(item, integer):
return self._values[item]
else:
raise TypeError(item)
def __len__(self):
return len(self._keys)
def __repr__(self):
values = self._values
s = []
for i, field in enumerate(self._keys):
s.append("%s=%r" % (field, values[i]))
return "<Record %s>" % " ".join(s)
def __hash__(self):
return hash(self._keys) ^ hash(self._values)
def __eq__(self, other):
try:
return self._keys == tuple(other.keys()) and self._values == tuple(other.values())
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def basic_auth(user, password):
""" Generate a basic auth token for a given user and password.
:param user: user name
:param password: current password
:return: auth token for use with :meth:`GraphDatabase.driver`
"""
return AuthToken("basic", user, password)
def run(connection, statement, parameters=None):
""" Run a Cypher statement on a given connection.
:param connection: connection to carry the request and response
:param statement: Cypher statement
:param parameters: optional dictionary of parameters
:return: statement result
"""
# Ensure the statement is a Unicode value
if isinstance(statement, bytes):
statement = statement.decode("UTF-8")
params = {}
for key, value in (parameters or {}).items():
if isinstance(key, bytes):
key = key.decode("UTF-8")
if isinstance(value, bytes):
params[key] = value.decode("UTF-8")
else:
params[key] = value
parameters = params
run_response = Response(connection)
pull_all_response = Response(connection)
result = StatementResult(connection, run_response, pull_all_response)
result.statement = statement
result.parameters = parameters
connection.append(RUN, (statement, parameters), response=run_response)
connection.append(PULL_ALL, response=pull_all_response)
connection.send()
return result
_warned_about_insecure_default = False
def _warn_about_insecure_default():
global _warned_about_insecure_default
if not SSL_AVAILABLE and not _warned_about_insecure_default:
from warnings import warn
warn("Bolt over TLS is only available in Python 2.7.9+ and Python 3.3+ "
"so communications are not secure")
_warned_about_insecure_default = True
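# --- Illustrative sketch (editor's addition, not part of the original module) ---
# End-to-end use of this module: driver -> session -> run -> records -> summary.
# Assumes a Neo4j server at bolt://localhost with authentication disabled; the
# Cypher text is invented.
def _example_session_roundtrip():
    driver = GraphDatabase.driver("bolt://localhost")
    with driver.session() as session:
        result = session.run("UNWIND range(1, 3) AS n RETURN n, n * n AS sq")
        # Records support both named and positional access.
        values = [(record["n"], record[1]) for record in result]
        summary = result.consume()
    return values, summary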
| {
"content_hash": "e3269df9f7c34a235066dfca28d2aa15",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 116,
"avg_line_length": 32.590196078431376,
"alnum_prop": 0.6063413753685097,
"repo_name": "nigelsmall/py2neo",
"id": "474d837a04e53086961e6babfe9af988427c2059",
"size": "17360",
"binary": false,
"copies": "1",
"ref": "refs/heads/v3",
"path": "py2neo/packages/neo4j/v1/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3949"
},
{
"name": "Makefile",
"bytes": "6765"
},
{
"name": "Python",
"bytes": "899725"
},
{
"name": "Shell",
"bytes": "6769"
},
{
"name": "Smarty",
"bytes": "3942"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shelves', '0020_alter_shelf_footer_pathname'),
]
operations = [
migrations.AlterField(
model_name='shelf',
name='addon_type',
field=models.PositiveIntegerField(choices=[(1, 'Extension'), (2, 'Deprecated Complete Theme'), (3, 'Dictionary'), (4, 'Search Engine'), (5, 'Language Pack (Application)'), (6, 'Language Pack (Add-on)'), (7, 'Plugin'), (9, 'Deprecated LWT'), (10, 'Theme (Static)'), (12, 'Site Permission')], db_column='addontype_id', default=1),
),
]
| {
"content_hash": "eb4620d456e6ba21470b21f02fa8922a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 340,
"avg_line_length": 39.9375,
"alnum_prop": 0.6040688575899843,
"repo_name": "diox/olympia",
"id": "25fa516e9fdd0165aa77e76a2141f325ff8149e3",
"size": "689",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/shelves/migrations/0021_alter_shelf_addon_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245459"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290496"
},
{
"name": "JavaScript",
"bytes": "750827"
},
{
"name": "Less",
"bytes": "212819"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6811560"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
} |
def match(command, settings):
return command.script.startswith('git d')
def get_new_command(command, settings):
return '{} --staged'.format(command.script)
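# --- Illustrative sketch (editor's addition, not part of the original rule) ---
# What this rule does, using a minimal stand-in for thefuck's Command object;
# only the .script attribute is needed here and settings is unused.
def _example_git_diff_staged():
    from collections import namedtuple
    Command = namedtuple('Command', ['script'])
    cmd = Command(script='git diff foo.py')
    assert match(cmd, settings=None)
    assert get_new_command(cmd, settings=None) == 'git diff foo.py --staged'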
| {
"content_hash": "7461718a68c729a890bb5a0a833802ab",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 47,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7108433734939759,
"repo_name": "suxinde2009/thefuck",
"id": "32e5dcbbb1ade61584748dd3118cb480193da20f",
"size": "166",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "thefuck/rules/git_diff_staged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132514"
}
],
"symlink_target": ""
} |
from collections import Counter
def most_common(s):
    c = Counter(s)
    return ''.join(sorted(s, key=lambda x: c[x], reverse=True))
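# --- Illustrative example (editor's addition) ---
# Characters are reordered by descending frequency; ties keep their original
# relative order because sorted() is stable.
def _example_most_common():
    assert most_common("aabbbc") == "bbbaac"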
| {
"content_hash": "9c9023336fd6437c14cbcd730769222c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 60,
"avg_line_length": 26.2,
"alnum_prop": 0.6717557251908397,
"repo_name": "Orange9000/Codewars",
"id": "fe2779c6c21e84fef1db96f0a945710336f7d623",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Solutions/beta/beta_most_common_first.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "57757"
},
{
"name": "Python",
"bytes": "53980"
}
],
"symlink_target": ""
} |
"""Functions to generate and load snapshot."""
import collections
import json
from qisys import ui
import qisrc.git
import qisrc.status
import qisrc.reset
class Snapshot(object):
""" Just a container for a git worktree snapshot """
def __init__(self):
self.refs = collections.OrderedDict()
self.manifest = qisrc.sync.LocalManifest()
self.format_version = None
def dump(self, output_path, deprecated_format=True):
""" Dump the snapshot into a human readable file """
if deprecated_format:
self._dump_deprecated(output_path)
else:
self._dump_json(output_path)
def _dump_deprecated(self, output_path):
srcs = self.refs.keys()
with open(output_path, 'w') as fp:
for src in srcs:
fp.write(src + ":" + self.refs[src] + "\n")
def _dump_json(self, output_path):
with open(output_path, "w") as fp:
serializable_manifest = dict()
serializable_manifest["url"] = self.manifest.url
serializable_manifest["branch"] = self.manifest.branch
serializable_manifest["groups"] = self.manifest.groups
if self.manifest.ref:
serializable_manifest["ref"] = self.manifest.ref
to_dump = {
"format" : 2,
"manifest" : serializable_manifest,
"refs" : self.refs
}
json.dump(to_dump, fp, indent=2)
def load(self, source):
""" Load a snapshot from a file path or a file object """
# Try to open, else assume it's a file object
try:
fp = open(source, "r")
data = fp.read()
except TypeError:
data = source.read()
try:
parsed = json.loads(data)
self._load_json(parsed)
except ValueError:
self._load_deprecated(data)
try:
source.close()
except AttributeError:
pass
def _load_deprecated(self, source):
for line in source.splitlines():
try:
(src, sha1) = line.split(":")
except ValueError:
ui.error("could not parse", line)
continue
src = src.strip()
sha1 = sha1.strip()
self.refs[src] = sha1
def _load_json(self, parsed_json):
self.format_version = parsed_json["format"]
if self.format_version == 1:
manifest_json = parsed_json["manifests"]["default"]
elif self.format_version == 2:
manifest_json = parsed_json["manifest"]
else:
raise Exception("unknown format: %s" % self.format_version)
self.refs = parsed_json["refs"]
for key, value in manifest_json.iteritems():
setattr(self.manifest, key, value)
def __eq__(self, other):
if not isinstance(other, Snapshot):
return False
return other.refs == self.refs and other.manifest == self.manifest
def __ne__(self, other):
return not self.__eq__(other)
def generate_snapshot(git_worktree, output_path, deprecated_format=True):
snapshot = git_worktree.snapshot()
    snapshot.dump(output_path, deprecated_format=deprecated_format)
    ui.info(ui.green, "Snapshot generated in", ui.white, output_path)
def load_snapshot(git_worktree, input_path):
"""Load a snapshot file and reset projects."""
snapshot = Snapshot()
ui.info(ui.green, "Loading snapshot from", ui.white, input_path)
snapshot.load(input_path)
for (src, ref) in snapshot.refs.iteritems():
ui.info("Loading", src)
git_project = git_worktree.get_git_project(src, raises=False)
if git_project:
qisrc.reset.clever_reset_ref(git_project, ref)
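# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Round-tripping a Snapshot through the deprecated "src:sha1" text format.
# Assumes qisrc (including qisrc.sync) is importable; the output path, project
# paths and sha1 values are invented.
def _example_snapshot_roundtrip(output_path="/tmp/example_snapshot.txt"):
    snapshot = Snapshot()
    snapshot.refs["lib/foo"] = "deadbeef"
    snapshot.refs["lib/bar"] = "cafebabe"
    snapshot.dump(output_path, deprecated_format=True)

    loaded = Snapshot()
    loaded.load(output_path)     # non-JSON content falls back to _load_deprecated
    return loaded.refs == snapshot.refs     # -> True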
| {
"content_hash": "1c0d48615a9708448eadcfc733d79006",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 74,
"avg_line_length": 34.5945945945946,
"alnum_prop": 0.5755208333333334,
"repo_name": "dmerejkowsky/qibuild",
"id": "56af6827113e8fbc283c57af8a1fd936a02fc95c",
"size": "4013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qisrc/snapshot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7442"
},
{
"name": "C++",
"bytes": "22059"
},
{
"name": "CMake",
"bytes": "267118"
},
{
"name": "Java",
"bytes": "4132"
},
{
"name": "Makefile",
"bytes": "2222"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1145711"
},
{
"name": "Shell",
"bytes": "1085"
}
],
"symlink_target": ""
} |
import pytest
from fixture.application import Application
import json
import os.path
import importlib
import jsonpickle
from fixture.db import DbFixture
from fixture.orm import ORMFixture
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture
def app(request):
global fixture
#global target
web_config = load_config(request.config.getoption("--target"))['web']
browser = request.config.getoption("--browser")
if fixture is None or not fixture.is_valid():
fixture = Application(browser=browser, base_url=web_config["baseUrl"])
fixture.session.ensure_login(username=web_config["username"], password=web_config["password"])
return fixture
@pytest.fixture(scope="session")
def db(request):
db_config = load_config(request.config.getoption("--target"))['db']
dbfixture = DbFixture(host=db_config['host'], name=db_config['name'], user=db_config['user'],
password=db_config['password'])
def fin():
dbfixture.destroy()
request.addfinalizer(fin)
return dbfixture
##########################
@pytest.fixture(scope="session")
def orm(request):
orm_config = load_config(request.config.getoption("--target"))['db']
ormfixture = ORMFixture(host=orm_config['host'], name=orm_config['name'], user=orm_config['user'],
password=orm_config['password'])
def fin():
pass #ormfixture.destroy()
request.addfinalizer(fin)
return ormfixture
############################
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
@pytest.fixture
def check_ui(request):
return request.config.getoption("--check_ui")
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox")
parser.addoption("--target", action="store", default="target.json")
parser.addoption("--check_ui", action="store_true")
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
testdata = load_from_module(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
elif fixture.startswith("json_"):
testdata = load_from_json(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.json" % file)) as k:
return jsonpickle.decode(k.read())
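# --- Illustrative sketch (editor's addition, not part of the original conftest) ---
# Shape of a test module that consumes these fixtures.  Parameters starting
# with "json_" are parametrized from data/<name>.json via pytest_generate_tests
# above; "app" yields a logged-in Application.  The contact helpers shown are
# hypothetical names used only for illustration.
#
#     # test_add_contact.py
#     def test_add_contact(app, db, json_contacts, check_ui):
#         app.contact.create(json_contacts)         # hypothetical page helper
#         if check_ui:                              # only when --check_ui is set
#             assert json_contacts in db.get_contact_list()  # hypothetical
#
# Typical invocation (the options are registered in pytest_addoption above):
#     pytest --target=target.json --browser=chrome --check_ui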
| {
"content_hash": "14a796725955d88e77369c15e7d2d36d",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 102,
"avg_line_length": 30.112244897959183,
"alnum_prop": 0.6553710606574042,
"repo_name": "esemin83/python_training",
"id": "3187d7d0fb52bde830e89aefe0a7fda90015b74f",
"size": "2951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "1763"
},
{
"name": "HTML",
"bytes": "419542"
},
{
"name": "Python",
"bytes": "63185"
},
{
"name": "RobotFramework",
"bytes": "2121"
}
],
"symlink_target": ""
} |
from math import sqrt
from fractions import gcd
def get_factors(x):
factors = set([x])
sqrtX = int(sqrt(x))
for i in range(1, sqrtX + 1):
if x % i == 0:
factors.add(i)
factors.add(x / i)
return factors
def friendly():
_, friendly = [int(i) for i in raw_input().split()]
unfriendlies = [int(i) for i in raw_input().split()]
friendly_factors = get_factors(friendly)
unfriendly_factors = set()
for unfriendly in unfriendlies:
g = gcd(friendly, unfriendly)
unfriendly_factors.add(g)
unfriendly_factors.update(get_factors(g))
print len(friendly_factors - unfriendly_factors)
friendly()
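# --- Illustrative worked example (editor's addition) ---
# stdin:
#     1 16
#     12
# get_factors(16) = {1, 2, 4, 8, 16}; gcd(16, 12) = 4 contributes {1, 2, 4} to
# the unfriendly factors, so the script prints 2 (the survivors are 8 and 16).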
| {
"content_hash": "8964548bf905eb9b743cfdb02ae60945",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 56,
"avg_line_length": 19.742857142857144,
"alnum_prop": 0.6063675832127352,
"repo_name": "mouradmourafiq/data-analysis",
"id": "e6e2a2b3405d73db1d8b32d48b06c202d0360907",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithmics/unfriendly.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "5473"
},
{
"name": "Python",
"bytes": "186704"
}
],
"symlink_target": ""
} |
"""b3j0f.middleware version module."""
__all__ = ['__version__']
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into the utils module
# thanks to https://github.com/pycontribs/jira/blob/master/jira/version.py
#: project version
__version__ = '0.0.10'
| {
"content_hash": "918bcd1091a8b2e7e0672803eddf9dcd",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 31.083333333333332,
"alnum_prop": 0.6863270777479893,
"repo_name": "b3j0f/middleware",
"id": "72f542e08a5734aaa01731b5df5c4256e7f87ecc",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "b3j0f/middleware/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20262"
}
],
"symlink_target": ""
} |
from abc import ABCMeta
from abc import abstractmethod
class CollectlSubsystem( object ):
""" Class providing an abstraction of collectl subsytems.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def command_line_arg( self ):
""" Return single letter command-line argument used by collectl CLI.
"""
@property
@abstractmethod
def name( self, job_directory ):
""" High-level name for subsystem as consumed by this module.
"""
class ProcessesSubsystem( CollectlSubsystem ):
command_line_arg = "Z"
name = "process"
class CpuSubsystem( CollectlSubsystem ):
command_line_arg = "C"
name = "cpu"
class DiskSubsystem( CollectlSubsystem ):
command_line_arg = "D"
name = "disk"
class NetworkSubsystem( CollectlSubsystem ):
command_line_arg = "N"
name = "network"
class EnvironmentSubsystem( CollectlSubsystem ):
command_line_arg = "E"
name = "environment"
class MemorySubsystem( CollectlSubsystem ):
command_line_arg = "M"
name = "memory"
SUBSYSTEMS = [
ProcessesSubsystem(),
CpuSubsystem(),
DiskSubsystem(),
NetworkSubsystem(),
EnvironmentSubsystem(),
MemorySubsystem(),
]
SUBSYSTEM_DICT = dict( [ (s.name, s) for s in SUBSYSTEMS ] )
def get_subsystem( name ):
"""
>>> get_subsystem( "process" ).command_line_arg == "Z"
True
"""
return SUBSYSTEM_DICT[ name ]
__all__ = [ 'get_subsystem' ]
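# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Building the single-letter subsystem flags that are handed to the collectl
# command line from high-level names; the name list is arbitrary.
def _example_subsystem_flags():
    names = ["process", "cpu", "memory"]
    flags = "".join(get_subsystem(name).command_line_arg for name in names)
    assert flags == "ZCM"
    return flags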
| {
"content_hash": "70a9e2394115a506cd6249ab3684d223",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 76,
"avg_line_length": 20.375,
"alnum_prop": 0.6421267893660532,
"repo_name": "ssorgatem/pulsar",
"id": "28e2bece559e557da0c5f03b89bd859e12f6cc27",
"size": "1467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "galaxy/jobs/metrics/collectl/subsystems.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "241"
},
{
"name": "Makefile",
"bytes": "3661"
},
{
"name": "Python",
"bytes": "792577"
},
{
"name": "Shell",
"bytes": "12640"
}
],
"symlink_target": ""
} |
from app.common.target_urls import SHOP_DS_ID
from app.planes.jet_plane import JetPlane
class JetDSPlane(JetPlane):
engines_nb = 3
consumption_per_hour = 1510
fuel_capacity = 18050
minimum_kerosene_before_mission = fuel_capacity
# 7 (max hours one way) * speed * 2 (2 ways)
plane_range = 6454
plane_range_stopover = 10142
price = 2390000
shop_plane_type = SHOP_DS_ID
plane_capacity = 19
speed = 922
def __init__(self, **kwargs):
super(JetDSPlane, self).__init__(**kwargs)
| {
"content_hash": "d94ba08acdbb333ad81fbc815c288b43",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 51,
"avg_line_length": 26.6,
"alnum_prop": 0.6560150375939849,
"repo_name": "egenerat/gae-django",
"id": "46de0edf1e971cc65cb53d046b158ec35132773e",
"size": "557",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/planes/jet_ds_plane.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37815"
},
{
"name": "HTML",
"bytes": "86170"
},
{
"name": "JavaScript",
"bytes": "94035"
},
{
"name": "Python",
"bytes": "4820796"
}
],
"symlink_target": ""
} |
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.util._decorators import (
deprecate_kwarg, Appender, Substitution, cache_readonly
)
__all__ = ['assert_frame_equal', 'assert_index_equal', 'assert_series_equal',
'data_klasses', 'frequencies', 'is_numeric_dtype', 'testing',
'cache_readonly', 'deprecate_kwarg', 'Appender', 'Substitution',
'make_dataframe', 'to_numpy', 'pandas_lt_1_0_0']
version = LooseVersion(pd.__version__)
pandas_lt_1_0_0 = version < LooseVersion('1.0.0')
try:
from pandas.api.types import is_numeric_dtype
except ImportError:
from pandas.core.common import is_numeric_dtype
try:
from pandas.tseries import offsets as frequencies
except ImportError:
from pandas.tseries import frequencies
data_klasses = (pd.Series, pd.DataFrame)
try:
import pandas.testing as testing
except ImportError:
import pandas.util.testing as testing
assert_frame_equal = testing.assert_frame_equal
assert_index_equal = testing.assert_index_equal
assert_series_equal = testing.assert_series_equal
try:
from pandas._testing import makeDataFrame as make_dataframe
except ImportError:
import string
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
rands_chars = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
retval = (np.random.choice(rands_chars, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def make_dataframe():
"""
        Simple version of pandas._testing.makeDataFrame
"""
n = 30
k = 4
index = pd.Index(rands_array(nchars=10, size=n), name=None)
data = {c: pd.Series(np.random.randn(n), index=index)
for c in string.ascii_uppercase[:k]}
return pd.DataFrame(data)
def to_numpy(po: pd.DataFrame) -> np.ndarray:
"""
Workaround legacy pandas lacking to_numpy
Parameters
----------
    po : Pandas object
Returns
-------
ndarray
A numpy array
"""
try:
return po.to_numpy()
except AttributeError:
return po.values
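# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Typical use of the compat helpers: build a small random frame and get a plain
# ndarray regardless of the installed pandas version.
def _example_compat_usage():
    df = make_dataframe()          # 30x4 random DataFrame with columns A-D
    arr = to_numpy(df)             # uses .to_numpy() or .values, as available
    assert arr.shape == (30, 4)
    assert is_numeric_dtype(df['A'])
    return arr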
| {
"content_hash": "4019d843cca37077e71c363674799e52",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 77,
"avg_line_length": 27.674418604651162,
"alnum_prop": 0.626890756302521,
"repo_name": "jseabold/statsmodels",
"id": "5d81e3b62c42ab271d52b2cf234fb0bf8268ac28",
"size": "2380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statsmodels/compat/pandas.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
} |
class DatabaseAsAServiceApi(object):
def __init__(self, databaseinfra, credentials):
self.databaseinfra = databaseinfra
self.credentials = credentials
@property
def user(self):
return self.credentials.user
@property
def password(self):
return self.credentials.password
@property
def endpoint(self):
return self.credentials.endpoint
@property
def client_group_host(self):
return list(self.extra_parameters("group_host").values())
@property
def client_group_database(self):
return list(self.extra_parameters("group_database").values())
def extra_parameters(self, group):
return self.credentials.get_parameters_by_group(group)
@property
def main_clientgroup(self):
return self.credentials.get_parameter_by_name("main_clientgroup")
@property
def alarm_notes(self):
return self.credentials.get_parameter_by_name("alarm_notes")
@property
def instances(self):
return self.databaseinfra.instances.all()
@property
def driver(self):
return self.databaseinfra.get_driver()
@property
def database_instances(self):
return self.driver.get_database_instances()
@property
def non_database_instances(self):
return self.driver.get_non_database_instances()
@property
def hosts(self):
if self.using_agent:
return []
return list({instance.hostname for instance in self.instances})
@property
def databaseifra_name(self):
return self.databaseinfra.name
@property
def mysql_infra_dns_from_endpoint_dns(self):
return self.databaseinfra.endpoint_dns.split(':')[0]
@property
def is_ha(self):
return self.databaseinfra.plan.is_ha
@property
def engine_name(self):
return self.databaseinfra.engine.engine_type.name
@property
def engine_version(self):
return self.databaseinfra.engine_patch.full_version
@property
def slack_notification(self):
return self.credentials.get_parameter_by_name("slack_notification")
@property
def database(self):
return self.databaseinfra.databases.first()
@property
def organization_hostgroup(self):
organization = self.database.team.organization
if organization:
return organization.get_grafana_hostgroup_external_org()
return None
@property
def using_agent(self):
zabbix_agent = self.credentials.get_parameter_by_name("zabbix_agent")
if zabbix_agent.lower() == 'true':
return True
return False
| {
"content_hash": "b47091bb50c4e80234c5750dd3606ec2",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 77,
"avg_line_length": 26.87878787878788,
"alnum_prop": 0.6602780909432544,
"repo_name": "globocom/dbaas-zabbix",
"id": "99793eb6f7318d837c3fc7bb7bd9b818f86a8c0f",
"size": "2687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas_zabbix/dbaas_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2416"
},
{
"name": "Python",
"bytes": "58819"
},
{
"name": "Shell",
"bytes": "30"
}
],
"symlink_target": ""
} |
"""Unit test package for agronopy."""
| {
"content_hash": "101e5db65ba290f8565318f69f7db9d8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 38,
"alnum_prop": 0.6842105263157895,
"repo_name": "farmlab/AgronoPy",
"id": "aa906afdf26f9098a1de9bb2138959c56bf237af",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "260286"
},
{
"name": "Python",
"bytes": "86057"
},
{
"name": "Ruby",
"bytes": "691"
},
{
"name": "TeX",
"bytes": "2620"
}
],
"symlink_target": ""
} |
'''Example script showing how to use stateful RNNs
to model long sequences efficiently.
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25
epochs = 25
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=1000, x0=0, xn=50000, step=1, k=0.0001):
"""Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(2 * np.pi * idx / period)
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Generating Data...')
cos = gen_cosine_amp()
print('Input shape:', cos.shape)
expected_output = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Output shape:', expected_output.shape)
print('Creating Model...')
model = Sequential()
model.add(LSTM(50,
input_shape=(tsteps, 1),
batch_size=batch_size,
return_sequences=True,
stateful=True))
model.add(LSTM(50,
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='mse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
print('Epoch', i, '/', epochs)
# Note that the last state for sample i in a batch will
# be used as initial state for sample i in the next batch.
# Thus we are simultaneously training on batch_size series with
# lower resolution than the original series contained in cos.
# Each of these series is offset by one step and can be
# extracted with cos[i::batch_size].
model.fit(cos, expected_output,
batch_size=batch_size,
epochs=1,
verbose=1,
shuffle=False)
model.reset_states()
print('Predicting')
predicted_output = model.predict(cos, batch_size=batch_size)
print('Plotting Results')
plt.subplot(2, 1, 1)
plt.plot(expected_output)
plt.title('Expected')
plt.subplot(2, 1, 2)
plt.plot(predicted_output)
plt.title('Predicted')
plt.show()
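# --- Editor's hedged illustration (not part of the original Keras example). --
# The training comment above notes that with stateful=True and shuffle=False
# the model really trains on batch_size interleaved, lower-resolution series,
# and that series i can be recovered with cos[i::batch_size].  The helper
# below just makes that slicing explicit; nothing in the script calls it.
def interleaved_series(data, series_index, stride=batch_size):
    """Return the sub-series carried by batch slot ``series_index``."""
    # e.g. interleaved_series(cos, 0) is every 25th sample starting at index 0,
    # i.e. the sequence whose LSTM state is carried from batch to batch.
    return data[series_index::stride]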
| {
"content_hash": "aeca561e6cc2c3856736c8f0467c93aa",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 75,
"avg_line_length": 29.255555555555556,
"alnum_prop": 0.6460311431826814,
"repo_name": "trhongbinwang/data_science_journey",
"id": "dd9082ef07a8da1681755740f6a0c04d23cba60f",
"size": "2633",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "deep_learning/keras/examples/stateful_lstm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "59381"
},
{
"name": "Python",
"bytes": "1101944"
},
{
"name": "Shell",
"bytes": "401"
}
],
"symlink_target": ""
} |
from __future__ import division
import logging
try:
import Queue as queue
except ImportError:
import queue
import requests
import rfc6266
from threading import Event, Thread
from six.moves.urllib.parse import urlsplit
from ..piece import *
from ..plugin import InputBase
logger = logging.getLogger(__name__)
class HttpInput(InputBase):
plugin_name = 'http'
protocols = ['http', 'https']
current_piece = None
pieces = None
initial_pieces = None
finished = False
def __init__(self, item, url, buffer_size=5, segments=12, piece_group_size=100, piece_config=None):
self.url = urlsplit(url)
self.size, self.filename, self.content_type = self.get_info()
self.buffer_size = buffer_size * segments
self.downloaders = []
self.segments = segments
self.piece_group_size = piece_group_size
self.piece_config = piece_config
def get_info(self):
logger.info('Getting piece config from url %r' % (self.url, ))
r = requests.head(self.url.geturl(), verify=False)
try:
size = r.headers.get('content-length')
size = int(size)
except ValueError:
raise Exception('Size is invalid (%r), unable to segmented download.' % (size, ))
#raise InvalidInputException('Size is invalid (%r), unable to segmented download.' % size)
filename = None
if r.headers.get('content-disposition'):
filename = rfc6266.parse_headers(r.headers['content-disposition']).filename_unsafe
if not filename:
url_filename = self.url.path.split('?')[0].split('/')[-1]
if url_filename:
filename = url_filename
return int(size), filename, r.headers.get('content-type')
def seek(self, pos):
logger.debug('Seeking to %s' % (pos, ))
if self.pieces is not None:
raise Exception('Unable to seek in an already sought file')
if self.piece_config:
piece_size = calc_piece_size(self.size, **self.piece_config)
else:
piece_size = None
self.initial_pieces = self.pieces = create_pieces(self.size, self.segments, piece_size=piece_size, start_position=pos)
q = queue.Queue()
for piece_group in split_pieces(self.pieces, self.segments, self.piece_group_size):
q.put(piece_group)
for i in range(self.segments):
d = Downloader(i, self.url, q)
pdt = Thread(target=d.start)
pdt.daemon = True
pdt.start()
d.thread = pdt
self.downloaders.append(d)
self.set_current_piece()
def set_current_piece(self):
for piece in self.pieces[:self.buffer_size]:
piece.can_download.set()
if self.pieces:
self.current_piece = self.pieces.pop(0)
def read(self, *args, **kwargs):
try:
return self._read(*args, **kwargs)
except:
logger.exception('Exception while reading')
def _read(self, num_bytes=1024*8):
if self.pieces is None:
self.seek(0)
if self.finished:
return b''
d = self.current_piece.read(num_bytes)
if not d:
self.set_current_piece()
d = self.current_piece.read(num_bytes)
if not d:
self.finished = True
return d
def close(self):
for downloader in self.downloaders:
downloader.stop()
class Downloader(object):
def __init__(self, name, url, piece_queue):
self.name = name
self.url = url
self.piece_queue = piece_queue
self.should_die = Event()
def start(self):
logging.info('Starting downloader %s' % (self.name, ))
while not self.piece_queue.empty() and not self.should_die.is_set():
try:
pieces = self.piece_queue.get_nowait()
except queue.Empty:
logger.info('Piece queue empty %s, bailing' % (self.name, ))
break
logger.info('We got pieces: %r' % (pieces, ))
range_header = ','.join(['%i-%i' % (p.start_byte, p.end_byte - (p.last_piece and 1 or 0)) for p in pieces])
r = requests.get(self.url.geturl(), headers={'range': 'bytes=%s' % range_header}, stream=True, verify=False)
is_multipart = 'multipart/byteranges' in r.headers.get('content-type')
r_iter = r.iter_content(8196*2)
buffer = b''
while pieces:
piece = pieces.pop(0)
while not piece.can_download.wait(2):
logger.debug('Waiting for piece %r to be downloadable' % (piece, ))
if self.should_die.is_set():
return
logger.debug('Starting to fetch piece: %r' % piece)
bytes_left = piece.size
first = True
for chunk in r_iter:
if not chunk:
logger.error('End of data before end of piece.')
buffer += chunk
if first and is_multipart:
try:
end_of = buffer.index(b'\r\n\r\n')
except ValueError:
logger.warning('End of header was not in the first part of the chunk, trying to read more data')
continue
first = False
buffer = buffer[end_of+4:]
bytes_to_write = buffer[:bytes_left]
buffer = buffer[bytes_left:]
piece.write(bytes_to_write)
bytes_left -= len(bytes_to_write)
if bytes_left <= 0:
piece.set_complete()
break
if self.should_die.is_set():
return
logger.debug('Done fetching piece: %r' % piece)
logger.info('Downloader %s dying' % (self.name, ))
def stop(self):
logger.info('Stopping %s' % (self.name, ))
self.should_die.set()
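# --- Editor's hedged sketch (not part of the original thomas module). --------
# Downloader.start batches several pieces into one HTTP request by joining
# their byte spans into a single ``Range: bytes=...`` header; the server then
# answers with a multipart/byteranges body that is split back per piece.  The
# helper below only shows how that header value is built, using plain
# ``(start_byte, end_byte, last_piece)`` tuples as stand-ins for the real
# piece objects (an assumption for illustration); nothing here calls it.
def _format_range_header(spans):
    """spans: iterable of (start_byte, end_byte, last_piece) tuples."""
    ranges = ','.join('%i-%i' % (start, end - (1 if last else 0))
                      for start, end, last in spans)
    return 'bytes=%s' % ranges
# _format_range_header([(0, 100, False), (100, 200, True)]) == 'bytes=0-100,100-199'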
| {
"content_hash": "723cd4f21cc1bc1d6c3c923973b1211a",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 126,
"avg_line_length": 32.50261780104712,
"alnum_prop": 0.5393041237113402,
"repo_name": "JohnDoee/thomas",
"id": "7982e1c28770f1d0a79f29352fd6960ce9efc09f",
"size": "6208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thomas/inputs/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87709"
}
],
"symlink_target": ""
} |
"""
This module adds the class used to handle smpacket messages.
All the controllers placed in the controllers folder will be loaded.
You can also add your own controller in the plugins folder.
:Example:
Here's a simple Controller which will send a HelloWorld on every
PingMessage::
class ControllerHelloWorld(StepmaniaController):
command = smpacket.SMClientCommand.NSCPing
require_login = False
def handle(self):
self.send_message("Hello world", to="me")
"""
from datetime import datetime
from smserver.smutils import smpacket
from smserver import models, ability
from smserver.chathelper import with_color
class StepmaniaController(object):
"""
Inherit from this class to add an action that runs every time a
specific packet is seen.
A new instance of StepmaniaController is instantiated on each incoming
packet.
:param server: The main server instance.
:param conn: The connection the packet is coming from
:param packet: The packet that was sent
:param session: A session object used to interact with the database
:type server: smserver.server.StepmaniaServer
:type conn: smserver.smutils.smconn.StepmaniaConn
:type packet: smserver.smutils.smpacket.SMPacket
:type session: sqlalchemy.orm.session.Session
"""
command = None
"""
The command to handle in this controller.
:rtype: smserver.smutils.smpacket.SMClientCommand
"""
require_login = False
"""
Specify whether the user has to be logged in.
:rtype: bool
"""
def __init__(self, server, conn, packet, session):
self.server = server
self.conn = conn
self.packet = packet
self.session = session
self.log = self.server.log
self._room = None
self._users = None
self._room_users = None
self._song = None
@property
def room(self):
"""
The room object where the user is.
Return None if the user is not in a room
"""
if not self.conn.room:
return None
if not self._room:
self._room = self.session.query(models.Room).get(self.conn.room)
return self._room
def song(self):
"""
Return the song object for the last song the user selected.
Return None if there is no such song.
"""
if not self.conn.song:
return None
if not self._song:
self._song = self.session.query(models.Song).get(self.conn.song)
return self._song
@property
def users(self):
"""
Return the list of connected user objects.
"""
if not self._users:
self._users = models.User.from_ids(self.conn.users, self.session)
return self._users
@property
def active_users(self):
"""
Return the list of connected user objects which are still online.
"""
return [user for user in self.users if user.online]
@property
def room_users(self):
"""
Return the list of users currently in the same room
"""
if not self._room_users:
self._room_users = self.session.query(models.User).filter_by(room_id=self.conn.room)
return self._room_users
def user_repr(self, room_id=None):
"""
Textual representation of the users connected on this connection.
:param int room_id: The ID of the room
"""
return models.User.users_repr(self.active_users, room_id)
def colored_user_repr(self, room_id=None):
"""
Colored textual representation of the users connected on this
connection. Use it when sending chat messages.
:param int room_id: The ID of the room
"""
return models.User.colored_users_repr(self.active_users, room_id)
def level(self, room_id=None):
"""
The maximum level of the users in this connection
:param room_id: The ID of the room.
:type room_id: int
:return: Level of the user
:rtype: int
"""
return max(user.level(room_id) for user in self.active_users)
def can(self, action, room_id=None):
"""
Return True if this connection can do the specified action
:param action: The action to do
:param room_id: The ID of the room where the action take place.
:type action: smserver.ability.Permissions
:type room_id: int
:return: True if the action is authorized
"""
return ability.Ability.can(action, self.level(room_id))
def cannot(self, action, room_id=None):
"""
Return True if this connection cannot do the specified action
:param action: The action to do
:param room_id: The ID of the room where the action take place.
:type action: smserver.ability.Permissions
:type room_id: int
:return: True if the action is unauthorized
"""
return ability.Ability.cannot(action, self.level(room_id))
def handle(self):
"""
This method is called on every incoming packet.
Do all the stuff you need here.
"""
pass
def send(self, packet):
"""
Send the specified packet to the current connection
:param packet: The packet to send
:type packet: smserver.smutils.smpacket.SMPacket
:return: nothing
"""
self.conn.send(packet)
def sendall(self, packet):
"""
Send the specified packet to all the connections on the server
:param packet: The packet to send
:type packet: smserver.smutils.smpacket.SMPacket
:return: nothing
"""
self.server.sendall(packet)
def sendroom(self, room, packet):
"""
Send the specified packet to all the connections in the specified room
:param room: The room to send the packet to
:param packet: The packet to send
:type packet: smserver.smutils.smpacket.SMPacket
:return: nothing
"""
self.server.sendroom(room, packet)
def sendingame(self, room_id, packet):
"""
Send the specified packet to all the players in the specified room
which have sent an NSCGSR packet
:param room_id: The ID of the room
:param packet: The packet to send
:type room_id: int
:type packet: smserver.smutils.smpacket.SMPacket
:return: nothing
"""
self.server.sendingame(room_id, packet)
def sendplayers(self, room_id, packet):
"""
Send the specified packet to all the players in the specified room
(Not spectator)
:param room_id: The ID of the room
:param packet: The packet to send
:type room_id: int
:type packet: smserver.smutils.smpacket.SMPacket
:return: nothing
"""
self.server.sendplayers(room_id, packet)
def send_message(self, message, to=None, room_id=None):
"""
Send a chat message
:param str message: The message to send.
:param str to: Who to send the message to (room, all, or me). Defaults to room
:param int room_id: A specific room. Defaults to the connection's room.
"""
if to == "me":
self.server.send_message(message, conn=self.conn)
return
if to == "all":
self.server.send_message(message)
return
if not room_id:
room = self.room
else:
room = self.session.query(models.Room).get(room_id)
self.server.send_message(message, room)
def send_user_message(self, message, to=None):
"""
Same as send_message but prepends the user representation to the message
:param str message: The message to send.
:param str to: Who to send the message to (room, all, or me). Defaults to room
"""
self.send_message(
"%s: %s" % (
self.colored_user_repr(self.conn.room),
message),
to)
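# --- Editor's hedged sketch (not part of the original smserver module). ------
# A concrete controller only has to set ``command`` and override ``handle``;
# the module docstring above shows the minimal form.  The variant below also
# touches the room helpers defined in this class.  The reply text is made up
# for illustration and nothing in this file registers the class.
class _ControllerPingAck(StepmaniaController):
    command = smpacket.SMClientCommand.NSCPing
    require_login = False
    def handle(self):
        if not self.room:
            return
        # send_user_message prepends the colored user representation.
        self.send_user_message("ping acknowledged", to="me")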
| {
"content_hash": "769895af289c59015014ea083d03d057",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 96,
"avg_line_length": 28.776271186440677,
"alnum_prop": 0.5789845682648133,
"repo_name": "Nickito12/stepmania-server",
"id": "2399b0fa518af7151de8cb286a5b1e0839a4453f",
"size": "8536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smserver/stepmania_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "287957"
},
{
"name": "Shell",
"bytes": "2365"
}
],
"symlink_target": ""
} |
import os
import urllib.request
import util.httpfetch2 as httpfetch2
from index import SysConst
from util import Log
import sys
def getFilePath(actor, avNumber):
return SysConst.getImageCachePath() + actor + "//" + avNumber + ".jpg"
def checkDirPath(actor):
path = SysConst.getImageCachePath() + actor
#Log.info(sys.getdefaultencoding() + " path: " + path)
exist = os.path.exists(path)
if not exist:
os.mkdir(path)
def checkFile(filePath):
return os.path.exists(filePath)
def saveFileByRequest(url, filePath):
img = httpfetch2.getImage(url)
if img:
file = open(filePath, "wb")
file.write(img)
file.close()
else:
raise Exception("can not load image: " + url)
def saveFileByURL(url, filePath):
urllib.request.urlretrieve(url, filePath)
def saveFile(url, filePath):
saveFileByRequest(url, filePath)
def saveImage(av):
actor = av["actor"]
shortName = av["short_name"]
avNumber = av["av_number"]
url = av["remote_cover"]
url = url.replace("www.nh87.cn", "imgs.nh87.cn")
url = url.replace("//", "")
filePath = getFilePath(shortName, avNumber)
if not(checkFile(filePath)):
Log.info("begin save file: " + filePath)
Log.info(url)
saveFile(url, filePath)
return True
else:
return False
#av = {'av_number': 'IPZ-976', 'remote_cover': 'http://www.nh87.cn/uploads/2017/06/ipz976pl.jpg', 'actor': '樱空桃'}
#saveImage(av)
#checkDirPath("ABC")
def isExistImage(av):
actor = av["actor"]
avNumber = av["av_number"]
url = av["remote_cover"]
url = url.replace("www.nh87.cn", "imgs.nh87.cn")
filePath = getFilePath(actor, avNumber)
return checkFile(filePath) | {
"content_hash": "26458fdf5954db36a2a3baff4ba866ac",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 113,
"avg_line_length": 26.130434782608695,
"alnum_prop": 0.6211869107043816,
"repo_name": "pythonlittleboy/python_gentleman_crawler",
"id": "67029850228648b365eb89754b0e61d2cc2fcefc",
"size": "1834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index/ImageIO.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "981"
},
{
"name": "HTML",
"bytes": "5106"
},
{
"name": "JavaScript",
"bytes": "258979"
},
{
"name": "PLpgSQL",
"bytes": "3601709"
},
{
"name": "Python",
"bytes": "109160"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tempest_lib import exceptions
from six import string_types
from six.moves.urllib.parse import quote_plus
from functionaltests.common import datagen
from functionaltests.common import utils
from functionaltests.api.v2.security_utils import FuzzFactory
from functionaltests.api.v2.base import DesignateV2Test
from functionaltests.api.v2.clients.recordset_client import RecordsetClient
from functionaltests.api.v2.models.recordset_model import RecordsetModel
from functionaltests.api.v2.clients.zone_client import ZoneClient
fuzzer = FuzzFactory()
@utils.parameterized_class
class RecordsetFuzzTest(DesignateV2Test):
def setUp(self):
super(RecordsetFuzzTest, self).setUp()
# self.increase_quotas(user='admin')
resp, self.zone = ZoneClient.as_user('default').post_zone(
datagen.random_zone_data())
# ZoneClient.as_user('default').wait_for_zone(self.zone.id)
self.client = RecordsetClient.as_user('default')
def tearDown(self):
super(RecordsetFuzzTest, self).tearDown()
ZoneClient.as_user('default').delete_zone(self.zone.id)
@utils.parameterized(fuzzer.get_param_datasets(
['accept', 'content-type'],
['content_types', 'junk', 'sqli', 'xss', 'rce']
))
def test_fuzzed_header(self, parameter, fuzz_type, payload):
"""Create A RRSet, fuzzing Accept & Content-Type headers"""
model = datagen.random_a_recordset(self.zone.name)
headers = {
'content-type': 'application/json',
'accept': ''
}
headers[parameter] = payload.encode('utf-8')
result = fuzzer.verify_tempest_exception(
self.client.post_recordset, fuzz_type, self.zone.id, model,
headers=headers)
self.assertTrue(result['status'])
self.assertNotIn(result['resp'].status, range(500, 600))
@utils.parameterized(fuzzer.get_param_datasets(
['type', 'name', 'records', 'ttl', 'description'],
['number', 'junk', 'sqli', 'xss', 'rce']
))
def test_fuzzed_record(self, parameter, fuzz_type, payload):
"""Create A RRSet, fuzzing each param"""
model = datagen.random_a_recordset(self.zone.name)
model.__dict__[parameter] = payload
result = fuzzer.verify_tempest_exception(
self.client.post_recordset, fuzz_type, self.zone.id, model
)
self.assertTrue(result['status'])
if result['exception']:
self.assertIsInstance(result['exception'], exceptions.BadRequest)
@utils.parameterized(fuzzer.get_param_datasets(
['MX', 'NS', 'AAAA', 'CNAME', 'TXT', 'SSHFP', 'SPF', 'SRV', 'PTR'],
['number', 'junk', 'sqli', 'xss', 'rce']
))
def test_create_fuzzed_record(self, parameter, fuzz_type, payload):
"""Create each RRSet type with fuzzed 'records' param"""
model = RecordsetModel.from_dict({
'type': parameter,
'name': self.zone.name,
'records': [payload],
'ttl': 1500})
result = fuzzer.verify_tempest_exception(
self.client.post_recordset, fuzz_type, self.zone.id, model
)
self.assertTrue(result['status'])
if result['exception']:
self.assertIsInstance(result['exception'], exceptions.BadRequest)
@utils.parameterized(fuzzer.get_datasets(
['number', 'junk', 'sqli', 'xss', 'rce', 'url']
))
def test_get_record_fuzzed_id(self, fuzz_type, payload):
"""Get non-existant RRSet with fuzz payload as RRSet ID"""
if isinstance(payload, string_types):
payload = quote_plus(payload.encode('utf-8'))
result = fuzzer.verify_tempest_exception(
self.client.get_recordset, fuzz_type, self.zone.id,
payload
)
self.assertTrue(result['status'])
if result['exception']:
try:
self.assertIsInstance(result['exception'], exceptions.NotFound)
except:
self.assertIsInstance(
result['exception'], exceptions.BadRequest)
@utils.parameterized(fuzzer.get_param_datasets(
['limit', 'marker', 'sort_key', 'sort_dir', 'type', 'name', 'ttl',
'data', 'description', 'status'],
['number', 'junk', 'sqli', 'xss', 'rce']
))
def test_get_records_fuzzed(self, parameter, fuzz_type, payload):
"""Get RRSet list, fuzzing each filter param"""
model = datagen.random_a_recordset(self.zone.name)
resp, post_resp_model = self.client.post_recordset(self.zone.id, model)
result = fuzzer.verify_tempest_exception(
self.client.list_recordsets, fuzz_type, self.zone.id,
filters={parameter: payload}
)
self.assertTrue(result['status'])
if result['exception']:
self.assertIsInstance(result['exception'], exceptions.BadRequest)
@utils.parameterized(fuzzer.get_param_datasets(
['type', 'name', 'records', 'ttl', 'description'],
['junk', 'sqli', 'xss', 'rce', 'huge']
))
def test_update_fuzzed_record(self, parameter, fuzz_type, payload):
"""Update a RecordSet, fuzzing each param"""
model = datagen.random_a_recordset(self.zone.name)
resp, post_resp_model = self.client.post_recordset(self.zone.id, model)
recordset_id = post_resp_model.id
model.__dict__[parameter] = payload
result = fuzzer.verify_tempest_exception(
self.client.put_recordset, fuzz_type, self.zone.id, recordset_id,
model
)
self.assertTrue(result['status'])
if result['exception']:
self.assertIsInstance(result['exception'], exceptions.BadRequest)
@utils.parameterized(fuzzer.get_datasets(
['number', 'junk', 'sqli', 'xss', 'rce']
))
def test_delete_records_fuzzed_id(self, fuzz_type, payload):
"""Delete non-existant RRSet with fuzz payload as RRSet ID"""
if isinstance(payload, string_types):
payload = quote_plus(payload.encode('utf-8'))
result = fuzzer.verify_tempest_exception(
self.client.delete_recordset, fuzz_type, self.zone.id,
payload
)
self.assertTrue(result['status'])
if result['exception']:
try:
self.assertIsInstance(result['exception'], exceptions.NotFound)
except:
self.assertIsInstance(
result['exception'], exceptions.BadRequest)
| {
"content_hash": "f4c583f2e9a316b183a059e79383b543",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 79,
"avg_line_length": 41.116279069767444,
"alnum_prop": 0.6329185520361991,
"repo_name": "cneill/designate-testing",
"id": "4fcb4fe2c4c76e398e923bb406cfb21faf815cb4",
"size": "7072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functionaltests/api/v2/test_recordset_security.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2257800"
},
{
"name": "Ruby",
"bytes": "4170"
},
{
"name": "Shell",
"bytes": "12933"
}
],
"symlink_target": ""
} |
import sys
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from neo import api
from neo.models import NeoProfile
class Migration(DataMigration):
def forwards(self, orm):
count = 1
total = orm['neo.NeoProfile'].objects.count()
for np in orm['neo.NeoProfile'].objects.all().iterator():
password = NeoProfile.generate_password()
api.change_password(np.login_alias, password,
token=api.get_forgot_password_token(np.login_alias).TempToken)
np.password = password
np.save()
sys.stdout.write("\rReset %d out of %d" % (count, total))
sys.stdout.flush()
count += 1
print "\nDone"
def backwards(self, orm):
orm['neo.NeoProfile'].objects.all().update(password='')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'neo.neoprofile': {
'Meta': {'object_name': 'NeoProfile'},
'consumer_id': ('django.db.models.fields.PositiveIntegerField', [], {'primary_key': 'True'}),
'login_alias': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['neo']
symmetrical = True
| {
"content_hash": "4ae7cbf631597801d025bf3a745cbb15",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 182,
"avg_line_length": 59.16883116883117,
"alnum_prop": 0.5566286215978928,
"repo_name": "praekelt/jmbo-neo",
"id": "86ac3042c2628e6364f7484eb9bc0627b5c6bf01",
"size": "4580",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "neo/migrations/0007_reset_passwords.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "447536"
}
],
"symlink_target": ""
} |
'''
Created on Oct 11, 2015
@author: Francky
'''
from __future__ import print_function
from __future__ import division
import os
import glob
import re
import string
sids_detected_with_icd9 = []  # TODO: fill in the subject IDs already detected via ICD-9 codes (elided in the original)
def compute_has_diabetes(text):
'''
Return True if the note text mentions diabetes, either via the word
"diabetes" (case-insensitive) or a "DM" abbreviation bounded by
whitespace or punctuation.
'''
regexp = re.compile(r'[\s{0}]DM[\s{0}]'.format(re.escape(string.punctuation)))
has_diabetes = ('diabetes' in text.lower()) or (regexp.search(text) is not None)
#print('has_diabetes: {0}'.format(has_diabetes))
if regexp.search(text) is not None:
print('regexp.search(text): {0}'.format(regexp.findall(text)))
# number_of_diabetes_patients: 14038
return has_diabetes
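# --- Editor's hedged illustration (not part of the original script). ---------
# The regexp above only accepts "DM" when it is bounded by whitespace or
# punctuation on both sides, so the two letters inside a longer word do not
# count as diabetes.  The example strings below are made up; the check is
# never run by the script itself.
def _dm_regexp_examples():
    assert compute_has_diabetes("Hx: DM, HTN.") is True      # bounded "DM"
    assert compute_has_diabetes("Type 2 diabetes mellitus") is True
    assert compute_has_diabetes("EKG prior to ADMit orders") is False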
def compute_has_hemo(text):
case_insensitive_patterns = ['hemodialysis', 'haemodialysis', 'kidney dialysis', 'renal dialysis', 'extracorporeal dialysis']
case_sensitive_patterns = ['on HD', 'HD today', 'tunneled HD', 'continue HD', 'cont HD']
case_insensitive_pattern_results = [pattern in text.lower() for pattern in case_insensitive_patterns]
case_sensitive_pattern_results = [pattern in text for pattern in case_sensitive_patterns]
has_hemo = sum(case_insensitive_pattern_results) + sum(case_sensitive_pattern_results) > 0
if has_hemo:
print('case_insensitive_pattern_results: {0}'.format(case_insensitive_pattern_results))
print('case_sensitive_pattern_results: {0}'.format(case_sensitive_pattern_results))
#print('has_hemo: {0}'.format(has_hemo))
hemo_matched = case_insensitive_pattern_results + case_sensitive_pattern_results
return has_hemo, hemo_matched
def process_note(note_filepath, note_output_folder):
text = open(note_filepath, 'r').read()
note_filename = os.path.basename(note_filepath)
#print('text: {0}'.format(text))
# Step 1: has diabetes?
has_diabetes = compute_has_diabetes(text)
# Step 2: has hemo?
has_hemo, hemo_matched = compute_has_hemo(text)
if not (has_diabetes and has_hemo):
return has_diabetes, has_hemo, hemo_matched
# Step 3: remove history and copy note
print('remove family history and copy note')
sid = int(note_filename.replace('sid', '').replace('.txt', ''))
if sid in sids_detected_with_icd9:
output_note_filepath = os.path.join(note_output_folder, 'already_detected_in_icd9', note_filename)
else:
output_note_filepath = os.path.join(note_output_folder, 'only_detected_in_notes', note_filename)
output_note = open(output_note_filepath, 'w')
family_history_section = False
for line in text.split('\n'):
#print('line: {0}'.format(line))
if 'family history:' in line.lower():
family_history_section = True
output_note.write('Family History section removed\n\n')
print('Family History section removed')
if not family_history_section:
output_note.write(line+'\n')
if family_history_section and len(line.strip()) == 0: # If there is an empty line, it means that the family history section ended
family_history_section = False
output_note.close()
# Step 4: delete output if when Family History is removed there is no more diabetes or hemo
text = open(output_note_filepath, 'r').read()
has_diabetes = compute_has_diabetes(text)
has_hemo, hemo_matched = compute_has_hemo(text)
if not (has_diabetes and has_hemo):
os.remove(output_note_filepath)
print('file removed')
return has_diabetes, has_hemo, hemo_matched
def main():
'''
This is the main function
'''
#number_of_diabetes_patients = number_of_hemo_patients = number_of_hemo_and_diabetes_patients = 0
diabetes_patients = []
hemo_patients = []
note_folder = os.path.join('all_notes')
note_output_folder = os.path.join(note_folder, 'output')
count = 0
for note_filepath in glob.iglob(os.path.join(note_folder, 'sid*.txt')):
#for note_filepath in glob.iglob(os.path.join(note_folder, 'sid1114.txt')):
print('note_filepath: {0}'.format(note_filepath))
sid = int(os.path.basename(note_filepath).replace('sid', '').replace('.txt', ''))
has_diabetes, has_hemo, hemo_matched = process_note(note_filepath, note_output_folder)
if has_diabetes: diabetes_patients.append(sid)#number_of_diabetes_patients += 1
if has_hemo: hemo_patients.append(sid) #number_of_hemo_patients += 1
#if has_diabetes and has_hemo:number_of_hemo_and_diabetes_patients += 1
count += 1
'''
print('number_of_diabetes_patients: {0}'.format(number_of_diabetes_patients))
print('number_of_hemo_patients: {0}'.format(number_of_hemo_patients))
print('number_of_hemo_and_diabetes_patients: {0}'.format(number_of_hemo_and_diabetes_patients))
'''
print('number_of_diabetes_patients: {0}'.format(len(diabetes_patients)))
print('number_of_hemo_patients: {0}'.format(len(hemo_patients)))
print('diabetes_patients: {0}'.format(diabetes_patients))
print('hemo_patients: {0}'.format(hemo_patients))
if __name__ == "__main__":
main()
#cProfile.run('main()') # if you want to do some profiling
| {
"content_hash": "26b67b572744fb8682ff09254fa60168",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 137,
"avg_line_length": 41.34126984126984,
"alnum_prop": 0.6567479362641582,
"repo_name": "MIT-LCP/critical-data-book",
"id": "2aae19ca040f20ccb50761857cc5c2b825a86815",
"size": "5209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "part_iii/chapter_28/python/filter_notes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "170734"
},
{
"name": "Matlab",
"bytes": "559374"
},
{
"name": "PLSQL",
"bytes": "56601"
},
{
"name": "Prolog",
"bytes": "12305"
},
{
"name": "Python",
"bytes": "82377"
},
{
"name": "R",
"bytes": "73678"
},
{
"name": "SAS",
"bytes": "3965"
},
{
"name": "SQLPL",
"bytes": "4394"
}
],
"symlink_target": ""
} |
import unittest, os
from django.test import TestCase
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from django.test import LiveServerTestCase
from .pages.sign_in import SignInPage
from .pages.register import RegisterPage
TITLE_PREFIX = 'GrowthStreet Loans - '
class TestRegistration(LiveServerTestCase):
def setUp(self):
self.driver = webdriver.PhantomJS()
super(TestRegistration, self).setUp()
def tearDown(self):
self.driver.quit()
super(TestRegistration, self).tearDown()
'''
Users must be able to register and sign in
'''
def test_journey_register(self):
self.driver.get(self.live_server_url + "/")
# We must end up on the authentication page
self.assertEquals(TITLE_PREFIX + 'Sign In', self.driver.title)
SignInPage.click_register_link(self.driver)
# We must end up on the registration page
self.assertEquals(TITLE_PREFIX + 'Register', self.driver.title)
# Registering
RegisterPage.complete_form(self.driver, 'John', 'Doe', '[email protected]', 'correct-horse-battery-staple', '+44 7765 222 4567')
# No alerts should appear
error_message = self.get_element('error-message').text
self.assertEquals('', error_message)
# PhantomJS fails to follow redirects
# Manually redirecting
self.driver.get(self.live_server_url + "/")
# We must end up back on the authentication page
self.assertEquals(TITLE_PREFIX + 'Sign In', self.driver.title)
# Signing in must work
SignInPage.sign_in(self.driver, '[email protected]', 'correct-horse-battery-staple')
# No alerts should appear
error_message = self.get_element('error-message').text
self.assertEquals('', error_message)
# PhantomJS fails to follow some redirects
# Manually redirecting, next assertion will fail if logged in failed
self.driver.get(self.live_server_url + "/dashboard")
# User should be redirected to the homepage/dashboard once logged in
self.assertEquals(TITLE_PREFIX + 'Homepage', self.driver.title)
'''
Users must not be able to access anything other than the sign in page unless
they are signed in
'''
def test_access(self):
pass
# Shortcut for find_element_by_id
def get_element(self, id):
return self.driver.find_element_by_id(id)
| {
"content_hash": "d3fef5b93bcf129fb57ae06c7e7f05ed",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 136,
"avg_line_length": 33.08974358974359,
"alnum_prop": 0.6784192173576133,
"repo_name": "Amandil/django-tech-test",
"id": "94eb46f94d24e498f082a4baf5f1a3fa8ed0a88a",
"size": "2581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loans/tests/tests_ui_authentication.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "163374"
},
{
"name": "HTML",
"bytes": "15058"
},
{
"name": "JavaScript",
"bytes": "3348443"
},
{
"name": "PHP",
"bytes": "10224"
},
{
"name": "Python",
"bytes": "61756"
},
{
"name": "Shell",
"bytes": "496"
}
],
"symlink_target": ""
} |
"""
* references
** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
"""
import netaddr
from neutron_lib import constants as lib_consts
from neutron_lib.plugins.ml2 import ovs_constants as constants
from os_ken.lib.packet import ether_types
from os_ken.lib.packet import icmpv6
from os_ken.lib.packet import in_proto
from oslo_log import log as logging
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import br_dvr_process
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ovs_bridge
LOG = logging.getLogger(__name__)
# TODO(liuyulong): move to neutron-lib.
IPV4_NETWORK_BROADCAST = "255.255.255.255"
# All_DHCP_Relay_Agents_and_Servers
# [RFC8415] https://datatracker.ietf.org/doc/html/rfc8415
IPV6_All_DHCP_RELAY_AGENYS_AND_SERVERS = "ff02::1:2"
class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge,
br_dvr_process.OVSDVRInterfaceMixin):
"""openvswitch agent br-int specific logic."""
of_tables = constants.INT_BR_ALL_TABLES
def setup_default_table(self, enable_openflow_dhcp=False,
enable_dhcpv6=False):
(_dp, ofp, ofpp) = self._get_dp()
self.setup_canary_table()
self.install_goto(dest_table_id=constants.PACKET_RATE_LIMIT)
self.install_goto(dest_table_id=constants.TRANSIENT_TABLE,
table_id=constants.PACKET_RATE_LIMIT)
self.install_normal(table_id=constants.TRANSIENT_TABLE, priority=3)
self.init_dhcp(enable_openflow_dhcp=enable_openflow_dhcp,
enable_dhcpv6=enable_dhcpv6)
self.install_drop(table_id=constants.ARP_SPOOF_TABLE)
self.install_drop(
table_id=constants.LOCAL_SWITCHING,
priority=constants.OPENFLOW_MAX_PRIORITY,
vlan_vid=ofp.OFPVID_PRESENT | constants.DEAD_VLAN_TAG,
)
# When openflow firewall is not enabled, we use this table to
# deal with all egress flows.
self.install_normal(table_id=constants.TRANSIENT_EGRESS_TABLE,
priority=3)
# Local IP defaults
self.install_goto(dest_table_id=constants.PACKET_RATE_LIMIT,
table_id=constants.LOCAL_EGRESS_TABLE)
self.install_goto(dest_table_id=constants.PACKET_RATE_LIMIT,
table_id=constants.LOCAL_IP_TABLE)
def init_dhcp(self, enable_openflow_dhcp=False, enable_dhcpv6=False):
if not enable_openflow_dhcp:
return
# DHCP IPv4
self.install_goto(dest_table_id=constants.DHCP_IPV4_TABLE,
table_id=constants.TRANSIENT_TABLE,
priority=101,
eth_type=ether_types.ETH_TYPE_IP,
ip_proto=in_proto.IPPROTO_UDP,
ipv4_dst=IPV4_NETWORK_BROADCAST,
udp_src=lib_consts.DHCP_CLIENT_PORT,
udp_dst=lib_consts.DHCP_RESPONSE_PORT)
self.install_drop(table_id=constants.DHCP_IPV4_TABLE)
if not enable_dhcpv6:
return
# DHCP IPv6
self.install_goto(dest_table_id=constants.DHCP_IPV6_TABLE,
table_id=constants.TRANSIENT_TABLE,
priority=101,
eth_type=ether_types.ETH_TYPE_IPV6,
ip_proto=in_proto.IPPROTO_UDP,
ipv6_dst=IPV6_All_DHCP_RELAY_AGENYS_AND_SERVERS,
udp_src=lib_consts.DHCPV6_CLIENT_PORT,
udp_dst=lib_consts.DHCPV6_RESPONSE_PORT)
self.install_drop(table_id=constants.DHCP_IPV6_TABLE)
def add_dhcp_ipv4_flow(self, port_id, ofport, port_mac):
(_dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=in_proto.IPPROTO_UDP,
in_port=ofport,
eth_src=port_mac,
udp_src=68,
udp_dst=67)
actions = [
ofpp.OFPActionOutput(ofp.OFPP_CONTROLLER, 0),
]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
self.install_instructions(table_id=constants.DHCP_IPV4_TABLE,
priority=100,
instructions=instructions,
match=match)
def add_dhcp_ipv6_flow(self, port_id, ofport, port_mac):
(_dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(eth_type=ether_types.ETH_TYPE_IPV6,
ip_proto=in_proto.IPPROTO_UDP,
in_port=ofport,
eth_src=port_mac,
udp_src=546,
udp_dst=547)
actions = [
ofpp.OFPActionOutput(ofp.OFPP_CONTROLLER, 0),
]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
self.install_instructions(table_id=constants.DHCP_IPV6_TABLE,
priority=100,
instructions=instructions,
match=match)
def del_dhcp_flow(self, ofport, port_mac):
self.uninstall_flows(table_id=constants.DHCP_IPV4_TABLE,
eth_type=ether_types.ETH_TYPE_IP,
ip_proto=in_proto.IPPROTO_UDP,
in_port=ofport,
eth_src=port_mac,
udp_src=68,
udp_dst=67)
self.uninstall_flows(table_id=constants.DHCP_IPV6_TABLE,
eth_type=ether_types.ETH_TYPE_IPV6,
ip_proto=in_proto.IPPROTO_UDP,
in_port=ofport,
eth_src=port_mac,
udp_src=546,
udp_dst=547)
def setup_canary_table(self):
self.install_drop(constants.CANARY_TABLE)
def check_canary_table(self):
try:
flows = self.dump_flows(constants.CANARY_TABLE)
except RuntimeError:
LOG.exception("Failed to communicate with the switch")
return constants.OVS_DEAD
return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED
@staticmethod
def _local_vlan_match(_ofp, ofpp, port, vlan_vid):
return ofpp.OFPMatch(in_port=port, vlan_vid=vlan_vid)
def provision_local_vlan(self, port, lvid, segmentation_id):
(_dp, ofp, ofpp) = self._get_dp()
if segmentation_id is None:
vlan_vid = ofp.OFPVID_NONE
actions = [ofpp.OFPActionPushVlan()]
else:
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
actions = []
match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
actions += [
ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT),
]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
ofpp.OFPInstructionGotoTable(table_id=constants.PACKET_RATE_LIMIT),
]
self.install_instructions(
instructions=instructions,
priority=3,
match=match,
)
def reclaim_local_vlan(self, port, segmentation_id):
(_dp, ofp, ofpp) = self._get_dp()
if segmentation_id is None:
vlan_vid = ofp.OFPVID_NONE
else:
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
self.uninstall_flows(match=match)
@staticmethod
def _arp_dvr_dst_mac_match(ofp, ofpp, vlan, dvr_mac):
# If eth_dst is equal to the dvr mac of this host, then
# flag it as matched.
if not vlan:
return ofpp.OFPMatch(vlan_vid=ofp.OFPVID_NONE, eth_dst=dvr_mac)
return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT,
eth_dst=dvr_mac)
@staticmethod
def _dvr_dst_mac_table_id(network_type):
if network_type in constants.DVR_PHYSICAL_NETWORK_TYPES:
return constants.ARP_DVR_MAC_TO_DST_MAC_PHYSICAL
else:
return constants.ARP_DVR_MAC_TO_DST_MAC
def install_dvr_dst_mac_for_arp(self, network_type,
vlan_tag, gateway_mac, dvr_mac, rtr_port):
table_id = self._dvr_dst_mac_table_id(network_type)
# Match the destination MAC with the DVR MAC
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac)
# Incoming packet will come with destination MAC of DVR host MAC from
# the ARP Responder. The Source MAC in this case will have the source
# MAC of the port MAC that responded from the ARP responder.
# So just remove the DVR host MAC from the 'eth_dst' and replace it
# with the gateway-mac. The packet should end up in the right the table
# for the packet to reach the router interface.
actions = [
ofpp.OFPActionSetField(eth_dst=gateway_mac),
ofpp.OFPActionPopVlan(),
ofpp.OFPActionOutput(rtr_port, 0)
]
self.install_apply_actions(table_id=table_id,
priority=5,
match=match,
actions=actions)
@staticmethod
def _dvr_to_src_mac_match(ofp, ofpp, vlan_tag, dst_mac):
if not vlan_tag:
# When the network is flat type, the vlan_tag will be None.
return ofpp.OFPMatch(vlan_vid=ofp.OFPVID_NONE, eth_dst=dst_mac)
return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
eth_dst=dst_mac)
@staticmethod
def _dvr_to_src_mac_table_id(network_type):
if network_type in constants.DVR_PHYSICAL_NETWORK_TYPES:
return constants.DVR_TO_SRC_MAC_PHYSICAL
else:
return constants.DVR_TO_SRC_MAC
def install_dvr_to_src_mac(self, network_type,
vlan_tag, gateway_mac, dst_mac, dst_port):
table_id = self._dvr_to_src_mac_table_id(network_type)
(_dp, ofp, ofpp) = self._get_dp()
match = self._dvr_to_src_mac_match(ofp, ofpp,
vlan_tag=vlan_tag, dst_mac=dst_mac)
actions = [
ofpp.OFPActionSetField(eth_src=gateway_mac),
]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
ofpp.OFPInstructionGotoTable(table_id=constants.PACKET_RATE_LIMIT),
]
self.install_instructions(table_id=table_id,
priority=20,
match=match,
instructions=instructions)
actions = []
if vlan_tag:
actions.append(ofpp.OFPActionPopVlan())
actions.append(ofpp.OFPActionOutput(dst_port, 0))
self.install_apply_actions(table_id=constants.TRANSIENT_TABLE,
priority=20,
match=match,
actions=actions)
def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac):
table_id = self._dvr_to_src_mac_table_id(network_type)
(_dp, ofp, ofpp) = self._get_dp()
match = self._dvr_to_src_mac_match(ofp, ofpp,
vlan_tag=vlan_tag, dst_mac=dst_mac)
for table in (table_id, constants.TRANSIENT_TABLE):
self.uninstall_flows(
strict=True, priority=20, table_id=table, match=match)
def add_dvr_mac_physical(self, mac, port):
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=4,
in_port=port,
eth_src=mac,
dest_table_id=constants.DVR_TO_SRC_MAC_PHYSICAL)
def remove_dvr_mac_vlan(self, mac):
# REVISIT(yamamoto): match in_port as well?
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
eth_src=mac)
def add_dvr_mac_tun(self, mac, port):
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=port,
eth_src=mac,
dest_table_id=constants.DVR_TO_SRC_MAC)
def remove_dvr_mac_tun(self, mac, port):
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
in_port=port, eth_src=mac)
def delete_dvr_dst_mac_for_arp(self, network_type,
vlan_tag, gateway_mac, dvr_mac, rtr_port):
table_id = self._dvr_to_src_mac_table_id(network_type)
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac)
self.uninstall_flows(
strict=True, priority=5, table_id=table_id, match=match)
def add_dvr_gateway_mac_arp_vlan(self, mac, port):
self.install_goto(
table_id=constants.LOCAL_SWITCHING,
priority=5,
in_port=port,
eth_dst=mac,
dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC_PHYSICAL)
def remove_dvr_gateway_mac_arp_vlan(self, mac, port):
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
eth_dst=mac)
def add_dvr_gateway_mac_arp_tun(self, mac, port):
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=5,
in_port=port,
eth_dst=mac,
dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC)
def remove_dvr_gateway_mac_arp_tun(self, mac, port):
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
eth_dst=mac)
@staticmethod
def _arp_reply_match(ofp, ofpp, port):
return ofpp.OFPMatch(in_port=port,
eth_type=ether_types.ETH_TYPE_ARP)
@staticmethod
def _icmpv6_reply_match(ofp, ofpp, port):
return ofpp.OFPMatch(in_port=port,
eth_type=ether_types.ETH_TYPE_IPV6,
ip_proto=in_proto.IPPROTO_ICMPV6,
icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT)
def install_icmpv6_na_spoofing_protection(self, port, ip_addresses):
# Allow neighbor advertisements as long as they match addresses
# that actually belong to the port.
for ip in ip_addresses:
masked_ip = self._cidr_to_os_ken(ip)
self.install_goto(
table_id=constants.ARP_SPOOF_TABLE, priority=2,
eth_type=ether_types.ETH_TYPE_IPV6,
ip_proto=in_proto.IPPROTO_ICMPV6,
icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT,
ipv6_nd_target=masked_ip, in_port=port,
dest_table_id=constants.PACKET_RATE_LIMIT)
# Now that the rules are ready, direct icmpv6 neighbor advertisement
# traffic from the port into the anti-spoof table.
(_dp, ofp, ofpp) = self._get_dp()
match = self._icmpv6_reply_match(ofp, ofpp, port=port)
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=10,
match=match,
dest_table_id=constants.ARP_SPOOF_TABLE)
def set_allowed_macs_for_port(self, port, mac_addresses=None,
allow_all=False):
if allow_all:
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
in_port=port)
self.uninstall_flows(table_id=constants.MAC_SPOOF_TABLE,
in_port=port)
return
mac_addresses = mac_addresses or []
for address in mac_addresses:
self.install_goto(
table_id=constants.MAC_SPOOF_TABLE, priority=2,
eth_src=address, in_port=port,
dest_table_id=constants.LOCAL_EGRESS_TABLE)
# normalize so we can see if macs are the same
mac_addresses = {netaddr.EUI(mac) for mac in mac_addresses}
flows = self.dump_flows(constants.MAC_SPOOF_TABLE)
for flow in flows:
matches = dict(flow.match.items())
if matches.get('in_port') != port:
continue
if not matches.get('eth_src'):
continue
flow_mac = matches['eth_src']
if netaddr.EUI(flow_mac) not in mac_addresses:
self.uninstall_flows(table_id=constants.MAC_SPOOF_TABLE,
in_port=port, eth_src=flow_mac)
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=9, in_port=port,
dest_table_id=constants.MAC_SPOOF_TABLE)
def install_arp_spoofing_protection(self, port, ip_addresses):
# allow ARP replies as long as they match addresses that actually
# belong to the port.
for ip in ip_addresses:
masked_ip = self._cidr_to_os_ken(ip)
self.install_goto(table_id=constants.ARP_SPOOF_TABLE,
priority=2,
eth_type=ether_types.ETH_TYPE_ARP,
arp_spa=masked_ip,
in_port=port,
dest_table_id=constants.MAC_SPOOF_TABLE)
# Now that the rules are ready, direct ARP traffic from the port into
# the anti-spoof table.
# This strategy fails gracefully because OVS versions that can't match
# on ARP headers will just process traffic normally.
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_reply_match(ofp, ofpp, port=port)
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=10,
match=match,
dest_table_id=constants.ARP_SPOOF_TABLE)
def list_meter_features(self):
(dp, _ofp, ofpp) = self._get_dp()
req = ofpp.OFPMeterFeaturesStatsRequest(dp, 0)
rep = self._send_msg(req, reply_cls=ofpp.OFPMeterFeaturesStatsReply)
features = []
for stat in rep.body:
features.append({"max_meter": stat.max_meter,
"band_types": stat.band_types,
"capabilities": stat.capabilities,
"max_bands": stat.max_bands,
"max_color": stat.max_color})
return features
def create_meter(self, meter_id, rate, burst=0):
(dp, ofp, ofpp) = self._get_dp()
bands = [
ofpp.OFPMeterBandDrop(rate=rate, burst_size=burst)]
req = ofpp.OFPMeterMod(datapath=dp, command=ofp.OFPMC_ADD,
flags=ofp.OFPMF_PKTPS, meter_id=meter_id,
bands=bands)
self._send_msg(req)
def delete_meter(self, meter_id):
(dp, ofp, ofpp) = self._get_dp()
req = ofpp.OFPMeterMod(datapath=dp, command=ofp.OFPMC_DELETE,
flags=ofp.OFPMF_PKTPS, meter_id=meter_id)
self._send_msg(req)
def update_meter(self, meter_id, rate, burst=0):
(dp, ofp, ofpp) = self._get_dp()
bands = [
ofpp.OFPMeterBandDrop(rate=rate, burst_size=burst)]
req = ofpp.OFPMeterMod(datapath=dp, command=ofp.OFPMC_MODIFY,
flags=ofp.OFPMF_PKTPS, meter_id=meter_id,
bands=bands)
self._send_msg(req)
def apply_meter_to_port(self, meter_id, direction, mac,
in_port=None, local_vlan=None):
"""Add meter flows to port.
Ingress: match dst MAC and local_vlan ID
Egress: match src MAC and OF in_port
"""
(_dp, ofp, ofpp) = self._get_dp()
if direction == lib_consts.EGRESS_DIRECTION and in_port:
match = ofpp.OFPMatch(in_port=in_port, eth_src=mac)
elif direction == lib_consts.INGRESS_DIRECTION and local_vlan:
vlan_vid = local_vlan | ofp.OFPVID_PRESENT
match = ofpp.OFPMatch(vlan_vid=vlan_vid, eth_dst=mac)
else:
LOG.warning("Invalid inputs to add meter flows to port.")
return
instructions = [
ofpp.OFPInstructionMeter(meter_id, type_=ofp.OFPIT_METER),
ofpp.OFPInstructionGotoTable(table_id=constants.TRANSIENT_TABLE)]
self.install_instructions(table_id=constants.PACKET_RATE_LIMIT,
priority=100,
instructions=instructions,
match=match)
def remove_meter_from_port(self, direction, mac,
in_port=None, local_vlan=None):
"""Remove meter flows from port.
Ingress: match dst MAC and local_vlan ID
Egress: match src MAC and OF in_port
"""
(_dp, ofp, ofpp) = self._get_dp()
if direction == lib_consts.EGRESS_DIRECTION and in_port:
match = ofpp.OFPMatch(in_port=in_port, eth_src=mac)
elif direction == lib_consts.INGRESS_DIRECTION and local_vlan:
vlan_vid = local_vlan | ofp.OFPVID_PRESENT
match = ofpp.OFPMatch(vlan_vid=vlan_vid, eth_dst=mac)
else:
LOG.warning("Invalid inputs to remove meter flows from port.")
return
self.uninstall_flows(table_id=constants.PACKET_RATE_LIMIT,
match=match)
def delete_arp_spoofing_protection(self, port):
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_reply_match(ofp, ofpp, port=port)
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
match=match)
match = self._icmpv6_reply_match(ofp, ofpp, port=port)
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
match=match)
self.delete_arp_spoofing_allow_rules(port)
def delete_arp_spoofing_allow_rules(self, port):
self.uninstall_flows(table_id=constants.ARP_SPOOF_TABLE,
in_port=port)
def install_dscp_marking_rule(self, port, dscp_mark):
# reg2 is a metadata register that does not alter the packet itself.
# The rule matches reg2=0, loads 1 into reg2 and resubmits, so on the
# second pass through the flow table the packet no longer matches this
# rule and can be picked up by the other flows instead.
(dp, ofp, ofpp) = self._get_dp()
actions = [ofpp.OFPActionSetField(reg2=1),
ofpp.OFPActionSetField(ip_dscp=dscp_mark),
ofpp.NXActionResubmit(in_port=port)]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
self.install_instructions(instructions, table_id=0,
priority=65535, in_port=port, reg2=0,
eth_type=0x0800)
self.install_instructions(instructions, table_id=0,
priority=65535, in_port=port, reg2=0,
eth_type=0x86DD)
def setup_local_egress_flows(self, in_port, vlan):
if in_port == constants.OFPORT_INVALID:
LOG.warning("Invalid ofport: %s, vlan: %s - "
"skipping setup_local_egress_flows", in_port, vlan)
return
# Priority 8 so that the higher-priority ARP/MAC spoofing rules still win
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=8,
in_port=in_port,
dest_table_id=constants.LOCAL_EGRESS_TABLE)
(dp, ofp, ofpp) = self._get_dp()
actions = [ofpp.OFPActionSetField(reg6=vlan),
ofpp.NXActionResubmitTable(
in_port=in_port, table_id=constants.LOCAL_IP_TABLE)]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
self.install_instructions(instructions,
table_id=constants.LOCAL_EGRESS_TABLE,
priority=10, in_port=in_port)
@staticmethod
def _arp_responder_match(ofp, ofpp, vlan, ip):
return ofpp.OFPMatch(reg6=vlan,
eth_type=ether_types.ETH_TYPE_ARP,
arp_tpa=ip)
def _garp_blocker_match(self, vlan, ip):
(dp, ofp, ofpp) = self._get_dp()
return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT,
eth_type=ether_types.ETH_TYPE_ARP,
arp_spa=ip)
def install_garp_blocker(self, vlan, ip,
table_id=constants.LOCAL_SWITCHING):
match = self._garp_blocker_match(vlan, ip)
self.install_drop(table_id=table_id,
priority=10,
match=match)
def delete_garp_blocker(self, vlan, ip,
table_id=constants.LOCAL_SWITCHING):
match = self._garp_blocker_match(vlan, ip)
self.uninstall_flows(table_id=table_id,
priority=10,
match=match)
def _garp_blocker_exception_match(self, vlan, ip, except_ip):
(dp, ofp, ofpp) = self._get_dp()
return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT,
eth_type=ether_types.ETH_TYPE_ARP,
arp_spa=ip,
arp_tpa=except_ip)
def install_garp_blocker_exception(self, vlan, ip, except_ip,
table_id=constants.LOCAL_SWITCHING):
match = self._garp_blocker_exception_match(vlan, ip, except_ip)
self.install_goto(dest_table_id=constants.PACKET_RATE_LIMIT,
table_id=table_id,
priority=11,
match=match)
def delete_garp_blocker_exception(self, vlan, ip, except_ip,
table_id=constants.LOCAL_SWITCHING):
match = self._garp_blocker_exception_match(vlan, ip, except_ip)
self.uninstall_flows(table_id=table_id,
priority=11,
match=match)
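# A minimal usage sketch (illustrative only; assumes `br` is an instance of
# this bridge class and the addresses are placeholders): the gratuitous-ARP
# blocker above can be paired with an exception so traffic towards a chosen
# peer, e.g. the gateway, still passes.
#
#     br.install_garp_blocker(vlan=100, ip='192.0.2.10')
#     br.install_garp_blocker_exception(vlan=100, ip='192.0.2.10',
#                                       except_ip='192.0.2.1')
#
# The exception flow (priority 11, goto PACKET_RATE_LIMIT) wins over the drop
# flow (priority 10), so ARP from 192.0.2.10 is dropped except when its target
# is 192.0.2.1.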
| {
"content_hash": "a91cd210ccdde5fe01030cbdd2442ea6",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 79,
"avg_line_length": 44.06055646481178,
"alnum_prop": 0.5472679320976189,
"repo_name": "openstack/neutron",
"id": "904268882d2ee20c36ad0e9d80bf0809cbb3e2f8",
"size": "27682",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
""" setuptools for stalkerweb """
from setuptools import setup, find_packages
from stalkerweb import __version__ as version
setup(
name='stalkerweb',
version=version,
author="Florian Hines",
author_email="[email protected]",
description="Simple Monitoring System",
url="http://github.com/pandemicsyn/stalker",
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Environment :: No Input/Output (Daemon)',
],
install_requires=[
'stalkerutils==2.0.2',
'eventlet==0.17.4',
'flask==0.10.1',
'redis==2.10.3',
'pymongo==3.0.3',
'mmh3==2.3.1',
'flask-rethinkdb==0.2',
'rethinkdb==2.1.0.post2',
'flask-bcrypt==0.7.1',
'flask-wtf==0.12',
],
include_package_data=True,
zip_safe=False,
scripts=['bin/stalker-web',],
data_files=[('share/doc/stalkerweb',
['README.md', 'INSTALL',
'etc/stalker-web.conf',
'etc/init.d/stalker-web',
])]
)
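# Illustrative install sketch (assumes pip is available and the pinned
# dependencies above are resolvable):
#
#     pip install .          # regular install from a source checkout
#     pip install -e .       # editable install for development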
| {
"content_hash": "23b283b9a5e1fe950ad97aa72e0dec2d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 61,
"avg_line_length": 29.878048780487806,
"alnum_prop": 0.5510204081632653,
"repo_name": "ormandj/stalker",
"id": "27e54ee755a31cb4bf1b19cc1d0bf26ead161bb4",
"size": "1247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stalkerweb/setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "35664"
},
{
"name": "HTML",
"bytes": "48554"
},
{
"name": "JavaScript",
"bytes": "15047"
},
{
"name": "Makefile",
"bytes": "2040"
},
{
"name": "Python",
"bytes": "118647"
},
{
"name": "Shell",
"bytes": "9554"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from prompt_toolkit.utils import get_cwidth
from collections import defaultdict, namedtuple
from pygments.token import Token
import six
__all__ = (
'Point',
'Size',
'Screen',
'Char',
)
Point = namedtuple('Point', 'y x')
Size = namedtuple('Size', 'rows columns')
class Char(object):
"""
Represent a single character in a :class:`.Screen`.
This should be considered immutable.
"""
__slots__ = ('char', 'token', 'width')
# If we end up having one of these special control sequences in the input string,
# we should display them as follows:
# Usually this happens after a "quoted insert".
display_mappings = {
'\x00': '^@', # Control space
'\x01': '^A',
'\x02': '^B',
'\x03': '^C',
'\x04': '^D',
'\x05': '^E',
'\x06': '^F',
'\x07': '^G',
'\x08': '^H',
'\x09': '^I',
'\x0a': '^J',
'\x0b': '^K',
'\x0c': '^L',
'\x0d': '^M',
'\x0e': '^N',
'\x0f': '^O',
'\x10': '^P',
'\x11': '^Q',
'\x12': '^R',
'\x13': '^S',
'\x14': '^T',
'\x15': '^U',
'\x16': '^V',
'\x17': '^W',
'\x18': '^X',
'\x19': '^Y',
'\x1a': '^Z',
'\x1b': '^[', # Escape
'\x1c': '^\\',
'\x1d': '^]',
'\x1f': '^_',
'\x7f': '^?', # Control backspace
}
def __init__(self, char=' ', token=Token):
# If this character has to be displayed otherwise, take that one.
char = self.display_mappings.get(char, char)
self.char = char
self.token = token
# Calculate width. (We always need this, so better to store it directly
# as a member for performance.)
self.width = get_cwidth(char)
def __eq__(self, other):
return self.char == other.char and self.token == other.token
def __ne__(self, other):
# Not equal: We don't do `not char.__eq__` here, because of the
# performance of calling yet another function.
return self.char != other.char or self.token != other.token
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.char, self.token)
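# Illustrative sketch of the display mapping above (doctest-style, assuming the
# default pygments Token):
#
#     >>> Char('\x03', Token).char    # control characters are caret-escaped
#     '^C'
#     >>> Char('a', Token).width      # plain characters keep their cell width
#     1
#
# The stored width is computed from the *mapped* string, so a caret escape such
# as '^C' occupies two cells.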
class CharCache(dict):
"""
Cache of :class:`.Char` instances.
Mapping of (character, Token) tuples to Char instances.
(Char instances should be considered immutable.)
"""
def __missing__(self, key):
c = Char(*key)
self[key] = c
return c
_CHAR_CACHE = CharCache()
Transparent = Token.Transparent
class Screen(object):
"""
    Two-dimensional buffer of :class:`.Char` instances.
Typical usage::
screen = Screen()
screen.write_data([
(Token, 'text'), (Token, 'text'),
])
"""
def __init__(self, default_char=None, initial_width=0, initial_height=0):
if default_char is None:
default_char = Char(token=Transparent)
self.data_buffer = defaultdict(lambda: defaultdict(lambda: default_char))
#: Position of the cursor.
self.cursor_position = Point(y=0, x=0)
#: Visibility of the cursor.
self.show_cursor = True
#: (Optional) Where to position the menu. E.g. at the start of a completion.
#: (We can't use the cursor position, because we don't want the
#: completion menu to change its position when we browse through all the
#: completions.)
self.menu_position = None
#: Currently used width/height of the screen. This will increase when
#: data is written to the screen.
self.width = initial_width or 0
self.height = initial_height or 0
#: Mapping of buffer lines to input lines.
self.screen_line_to_input_line = {}
def write_data(self, data, width=None):
"""
Write a list of tokens to the screen.
When one of the tokens in the token list is
``Token.SetCursorPosition``, this will set the cursor position.
:param data: List of Token tuples to write to the buffer.
:param width: Width of the line wrap. (Don't wrap when `width` is None.)
:returns: A dictionary mapping the character positions of the input data to
(x, y) coordinates.
"""
if width is None:
width = 10 ** 100 # A very big number.
buffer = self.data_buffer
screen_line_to_input_line = self.screen_line_to_input_line
x = 0
y = 0
max_allowed_x = x + width
index = 0
line_number = 0
requires_line_feed = True
indexes_to_pos = {} # Map input positions to (x, y) coordinates.
set_cursor_position = Token.SetCursorPosition
max_x = 0
for token, text in data:
if token == set_cursor_position:
self.cursor_position = Point(y=y, x=x)
for char in text:
# Line feed.
if requires_line_feed:
screen_line_to_input_line[y] = line_number
requires_line_feed = False
char_obj = _CHAR_CACHE[char, token]
char_width = char_obj.width
                # If there is no more room left on this line, move to the
                # following line first. (Also in case of double-width characters.)
if x + char_width > max_allowed_x and char != '\n':
max_x = max(max_x, x)
y += 1
x = 0
# Keep mapping of index to position.
indexes_to_pos[index] = (x, y)
# Insertion of newline
if char == '\n':
max_x = max(max_x, x)
y += 1
x = 0
requires_line_feed = True
line_number += 1
# Insertion of a 'visible' character.
else:
buffer_y = buffer[y]
buffer_y[x] = char_obj
                    # When we have a double width character, store this byte in
                    # the second cell, so that if this character gets deleted
                    # afterwards, the ``output_screen_diff`` will notice that
                    # this byte is also gone and redraw both cells.
if char_width > 1:
buffer_y[x+1] = Char(six.unichr(0))
# Move position
x += char_width
index += 1
self.height = max(self.height, y + 1)
self.width = max(self.width, max_x, x)
return indexes_to_pos
def replace_all_tokens(self, token):
"""
        For all the characters in the screen, set the token to the given `token`.
"""
b = self.data_buffer
for y, row in b.items():
for x, char in row.items():
b[y][x] = _CHAR_CACHE[char.char, token]
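# Illustrative usage sketch of the Screen API above (values follow directly
# from the write_data implementation):
#
#     screen = Screen()
#     positions = screen.write_data([(Token, 'hello\nworld')], width=80)
#
# After this call screen.height == 2 and screen.width == 5, and `positions`
# maps each input index to its (x, y) cell, e.g. positions[6] == (0, 1) for
# the 'w' right after the newline.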
class WritePosition(object):
def __init__(self, xpos, ypos, width, height, extended_height=None):
assert height >= 0
assert extended_height is None or extended_height >= 0
assert width >= 0
# xpos and ypos can be negative. (A float can be partially visible.)
self.xpos = xpos
self.ypos = ypos
self.width = width
self.height = height
self.extended_height = extended_height or height
def __repr__(self):
return '%s(%r, %r, %r, %r, %r)' % (
self.__class__.__name__,
self.xpos, self.ypos, self.width, self.height, self.extended_height)
| {
"content_hash": "1705baee8de9901dc9cce155020996c9",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 85,
"avg_line_length": 30.87550200803213,
"alnum_prop": 0.5174297606659729,
"repo_name": "niklasf/python-prompt-toolkit",
"id": "7edf0f94e0025b0365a959e3750acc26872a38c8",
"size": "7688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prompt_toolkit/layout/screen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "635808"
}
],
"symlink_target": ""
} |
"""
Compile ONNX Models
===================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial to deploy ONNX models with NNVM.
To begin, the onnx module needs to be installed.
A quick solution is to install the protobuf compiler and then run
.. code-block:: bash
pip install onnx --user
or refer to the official site:
https://github.com/onnx/onnx
"""
import nnvm
import tvm
from tvm.contrib.download import download_testdata
import onnx
import numpy as np
######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model as in the onnx tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# We skip the pytorch model construction part and download the saved onnx model.
model_url = ''.join(['https://gist.github.com/zhreshold/',
'bcda4716699ac97ea44f791c24310193/raw/',
'93672b029103648953c4e5ad3ac3aadf346a4cdc/',
'super_resolution_0.2.onnx'])
model_path = download_testdata(model_url, 'super_resolution.onnx', module='onnx')
# now you have super_resolution.onnx on disk
onnx_model = onnx.load_model(model_path)
# we can load the graph as NNVM compatible model
sym, params = nnvm.frontend.from_onnx(onnx_model)
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_path = download_testdata(img_url, 'cat.png', module='data')
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr") # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
######################################################################
# Compile the model on NNVM
# ---------------------------------------------
# The compilation process should be familiar by now.
import nnvm.compiler
target = 'cuda'
# assume first input name is data
input_name = sym.list_input_names()[0]
shape_dict = {input_name: x.shape}
with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
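# If no CUDA device is available, the same model can be built for the CPU
# instead (a sketch; the 'llvm' target requires TVM built with LLVM support):
#
#     target = 'llvm'
#     with nnvm.compiler.build_config(opt_level=3):
#         graph, lib, params = nnvm.compiler.build(sym, target, shape_dict,
#                                                  params=params)
#
# and executed later with ctx = tvm.cpu(0) instead of tvm.gpu(0).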
######################################################################
# Execute on TVM
# ---------------------------------------------
# The process is no different from the other examples.
from tvm.contrib import graph_runtime
ctx = tvm.gpu(0)
dtype = 'float32'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input(input_name, tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
output_shape = (1, 1, 672, 672)
tvm_output = m.get_output(0, tvm.nd.empty(output_shape, dtype)).asnumpy()
######################################################################
# Display results
# ---------------------------------------------
# We put the input and output images side by side.
from matplotlib import pyplot as plt
out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode='L')
out_cb = img_cb.resize(out_y.size, Image.BICUBIC)
out_cr = img_cr.resize(out_y.size, Image.BICUBIC)
result = Image.merge('YCbCr', [out_y, out_cb, out_cr]).convert('RGB')
canvas = np.full((672, 672*2, 3), 255)
canvas[0:224, 0:224, :] = np.asarray(img)
canvas[:, 672:, :] = np.asarray(result)
plt.imshow(canvas.astype(np.uint8))
plt.show()
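# To keep a copy of the comparison instead of only displaying it, the standard
# calls can be used (a sketch; file names are arbitrary, and plt.savefig should
# come before plt.show):
#
#     result.save('cat_superres.png')
#     plt.savefig('cat_comparison.png')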
| {
"content_hash": "5635904604ef3dd471947ce60a96b2db",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 89,
"avg_line_length": 37.863157894736844,
"alnum_prop": 0.5938281901584654,
"repo_name": "Huyuwei/tvm",
"id": "97d154615e67d448fbd609d7591ac9239b57928b",
"size": "4382",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nnvm/tutorials/from_onnx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
} |
import sys
import pprint
from ciscoconfparse import CiscoConfParse
__author__ = 'aismagilov'
__docformat__ = 'reStructuredText'
__license__ = "MIT"
if __name__ == '__main__':
pass
else:
sys.exit('It cannot be imported...yet.')
cisco_cfg = CiscoConfParse('cisco_ipsec.txt')
interfaces = cisco_cfg.find_objects(r'^interface')
# pprint.pprint(interfaces[4].children[0].parent)
# for intf in interfaces:
# print intf.text
OSPF_key_chain_cfg = cisco_cfg.find_objects(r'^key chain OSPF')
# pprint.pprint(OSPF_key_chain_cfg[0].all_children)
interfaces_wo_IP = cisco_cfg.find_objects_w_child(parentspec=r'^interface', childspec=r'no ip address')
for intf in interfaces_wo_IP:
print(intf.text)
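# A small follow-on sketch using the same ciscoconfparse API as above: pull
# just the interface names out of the objects found by find_objects_w_child
# (the helper name is arbitrary).
def interface_names_without_ip(cfg=cisco_cfg):
    """Return names like 'FastEthernet0/0' for interfaces lacking an IP."""
    objs = cfg.find_objects_w_child(parentspec=r'^interface',
                                    childspec=r'no ip address')
    # 'interface FastEthernet0/0' -> 'FastEthernet0/0'
    return [obj.text.split(None, 1)[1] for obj in objs]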
| {
"content_hash": "554b6cea895ffb477a476e801de56ea0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 103,
"avg_line_length": 22.967741935483872,
"alnum_prop": 0.7050561797752809,
"repo_name": "aism/pynet-course",
"id": "4b418b505ac9ef48f76a9cf353e7d9616a4483b1",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Week1/ciscoConfParse_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29181"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
replaces = [(b'api', '0001_initial'), (b'api', '0002_auto_20151123_1606'), (b'api', '0003_auto_20151125_1435'), (b'api', '0004_datafile_upload_state'), (b'api', '0005_auto_20151126_0153'), (b'api', '0006_userstorageaccount_name'), (b'api', '0007_auto_20151130_2059'), (b'api', '0008_folder_storage_key'), (b'api', '0009_gdriveprovider_quota_bytes'), (b'api', '0010_auto_20151208_0742'), (b'api', '0011_datafile_external_link'), (b'api', '0012_useraction')]
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Datafile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('filename', models.CharField(max_length=1024)),
('size', models.IntegerField(default=0)),
('storage_key', models.CharField(default='', max_length=1024)),
('upload_state', models.CharField(default=b'DATAFILE_TRANSFER_IN_PROGRESS', max_length=255, choices=[(b'DATAFILE_READY', b'Ok'), (b'DATAFILE_UPLOAD_IN_PROGRESS', b'Upload in progress'), (b'DATAFILE_ERROR', b'Error'), (b'DATAFILE_TRANSFER_IN_PROGRESS', b'Transfer in progress')])),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserStorageAccount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='S3Provider',
fields=[
('userstorageaccount_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='api.UserStorageAccount')),
('access_key_id', models.CharField(max_length=255)),
('secret_access_key', models.CharField(max_length=255)),
('bucket_name', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
bases=('api.userstorageaccount',),
),
migrations.AddField(
model_name='userstorageaccount',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='userstorageaccount',
name='validated',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='userstorageaccount',
name='name',
field=models.CharField(max_length=255, blank=True),
),
migrations.CreateModel(
name='Folder',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('parent', models.ForeignKey(to='api.Folder', null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GDriveProvider',
fields=[
('userstorageaccount_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='api.UserStorageAccount')),
('credentials', models.CharField(max_length=4096)),
('quota_bytes', models.BigIntegerField(default=0)),
],
options={
'abstract': False,
},
bases=('api.userstorageaccount',),
),
migrations.AddField(
model_name='datafile',
name='owner',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='folder',
name='storage_account',
field=models.ForeignKey(to='api.UserStorageAccount'),
),
migrations.AddField(
model_name='datafile',
name='folder',
field=models.ForeignKey(default=0, to='api.Folder'),
preserve_default=False,
),
migrations.AddField(
model_name='folder',
name='storage_key',
field=models.CharField(default='root', max_length=1024),
preserve_default=False,
),
migrations.AlterField(
model_name='datafile',
name='size',
field=models.IntegerField(default=None, null=True),
),
migrations.AddField(
model_name='datafile',
name='external_link',
field=models.URLField(max_length=8192, blank=True),
),
migrations.CreateModel(
name='UserAction',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('action_type', models.CharField(max_length=255)),
('args', models.TextField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at'],
},
),
]
| {
"content_hash": "d57005666d0e3e74328dcd7d516dde69",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 460,
"avg_line_length": 43.48717948717949,
"alnum_prop": 0.5445165094339622,
"repo_name": "h2020-westlife-eu/VRE",
"id": "37791d59a53031cef92c20ee07bd7b92f2d6f5e1",
"size": "6808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/migrations/0001_squashed_0012_useraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46730"
},
{
"name": "HTML",
"bytes": "167326"
},
{
"name": "JavaScript",
"bytes": "22031"
},
{
"name": "Python",
"bytes": "310068"
},
{
"name": "Shell",
"bytes": "863"
}
],
"symlink_target": ""
} |
"""Tests for the currently experimental in-graph batch ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors import InvalidArgumentError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import gen_batch_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def delayed_plus1(x):
"""Sleeps for 100ms then returns x+1."""
time.sleep(0.1)
return x + 1
@test_util.run_all_in_graph_and_eager_modes
class BatchOpsTest(test.TestCase):
"""Tests for batch_ops.{un,}batch."""
  # Only non-eager mode is tested, as batching as a functionality in eager
  # context is TBD.
def testBasicBatch(self):
"""Tests that a single batched tensor executes together and only once."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
index_t = thread_results[1]
empty_b = main_results[0][0]
empty_m = main_results[1]
else:
batch_t = main_results[0][0]
index_t = main_results[1]
empty_b = thread_results[0][0]
empty_m = thread_results[1]
# Check that both the inputs made it out exactly once.
self.assertAllEqual(sorted(batch_t), (1, 2))
# Check that we get 2 rows in the index tensor.
self.assertEqual(len(index_t), 2)
# Check that the other ones are empty.
self.assertEqual(len(empty_b), 0)
self.assertEqual(len(empty_m), 0)
def testBatchWithPadding(self):
"""Test that batching with padding up to an allowed batch size works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[5, 10],
grad_timeout_micros=0, batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1, 3]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
else:
batch_t = main_results[0][0]
# Check that the batch tensor incorporates the padding.
self.assertEqual(len(batch_t), 5)
def testMultipleBatch(self):
"""Tests that multiple batched tensors execute together."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, _, _ = batch_ops.batch(
[inp0, inp1],
num_batch_threads=1,
max_batch_size=2,
batch_timeout_micros=36000000,
grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched], feed_dict={inp0: [1],
inp1: [2]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0]
empty_t = main_results[0]
else:
batch_t = main_results[0]
empty_t = thread_results[0]
# Assert that the tensors were batched together.
self.assertAllEqual(sorted(batch_t[0]), [1, 2])
self.assertAllEqual(sorted(batch_t[1]), [2, 3])
self.assertAllEqual(empty_t[0], [])
self.assertAllEqual(empty_t[1], [])
def testIllegalBatchDifferentDim0Sizes(self):
"""Tests illegally feeding tensors with different dim0 sizes."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp0, inp1], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
with self.assertRaises(Exception) as raised:
_ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
self.assertGreater(
raised.exception.message.find("must have equal 0th-dimension size"),
0)
def testBasicUnbatch(self):
"""Tests that batch and unbatch work together."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[3, 10],
grad_timeout_micros=0, batching_queue="")
computation = batched[0] + 1
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBasicUnbatchDecorated(self):
"""Tests that the batch_function decorator works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
# TODO(apassos): Removing this line causes test flakiness! Ideally should
# be investigated.
default_inp = array_ops.placeholder_with_default(2, shape=[]) # pylint: disable=unused-variable
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
self.assertTrue(in_t.shape is not None)
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBatchDecoratedWithCapturedInput(self):
"""Tests that the batch_function decorator works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
captured_inp0 = array_ops.placeholder_with_default(2., shape=[])
captured_inp1 = resource_variable_ops.ResourceVariable(3.)
with ops.device("/cpu:0"):
captured_inp2 = resource_variable_ops.ResourceVariable(4.)
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
return in_t + captured_inp0 + captured_inp1 + captured_inp2
inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
sess.run(variables.global_variables_initializer())
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [10])
self.assertEqual(main_results[0], [11])
@test_util.disable_xla("DeviceIndex returns sentinel value with XLA")
def testBatchDecoratedGpu(self):
if context.executing_eagerly():
return
with self.cached_session() as sess:
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
# index is 0 on CPU and 1 on GPU
index = gen_functional_ops.DeviceIndex(device_names=["CPU", "GPU"])
return in_t + math_ops.cast(index, dtypes.float32)
inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [10.]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [20.]})
worker_thread.join()
self.assertEqual(thread_results[0], [10 + test_util.is_gpu_available()])
self.assertEqual(main_results[0], [20 + test_util.is_gpu_available()])
def testParallelRunsWithCpuAndGpu(self):
# Run multiple instances of a batch function in parallel. This is a
# regression test: this used to fail because _Send nodes for one call would
# send the tensor to the _Recv node for a different call.
if context.executing_eagerly():
return
@batch_ops.batch_function(1, 2, 1)
def f(x):
with ops.device("/GPU:0"):
x = x + 1.
with ops.device("/CPU:0"):
return x + 1
num_calls = 10
placeholders = [array_ops.placeholder(dtypes.float32, shape=(1,))
for _ in range(num_calls)]
results = []
for p in placeholders:
result = f(p)
results.append(result)
inputs = [[float(i)] for i in range(num_calls)]
expected = [[float(i + 2)] for i in range(num_calls)]
with self.session() as sess:
outputs = sess.run(results, feed_dict=dict(zip(placeholders, inputs)))
self.assertAllEqual(outputs, expected)
def testSoftPlacement(self):
if context.executing_eagerly():
return
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
with ops.device("/GPU:0"):
return in_t + 1.
inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
result = computation(inp)
# With soft placement, the function will run even without a GPU
config = config_pb2.ConfigProto(allow_soft_placement=True)
with self.session(config=config) as sess:
sess.run([result], feed_dict={inp: [20.]})
# Without soft placement, the function fails without a GPU due to the
# addition explicitly being placed on the GPU
config.allow_soft_placement = False
with self.session(config=config) as sess:
if test_util.is_gpu_available():
sess.run([result], feed_dict={inp: [20.]})
else:
with self.assertRaisesRegex(InvalidArgumentError,
"Cannot assign a device for operation"):
sess.run([result], feed_dict={inp: [20.]})
def testBatchFunctionOp(self):
"""Tests that the batch_function op works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
@function.Defun(dtypes.int32)
def computation(in_t):
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = gen_batch_ops.batch_function(
[inp],
num_batch_threads=1,
max_batch_size=10,
batch_timeout_micros=100000,
Tout=[dtypes.int32],
f=computation,
captured_tensors=computation.captured_inputs)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBatchFunctionOpWithCapturedInput(self):
"""Tests that batch_function op works with captured input."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
captured_inp0 = array_ops.placeholder_with_default(2, shape=[])
captured_inp1 = array_ops.placeholder_with_default(1, shape=[])
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
@function.Defun(dtypes.int32)
def computation(inp):
return inp + captured_inp0 - captured_inp1
result = gen_batch_ops.batch_function(
num_batch_threads=1,
max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[3, 10],
batching_queue="",
f=computation,
in_tensors=[inp],
captured_tensors=computation.captured_inputs,
Tout=[o.type for o in computation.definition.signature.output_arg])
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBatchFunctionOpWithInputError(self):
"""Tests that batch_function op works with error in the inputs."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
@function.Defun(dtypes.int32, dtypes.int32)
def computation(in0, in1):
return in0 + in1
result = gen_batch_ops.batch_function(
[inp], # computation actually expects 2 inputs.
num_batch_threads=1,
max_batch_size=10,
batch_timeout_micros=100000, # 100ms
batching_queue="",
f=computation,
captured_tensors=computation.captured_inputs,
Tout=[o.type for o in computation.definition.signature.output_arg])
with self.assertRaisesRegex(
InvalidArgumentError,
r"Function takes 2 argument\(s\) but 1 argument\(s\) were passed"):
sess.run([result], feed_dict={inp: [2]})
def testBatchFunctionOpWithLargeBatchSplitted(self):
"""Tests that the batch_function op works with large batch splitted."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
@function.Defun(dtypes.int32)
def computation(in_t):
return in_t + 3
inp = array_ops.placeholder(dtype=dtypes.int32)
result = gen_batch_ops.batch_function(
[inp],
num_batch_threads=2,
# enable_large_batch_splitting is True, so it's valid as long as
# max('allowed_batch_sizes') <= 'max_batch_size'.
allowed_batch_sizes=[1, 2],
max_batch_size=5,
batch_timeout_micros=100000, # 100ms
Tout=[dtypes.int32],
enable_large_batch_splitting=True,
f=computation,
captured_tensors=computation.captured_inputs)
thread1_results = []
thread2_results = []
# Input sizes of worker1 and main thread are larger than
# max(allowed_batch_sizes), while input size of worker2 is smaller.
def worker1():
thread1_results.extend(
sess.run([result], feed_dict={inp: [5, 6, 7, 8, 9]}))
worker_thread1 = threading.Thread(target=worker1)
worker_thread1.start()
def worker2():
thread2_results.extend(sess.run([result], feed_dict={inp: [10]}))
worker_thread2 = threading.Thread(target=worker2)
worker_thread2.start()
main_results = sess.run([result], feed_dict={inp: [2, 3, 4]})
worker_thread1.join()
worker_thread2.join()
self.assertTrue(
np.all(np.equal(thread2_results[0], np.array([13], dtype=np.int32))))
self.assertTrue(
np.all(
np.equal(thread1_results[0],
np.array([8, 9, 10, 11, 12], dtype=np.int32))))
self.assertTrue(
np.all(
np.equal(main_results[0], np.array([5, 6, 7], dtype=np.int32))))
def testBasicUnbatchDecoratedWithReshape(self):
"""Tests that the batch_function decorator works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
return array_ops.reshape(in_t, [-1]) + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1, 1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [[1]]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [[2]]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testUnbatchTimeout(self):
"""Tests that the unbatch timeout works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
computation = batched[0] + 1
timeout_micros = 10
result = batch_ops.unbatch(computation, index, id_t, timeout_micros,
shared_name="shared_unbatch")
# Set up a parallel pipeline that delays the computation, but uses the
# same unbatch resource object as the non-delayed pipeline.
computation_delayed = script_ops.py_func(delayed_plus1,
[batched[0]],
dtypes.int32)
result_delayed = batch_ops.unbatch(computation_delayed,
index,
id_t,
timeout_micros,
shared_name="shared_unbatch")
thread_results = []
def worker():
# A first call using the non-delayed pipeline. The batcher will send an
# empty tensor along the non-delayed pipeline.
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
time.sleep(0.1) # Ensure the thread's call starts first.
# A second call using the delayed pipeline. The batcher will send the
# batched tensor along the delayed pipeline, thus delaying the arrival of
# the batched tensor at the unbatch op, relative to the empty tensor.
#
# TODO(olston, apassos): Avoid relying on the order in which the batch op
# emits the empty tensor versus the batched one.
_ = sess.run([result_delayed], feed_dict={inp: [2]})
worker_thread.join()
# The thread's call should hit the timeout, and thus get 0 results.
self.assertEqual(len(thread_results), 0)
if __name__ == "__main__":
test.main()
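# Illustrative recap (a sketch distilled from the tests above, not an
# additional test): the minimal graph-mode pattern exercised here is
#
#     @batch_ops.batch_function(1, 10, 100000)  # threads, max size, timeout us
#     def computation(in_t):
#         return in_t + 1
#
#     inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
#     result = computation(inp)
#
# Concurrent session.run(result, ...) calls feeding single-element tensors are
# batched into one execution of `computation` (once an allowed batch size or
# the batch timeout is reached) and the outputs are unbatched back to the
# individual callers.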
| {
"content_hash": "dba2165c114ce12c18dee6c03b2817c1",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 102,
"avg_line_length": 37.686025408348456,
"alnum_prop": 0.6276908259089815,
"repo_name": "frreiss/tensorflow-fred",
"id": "c29a30600c549d8415ff20dd5c861cef141760e8",
"size": "21455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/batch_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NVX_gpu_memory_info'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_NVX_gpu_memory_info',error_checker=_errors._error_checker)
GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX=_C('GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX',0x9049)
GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX=_C('GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX',0x9047)
GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX=_C('GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX',0x904B)
GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX=_C('GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX',0x904A)
GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX=_C('GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX',0x9048)
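# Minimal usage sketch (assumes a current GL context and an NVIDIA driver that
# exposes this extension; per the NVX_gpu_memory_info spec the values are
# reported in kilobytes):
#
#     from OpenGL.GL import glGetIntegerv
#     total_kb = glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX)
#     free_kb = glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX)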
| {
"content_hash": "d7a897bb67e90d64587441e6538370a4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 116,
"avg_line_length": 51.8421052631579,
"alnum_prop": 0.7654822335025381,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "6df49adbff3c7b77194912773f8666aeecb6429b",
"size": "985",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GL/NVX/gpu_memory_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
from .deprecation import deprecated
def _convert_responsive(responsive):
deprecated((0, 12, 10), "responsive parameter", "sizing_mode='fixed' for responsive=False or sizing_mode='scale_width' for responsive=True")
if responsive is True:
return 'scale_width'
if responsive is False:
        return 'fixed'
raise ValueError("'responsive' may only be True or False, passed %r" % responsive)
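# A quick sketch of the mapping implemented above (each call also emits the
# deprecation warning):
#
#     _convert_responsive(True)  -> 'scale_width'
#     _convert_responsive(False) -> 'fixed'
#     _convert_responsive('yes') -> ValueError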
| {
"content_hash": "eb0401bcbc3d421190366fe0011da3ce",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 144,
"avg_line_length": 45.77777777777778,
"alnum_prop": 0.7111650485436893,
"repo_name": "philippjfr/bokeh",
"id": "10ce7153b54ca537f7d69581568d04f64f61c883",
"size": "412",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/util/_plot_arg_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "104935"
},
{
"name": "CoffeeScript",
"bytes": "1236045"
},
{
"name": "HTML",
"bytes": "48230"
},
{
"name": "JavaScript",
"bytes": "57759"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2642580"
},
{
"name": "Shell",
"bytes": "8519"
},
{
"name": "TypeScript",
"bytes": "228756"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import hmac
from hashlib import sha1
from unittest import TestCase
from flask import request, render_template
from flask_anyform import AnyForm, AForm
from wtforms import Form, TextField
from .test_app import create_app
class AnyFormTest(TestCase):
APP_KWARGS = {}
ANYFORM_CONFIG = None
def setUp(self):
super(AnyFormTest, self).setUp()
class TestForm(Form):
test = TextField(default='TEST FORM')
class TestForm2(Form):
test = TextField(default='TEST FORM TWO')
self.forms = [
{'af_tag':'test',
'af_form': TestForm,
'af_template': 'macros/_test.html',
'af_macro': 'test_macro',
'af_points': ['all'] },
{'af_tag':'test2',
'af_form': TestForm2,
'af_template': 'macros/_test.html',
'af_macro': 'test_macro',
'af_points': ['notindex']}
]
app_kwargs = self.APP_KWARGS
app = self._create_app(self.ANYFORM_CONFIG or {}, **app_kwargs)
app.debug = False
app.config['TESTING'] = True
app.config['SECRET_KEY'] = B'SECRET_KEY'
app.anyform_e = AnyForm(app, forms=self.forms)
self.app = app
self.client = app.test_client()
s = app.extensions['anyform']
@s.aform_ctx
def anyform_ctxfn():
return {'t1val': "RETURNED FROM A FORM CONTEXT FUNCTION"}
@s.aform_ctx
def test2_ctxfn():
return dict(t2v="RETURNED FROM A TAGGED CONTEXT VALUE FUNCTION")
with self.client.session_transaction() as session:
session['csrf'] = 'csrf_token'
csrf_hmac = hmac.new(self.app.config['SECRET_KEY'], 'csrf_token'.encode('utf-8'), digestmod=sha1)
self.csrf_token = '##' + csrf_hmac.hexdigest()
def _create_app(self, anyform_config, **kwargs):
return create_app(anyform_config, **kwargs)
def _get(self,
route,
content_type=None,
follow_redirects=None,
headers=None):
return self.client.get(route,
follow_redirects=follow_redirects,
content_type=content_type or 'text/html',
headers=headers)
def _post(self,
route,
data=None,
content_type=None,
follow_redirects=True,
headers=None):
if isinstance(data, dict):
data['csrf_token'] = self.csrf_token
return self.client.post(route,
data=data,
follow_redirects=follow_redirects,
content_type=content_type or
'application/x-www-form-urlencoded',
headers=headers)
| {
"content_hash": "165bdd0b57b1895187d583188c14e68b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 105,
"avg_line_length": 31.966666666666665,
"alnum_prop": 0.5380604796663191,
"repo_name": "fc-thrisp-hurrata-dlm-graveyard/flask-anyform",
"id": "2339e267d25ff1cd2c434541c23e326069a74f0f",
"size": "2877",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26632"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys, os, json, subprocess
import auth, requests
def tidy(text):
#html, errors = tidylib.tidy_document(body, {'char-encoding': 'utf8'}) # libtidy exception on elcap
return subprocess.check_output(['/usr/bin/php', 'tidy.php', text])
def get_config(key):
res = os.getenv(key)
# if not res:
# testdata = {
# 'POPCLIP_OPTION_AUTHSECRET': '""',
# 'POPCLIP_TEXT': "<p>test text abc fdf"
# }
# if key in testdata:
# res = testdata[key]
return res;
try:
session = auth.get_session(json.loads(get_config('POPCLIP_OPTION_AUTHSECRET')))
except requests.exceptions.ConnectionError:
exit(1)
except:
exit(2)
# use html in preference to plain text
body = get_config('POPCLIP_HTML') or get_config('POPCLIP_TEXT')
# add page title if we know it
title = get_config('POPCLIP_BROWSER_TITLE')
if title:
    body = '<title>%s</title>' % title + body
# add source link if we know it
source = get_config('POPCLIP_BROWSER_URL')
if source:
body += '<p>Clipped from: <a href="%s">%s</a></p>' % (source, source)
# run the html through tidy
html = tidy(body)
# do the job
r = session.post('pages', files={'Presentation': ('Clipping', html, 'text/html')})
if r.status_code != 201:
exit(1) | {
"content_hash": "78ef9e8f708e24a4a92d287a1ad04b68",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 104,
"avg_line_length": 28.73913043478261,
"alnum_prop": 0.6293494704992436,
"repo_name": "onecrayon/PopClip-Extensions",
"id": "ff6c4cd77633f9f4741390a279094081d7dee15f",
"size": "1322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "source/OneNote/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "12720"
},
{
"name": "JavaScript",
"bytes": "137"
},
{
"name": "Objective-C",
"bytes": "10556"
},
{
"name": "PHP",
"bytes": "140106"
},
{
"name": "Perl",
"bytes": "5449"
},
{
"name": "Python",
"bytes": "3162766"
},
{
"name": "Ruby",
"bytes": "284"
},
{
"name": "Shell",
"bytes": "6878"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from wysadzulice import models
admin.site.register(models.CatalogItem)
admin.site.register(models.Mail)
| {
"content_hash": "23c0c85865b39cb706cfa72901f31cfe",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 20,
"alnum_prop": 0.8285714285714286,
"repo_name": "komitywa/wysadzulice.pl",
"id": "990526e34988040fecb4049d1e2af8974a997623",
"size": "165",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wysadzulice/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5560"
},
{
"name": "JavaScript",
"bytes": "3341"
},
{
"name": "Python",
"bytes": "10215"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from filer.models import filemodels
from filer.utils.compatibility import python_2_unicode_compatible
@python_2_unicode_compatible
class Clipboard(models.Model):
user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'), verbose_name=_('user'), related_name="filer_clipboards")
files = models.ManyToManyField(
'File', verbose_name=_('files'), related_name="in_clipboards",
through='ClipboardItem')
def append_file(self, file_obj):
try:
            # We have to check whether the file is already in the clipboard, as otherwise polymorphic complains
self.files.get(pk=file_obj.pk)
return False
except filemodels.File.DoesNotExist:
newitem = ClipboardItem(file=file_obj, clipboard=self)
newitem.save()
return True
def __str__(self):
return "Clipboard %s of %s" % (self.id, self.user)
class Meta(object):
app_label = 'filer'
verbose_name = _('clipboard')
verbose_name_plural = _('clipboards')
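# Illustrative usage sketch (assumes an existing `user` and a filer File
# instance `file_obj`):
#
#     clipboard, _ = Clipboard.objects.get_or_create(user=user)
#     added = clipboard.append_file(file_obj)  # False if already present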
class ClipboardItem(models.Model):
file = models.ForeignKey('File', verbose_name=_('file'))
clipboard = models.ForeignKey(Clipboard, verbose_name=_('clipboard'))
class Meta(object):
app_label = 'filer'
verbose_name = _('clipboard item')
verbose_name_plural = _('clipboard items')
| {
"content_hash": "5aea01fb4a52fa631661713a21166859",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 136,
"avg_line_length": 34.63636363636363,
"alnum_prop": 0.6601049868766404,
"repo_name": "DylannCordel/django-filer",
"id": "05cce030fbc21e22381833ceabbdde001f537615",
"size": "1549",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "filer/models/clipboardmodels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84475"
},
{
"name": "HTML",
"bytes": "72813"
},
{
"name": "JavaScript",
"bytes": "46053"
},
{
"name": "Python",
"bytes": "465559"
},
{
"name": "Ruby",
"bytes": "1157"
},
{
"name": "Shell",
"bytes": "139"
}
],
"symlink_target": ""
} |
import unittest
import os
from signaturit_sdk.signaturit_client import SignaturitClient
from signaturit_sdk.resources.parser import Parser
import httpretty
import warnings
class TestEmail(unittest.TestCase):
TEST_FILE_URL = '/tmp/test.pdf'
def setUp(self):
warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*")
def test_create_email_with_invalid_params_should_raise_exception(self):
client = SignaturitClient('TOKEN')
self.assertRaises(Exception, client.create_email, {'testing': 'some_value'})
@httpretty.activate
def test_get_emails(self):
httpretty.register_uri(httpretty.GET, "https://api.sandbox.signaturit.com/v3/emails.json",
body='{"recipients": [{"email": "[email protected]", "fullname": "Mr Test"}],"subject": "Testing"}',
content_type="application/json")
signaturit_client = SignaturitClient('SOME_TOKEN')
response = signaturit_client.get_emails()
self.assertEqual('Testing', response['subject'])
self.assertEqual([{"email": "[email protected]", "fullname": "Mr Test"}], response['recipients'])
@httpretty.activate
def test_count_emails(self):
httpretty.register_uri(httpretty.GET, "https://api.sandbox.signaturit.com/v3/emails/count.json",
body='3',
content_type="application/json")
signaturit_client = SignaturitClient('SOME_TOKEN')
response = signaturit_client.count_emails()
self.assertEqual(3, response)
@httpretty.activate
def get_email(self):
httpretty.register_uri(httpretty.GET, "https://api.sandbox.signaturit.com/v2/email/EMAIL_ID.json",
body='{"id": "SIGN_ID", ' +
'"recipients": [{"email": "[email protected]", "fullname": "Mr Test"}], ' +
'"subject": "Testing"}',
content_type="application/json")
signaturit_client = SignaturitClient('SOME_TOKEN')
response = signaturit_client.get_email('SIGN_ID')
self.assertEqual('Testing', response['subject'])
self.assertEqual('SIGN_ID', response['id'])
self.assertEqual([{"email": "[email protected]", "fullname": "Mr Test"}], response['recipients'])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "3265788f96ba1b1f9695ded30630ab19",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 127,
"avg_line_length": 39.193548387096776,
"alnum_prop": 0.6053497942386832,
"repo_name": "signaturit/python-sdk",
"id": "dd1be139333aecb868436ac436951499cb09c266",
"size": "2430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signaturit_sdk/tests/test_email.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37223"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import importlib
import inspect
import json
import logging
import sys
import time
import threading
import traceback
from collections import (
namedtuple,
defaultdict,
)
import ray
from ray import profiling
from ray import ray_constants
from ray import cloudpickle as pickle
from ray.utils import (
binary_to_hex,
is_cython,
is_function_or_method,
is_class_method,
check_oversized_pickle,
decode,
ensure_str,
format_error_message,
push_error_to_driver,
)
FunctionExecutionInfo = namedtuple("FunctionExecutionInfo",
["function", "function_name", "max_calls"])
"""FunctionExecutionInfo: A named tuple storing remote function information."""
logger = logging.getLogger(__name__)
class FunctionDescriptor(object):
"""A class used to describe a python function.
Attributes:
module_name: the module name that the function belongs to.
class_name: the class name that the function belongs to if exists.
It could be empty is the function is not a class method.
function_name: the function name of the function.
function_hash: the hash code of the function source code if the
function code is available.
function_id: the function id calculated from this descriptor.
is_for_driver_task: whether this descriptor is for driver task.
"""
def __init__(self,
module_name,
function_name,
class_name="",
function_source_hash=b""):
self._module_name = module_name
self._class_name = class_name
self._function_name = function_name
self._function_source_hash = function_source_hash
self._function_id = self._get_function_id()
def __repr__(self):
return ("FunctionDescriptor:" + self._module_name + "." +
self._class_name + "." + self._function_name + "." +
binary_to_hex(self._function_source_hash))
@classmethod
def from_bytes_list(cls, function_descriptor_list):
"""Create a FunctionDescriptor instance from list of bytes.
This function is used to create the function descriptor from
backend data.
Args:
cls: Current class which is required argument for classmethod.
function_descriptor_list: list of bytes to represent the
function descriptor.
Returns:
The FunctionDescriptor instance created from the bytes list.
"""
assert isinstance(function_descriptor_list, list)
if len(function_descriptor_list) == 0:
# This is a function descriptor of driver task.
return FunctionDescriptor.for_driver_task()
elif (len(function_descriptor_list) == 3
or len(function_descriptor_list) == 4):
module_name = ensure_str(function_descriptor_list[0])
class_name = ensure_str(function_descriptor_list[1])
function_name = ensure_str(function_descriptor_list[2])
if len(function_descriptor_list) == 4:
return cls(module_name, function_name, class_name,
function_descriptor_list[3])
else:
return cls(module_name, function_name, class_name)
else:
raise Exception(
"Invalid input for FunctionDescriptor.from_bytes_list")
@classmethod
def from_function(cls, function):
"""Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function.
"""
module_name = function.__module__
function_name = function.__name__
class_name = ""
function_source_hasher = hashlib.sha1()
try:
# If we are running a script or are in IPython, include the source
# code in the hash.
source = inspect.getsource(function)
if sys.version_info[0] >= 3:
source = source.encode()
function_source_hasher.update(source)
function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
# Source code may not be available:
# e.g. Cython or Python interpreter.
function_source_hash = b""
return cls(module_name, function_name, class_name,
function_source_hash)
@classmethod
def from_class(cls, target_class):
"""Create a FunctionDescriptor from a class.
Args:
cls: Current class which is required argument for classmethod.
target_class: the python class used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the class.
"""
module_name = target_class.__module__
class_name = target_class.__name__
return cls(module_name, "__init__", class_name)
@classmethod
def for_driver_task(cls):
"""Create a FunctionDescriptor instance for a driver task."""
return cls("", "", "", b"")
@property
def is_for_driver_task(self):
"""See whether this function descriptor is for a driver or not.
Returns:
True if this function descriptor is for driver tasks.
"""
return all(
len(x) == 0
for x in [self.module_name, self.class_name, self.function_name])
@property
def module_name(self):
"""Get the module name of current function descriptor.
Returns:
The module name of the function descriptor.
"""
return self._module_name
@property
def class_name(self):
"""Get the class name of current function descriptor.
Returns:
The class name of the function descriptor. It could be
empty if the function is not a class method.
"""
return self._class_name
@property
def function_name(self):
"""Get the function name of current function descriptor.
Returns:
The function name of the function descriptor.
"""
return self._function_name
@property
def function_hash(self):
"""Get the hash code of the function source code.
Returns:
The bytes with length of ray_constants.ID_SIZE if the source
code is available. Otherwise, the bytes length will be 0.
"""
return self._function_source_hash
@property
def function_id(self):
"""Get the function id calculated from this descriptor.
Returns:
            The ray.FunctionID that represents the function id.
"""
return self._function_id
def _get_function_id(self):
"""Calculate the function id of current function descriptor.
This function id is calculated from all the fields of function
descriptor.
Returns:
            ray.FunctionID to represent the function descriptor.
"""
if self.is_for_driver_task:
return ray.FunctionID.nil()
function_id_hash = hashlib.sha1()
# Include the function module and name in the hash.
function_id_hash.update(self.module_name.encode("ascii"))
function_id_hash.update(self.function_name.encode("ascii"))
function_id_hash.update(self.class_name.encode("ascii"))
function_id_hash.update(self._function_source_hash)
# Compute the function ID.
function_id = function_id_hash.digest()
return ray.FunctionID(function_id)
def get_function_descriptor_list(self):
"""Return a list of bytes representing the function descriptor.
This function is used to pass this function descriptor to backend.
Returns:
A list of bytes.
"""
descriptor_list = []
if self.is_for_driver_task:
# Driver task returns an empty list.
return descriptor_list
else:
descriptor_list.append(self.module_name.encode("ascii"))
descriptor_list.append(self.class_name.encode("ascii"))
descriptor_list.append(self.function_name.encode("ascii"))
if len(self._function_source_hash) != 0:
descriptor_list.append(self._function_source_hash)
return descriptor_list
def is_actor_method(self):
"""Wether this function descriptor is an actor method.
Returns:
True if it's an actor method, False if it's a normal function.
"""
return len(self._class_name) > 0
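# A brief usage sketch (not part of the original module): a descriptor built
# from a plain function can be serialized for the backend with
# get_function_descriptor_list() and reconstructed with from_bytes_list(),
# yielding the same function id. The `greet` function below is made up.
def _function_descriptor_roundtrip_example():
    def greet():
        return "hello"
    descriptor = FunctionDescriptor.from_function(greet)
    rebuilt = FunctionDescriptor.from_bytes_list(
        descriptor.get_function_descriptor_list())
    assert rebuilt.function_id == descriptor.function_id
    return rebuilt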
class FunctionActorManager(object):
"""A class used to export/load remote functions and actors.
Attributes:
        _worker: The worker that this manager is associated with.
_functions_to_export: The remote functions to export when
the worker gets connected.
_actors_to_export: The actors to export when the worker gets
connected.
        _function_execution_info: The map from driver_id to a dict mapping
            function_id to its FunctionExecutionInfo.
_num_task_executions: The map from driver_id to function
execution times.
imported_actor_classes: The set of actor classes keys (format:
ActorClass:function_id) that are already in GCS.
"""
def __init__(self, worker):
self._worker = worker
self._functions_to_export = []
self._actors_to_export = []
# This field is a dictionary that maps a driver ID to a dictionary of
# functions (and information about those functions) that have been
# registered for that driver (this inner dictionary maps function IDs
        # to a FunctionExecutionInfo object). This should only be used on
# workers that execute remote functions.
self._function_execution_info = defaultdict(lambda: {})
self._num_task_executions = defaultdict(lambda: {})
# A set of all of the actor class keys that have been imported by the
# import thread. It is safe to convert this worker into an actor of
# these types.
self.imported_actor_classes = set()
self._loaded_actor_classes = {}
self.lock = threading.Lock()
def increase_task_counter(self, driver_id, function_descriptor):
function_id = function_descriptor.function_id
if self._worker.load_code_from_local:
driver_id = ray.DriverID.nil()
self._num_task_executions[driver_id][function_id] += 1
def get_task_counter(self, driver_id, function_descriptor):
function_id = function_descriptor.function_id
if self._worker.load_code_from_local:
driver_id = ray.DriverID.nil()
return self._num_task_executions[driver_id][function_id]
def export_cached(self):
"""Export cached remote functions
Note: this should be called only once when worker is connected.
"""
for remote_function in self._functions_to_export:
self._do_export(remote_function)
self._functions_to_export = None
for info in self._actors_to_export:
(key, actor_class_info) = info
self._publish_actor_class_to_key(key, actor_class_info)
def reset_cache(self):
self._functions_to_export = []
self._actors_to_export = []
def export(self, remote_function):
"""Export a remote function.
Args:
remote_function: the RemoteFunction object.
"""
if self._worker.mode is None:
# If the worker isn't connected, cache the function
# and export it later.
self._functions_to_export.append(remote_function)
return
if self._worker.mode == ray.worker.LOCAL_MODE:
# Don't need to export if the worker is not a driver.
return
self._do_export(remote_function)
def _do_export(self, remote_function):
"""Pickle a remote function and export it to redis.
Args:
remote_function: the RemoteFunction object.
"""
if self._worker.load_code_from_local:
return
# Work around limitations of Python pickling.
function = remote_function._function
function_name_global_valid = function.__name__ in function.__globals__
function_name_global_value = function.__globals__.get(
function.__name__)
# Allow the function to reference itself as a global variable
if not is_cython(function):
function.__globals__[function.__name__] = remote_function
try:
pickled_function = pickle.dumps(function)
finally:
# Undo our changes
if function_name_global_valid:
function.__globals__[function.__name__] = (
function_name_global_value)
else:
del function.__globals__[function.__name__]
check_oversized_pickle(pickled_function,
remote_function._function_name,
"remote function", self._worker)
key = (b"RemoteFunction:" + self._worker.task_driver_id.binary() + b":"
+ remote_function._function_descriptor.function_id.binary())
self._worker.redis_client.hmset(
key, {
"driver_id": self._worker.task_driver_id.binary(),
"function_id": remote_function._function_descriptor.
function_id.binary(),
"name": remote_function._function_name,
"module": function.__module__,
"function": pickled_function,
"max_calls": remote_function._max_calls
})
self._worker.redis_client.rpush("Exports", key)
def fetch_and_register_remote_function(self, key):
"""Import a remote function."""
(driver_id_str, function_id_str, function_name, serialized_function,
num_return_vals, module, resources,
max_calls) = self._worker.redis_client.hmget(key, [
"driver_id", "function_id", "name", "function", "num_return_vals",
"module", "resources", "max_calls"
])
function_id = ray.FunctionID(function_id_str)
driver_id = ray.DriverID(driver_id_str)
function_name = decode(function_name)
max_calls = int(max_calls)
module = decode(module)
# This is a placeholder in case the function can't be unpickled. This
# will be overwritten if the function is successfully registered.
def f():
raise Exception("This function was not imported properly.")
# This function is called by ImportThread. This operation needs to be
        # atomic. Otherwise, there is a race condition. Another thread may use
# the temporary function above before the real function is ready.
with self.lock:
self._function_execution_info[driver_id][function_id] = (
FunctionExecutionInfo(
function=f,
function_name=function_name,
max_calls=max_calls))
self._num_task_executions[driver_id][function_id] = 0
try:
function = pickle.loads(serialized_function)
except Exception:
# If an exception was thrown when the remote function was
# imported, we record the traceback and notify the scheduler
# of the failure.
traceback_str = format_error_message(traceback.format_exc())
# Log the error message.
push_error_to_driver(
self._worker,
ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR,
"Failed to unpickle the remote function '{}' with "
"function ID {}. Traceback:\n{}".format(
function_name, function_id.hex(), traceback_str),
driver_id=driver_id)
else:
# The below line is necessary. Because in the driver process,
# if the function is defined in the file where the python
# script was started from, its module is `__main__`.
# However in the worker process, the `__main__` module is a
# different module, which is `default_worker.py`
function.__module__ = module
self._function_execution_info[driver_id][function_id] = (
FunctionExecutionInfo(
function=function,
function_name=function_name,
max_calls=max_calls))
# Add the function to the function table.
self._worker.redis_client.rpush(
b"FunctionTable:" + function_id.binary(),
self._worker.worker_id)
def get_execution_info(self, driver_id, function_descriptor):
"""Get the FunctionExecutionInfo of a remote function.
Args:
driver_id: ID of the driver that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object.
"""
if self._worker.load_code_from_local:
# Load function from local code.
# Currently, we don't support isolating code by drivers,
# thus always set driver ID to NIL here.
driver_id = ray.DriverID.nil()
if not function_descriptor.is_actor_method():
self._load_function_from_local(driver_id, function_descriptor)
else:
# Load function from GCS.
# Wait until the function to be executed has actually been
# registered on this worker. We will push warnings to the user if
# we spend too long in this loop.
# The driver function may not be found in sys.path. Try to load
# the function from GCS.
with profiling.profile("wait_for_function"):
self._wait_for_function(function_descriptor, driver_id)
try:
function_id = function_descriptor.function_id
info = self._function_execution_info[driver_id][function_id]
except KeyError as e:
message = ("Error occurs in get_execution_info: "
"driver_id: %s, function_descriptor: %s. Message: %s" %
(driver_id, function_descriptor, e))
raise KeyError(message)
return info
def _load_function_from_local(self, driver_id, function_descriptor):
assert not function_descriptor.is_actor_method()
function_id = function_descriptor.function_id
if (driver_id in self._function_execution_info
                and function_id in self._function_execution_info[driver_id]):
return
module_name, function_name = (
function_descriptor.module_name,
function_descriptor.function_name,
)
try:
module = importlib.import_module(module_name)
function = getattr(module, function_name)._function
self._function_execution_info[driver_id][function_id] = (
FunctionExecutionInfo(
function=function,
function_name=function_name,
max_calls=0,
))
self._num_task_executions[driver_id][function_id] = 0
except Exception:
            logger.exception(
                "Failed to load function %s.", function_name)
raise Exception(
"Function {} failed to be loaded from local code.".format(
function_descriptor))
def _wait_for_function(self, function_descriptor, driver_id, timeout=10):
"""Wait until the function to be executed is present on this worker.
This method will simply loop until the import thread has imported the
relevant function. If we spend too long in this loop, that may indicate
a problem somewhere and we will push an error message to the user.
If this worker is an actor, then this will wait until the actor has
been defined.
Args:
function_descriptor : The FunctionDescriptor of the function that
we want to execute.
driver_id (str): The ID of the driver to push the error message to
if this times out.
"""
start_time = time.time()
# Only send the warning once.
warning_sent = False
while True:
with self.lock:
if (self._worker.actor_id.is_nil()
and (function_descriptor.function_id in
self._function_execution_info[driver_id])):
break
elif not self._worker.actor_id.is_nil() and (
self._worker.actor_id in self._worker.actors):
break
if time.time() - start_time > timeout:
warning_message = ("This worker was asked to execute a "
"function that it does not have "
"registered. You may have to restart "
"Ray.")
if not warning_sent:
ray.utils.push_error_to_driver(
self._worker,
ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,
warning_message,
driver_id=driver_id)
warning_sent = True
time.sleep(0.001)
def _publish_actor_class_to_key(self, key, actor_class_info):
"""Push an actor class definition to Redis.
        This is factored out as a separate function because it is also called
on cached actor class definitions when a worker connects for the first
time.
Args:
key: The key to store the actor class info at.
actor_class_info: Information about the actor class.
"""
# We set the driver ID here because it may not have been available when
# the actor class was defined.
self._worker.redis_client.hmset(key, actor_class_info)
self._worker.redis_client.rpush("Exports", key)
def export_actor_class(self, Class, actor_method_names):
if self._worker.load_code_from_local:
return
function_descriptor = FunctionDescriptor.from_class(Class)
# `task_driver_id` shouldn't be NIL, unless:
# 1) This worker isn't an actor;
# 2) And a previous task started a background thread, which didn't
# finish before the task finished, and still uses Ray API
# after that.
assert not self._worker.task_driver_id.is_nil(), (
"You might have started a background thread in a non-actor task, "
"please make sure the thread finishes before the task finishes.")
driver_id = self._worker.task_driver_id
key = (b"ActorClass:" + driver_id.binary() + b":" +
function_descriptor.function_id.binary())
actor_class_info = {
"class_name": Class.__name__,
"module": Class.__module__,
"class": pickle.dumps(Class),
"driver_id": driver_id.binary(),
"actor_method_names": json.dumps(list(actor_method_names))
}
check_oversized_pickle(actor_class_info["class"],
actor_class_info["class_name"], "actor",
self._worker)
if self._worker.mode is None:
# This means that 'ray.init()' has not been called yet and so we
# must cache the actor class definition and export it when
# 'ray.init()' is called.
assert self._actors_to_export is not None
self._actors_to_export.append((key, actor_class_info))
# This caching code path is currently not used because we only
# export actor class definitions lazily when we instantiate the
# actor for the first time.
assert False, "This should be unreachable."
else:
self._publish_actor_class_to_key(key, actor_class_info)
# TODO(rkn): Currently we allow actor classes to be defined
# within tasks. I tried to disable this, but it may be necessary
# because of https://github.com/ray-project/ray/issues/1146.
def load_actor_class(self, driver_id, function_descriptor):
"""Load the actor class.
Args:
driver_id: Driver ID of the actor.
function_descriptor: Function descriptor of the actor constructor.
Returns:
The actor class.
"""
function_id = function_descriptor.function_id
# Check if the actor class already exists in the cache.
actor_class = self._loaded_actor_classes.get(function_id, None)
if actor_class is None:
# Load actor class.
if self._worker.load_code_from_local:
driver_id = ray.DriverID.nil()
# Load actor class from local code.
actor_class = self._load_actor_from_local(
driver_id, function_descriptor)
else:
# Load actor class from GCS.
actor_class = self._load_actor_class_from_gcs(
driver_id, function_descriptor)
# Save the loaded actor class in cache.
self._loaded_actor_classes[function_id] = actor_class
# Generate execution info for the methods of this actor class.
module_name = function_descriptor.module_name
actor_class_name = function_descriptor.class_name
actor_methods = inspect.getmembers(
actor_class, predicate=is_function_or_method)
for actor_method_name, actor_method in actor_methods:
method_descriptor = FunctionDescriptor(
module_name, actor_method_name, actor_class_name)
method_id = method_descriptor.function_id
executor = self._make_actor_method_executor(
actor_method_name,
actor_method,
actor_imported=True,
)
self._function_execution_info[driver_id][method_id] = (
FunctionExecutionInfo(
function=executor,
function_name=actor_method_name,
max_calls=0,
))
self._num_task_executions[driver_id][method_id] = 0
self._num_task_executions[driver_id][function_id] = 0
return actor_class
def _load_actor_from_local(self, driver_id, function_descriptor):
"""Load actor class from local code."""
module_name, class_name = (function_descriptor.module_name,
function_descriptor.class_name)
try:
module = importlib.import_module(module_name)
actor_class = getattr(module, class_name)
if isinstance(actor_class, ray.actor.ActorClass):
return actor_class._modified_class
else:
return actor_class
except Exception:
            logger.exception(
                "Failed to load actor_class %s.", class_name)
raise Exception(
"Actor {} failed to be imported from local code.".format(
class_name))
def _create_fake_actor_class(self, actor_class_name, actor_method_names):
class TemporaryActor(object):
pass
def temporary_actor_method(*xs):
raise Exception(
"The actor with name {} failed to be imported, "
"and so cannot execute this method.".format(actor_class_name))
for method in actor_method_names:
setattr(TemporaryActor, method, temporary_actor_method)
return TemporaryActor
def _load_actor_class_from_gcs(self, driver_id, function_descriptor):
"""Load actor class from GCS."""
key = (b"ActorClass:" + driver_id.binary() + b":" +
function_descriptor.function_id.binary())
# Wait for the actor class key to have been imported by the
# import thread. TODO(rkn): It shouldn't be possible to end
# up in an infinite loop here, but we should push an error to
# the driver if too much time is spent here.
while key not in self.imported_actor_classes:
time.sleep(0.001)
# Fetch raw data from GCS.
(driver_id_str, class_name, module, pickled_class,
actor_method_names) = self._worker.redis_client.hmget(
key, [
"driver_id", "class_name", "module", "class",
"actor_method_names"
])
class_name = ensure_str(class_name)
module_name = ensure_str(module)
driver_id = ray.DriverID(driver_id_str)
actor_method_names = json.loads(ensure_str(actor_method_names))
actor_class = None
try:
with self.lock:
actor_class = pickle.loads(pickled_class)
except Exception:
            logger.exception(
                "Failed to load actor class %s.", class_name)
# The actor class failed to be unpickled, create a fake actor
# class instead (just to produce error messages and to prevent
# the driver from hanging).
actor_class = self._create_fake_actor_class(
class_name, actor_method_names)
# If an exception was thrown when the actor was imported, we record
# the traceback and notify the scheduler of the failure.
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
# Log the error message.
push_error_to_driver(
self._worker, ray_constants.REGISTER_ACTOR_PUSH_ERROR,
"Failed to unpickle actor class '{}' for actor ID {}. "
"Traceback:\n{}".format(class_name,
self._worker.actor_id.hex(),
traceback_str), driver_id)
# TODO(rkn): In the future, it might make sense to have the worker
# exit here. However, currently that would lead to hanging if
# someone calls ray.get on a method invoked on the actor.
# The below line is necessary. Because in the driver process,
# if the function is defined in the file where the python script
# was started from, its module is `__main__`.
# However in the worker process, the `__main__` module is a
# different module, which is `default_worker.py`
actor_class.__module__ = module_name
return actor_class
def _make_actor_method_executor(self, method_name, method, actor_imported):
"""Make an executor that wraps a user-defined actor method.
The wrapped method updates the worker's internal state and performs any
necessary checkpointing operations.
Args:
method_name (str): The name of the actor method.
method (instancemethod): The actor method to wrap. This should be a
method defined on the actor class and should therefore take an
instance of the actor as the first argument.
actor_imported (bool): Whether the actor has been imported.
Checkpointing operations will not be run if this is set to
False.
Returns:
A function that executes the given actor method on the worker's
stored instance of the actor. The function also updates the
worker's internal state to record the executed method.
"""
def actor_method_executor(dummy_return_id, actor, *args):
# Update the actor's task counter to reflect the task we're about
# to execute.
self._worker.actor_task_counter += 1
# Execute the assigned method and save a checkpoint if necessary.
try:
if is_class_method(method):
method_returns = method(*args)
else:
method_returns = method(actor, *args)
except Exception as e:
# Save the checkpoint before allowing the method exception
# to be thrown, but don't save the checkpoint for actor
# creation task.
if (isinstance(actor, ray.actor.Checkpointable)
and self._worker.actor_task_counter != 1):
self._save_and_log_checkpoint(actor)
raise e
else:
# Handle any checkpointing operations before storing the
# method's return values.
                # NOTE(swang): If method_returns is a pointer to the actor's
                # state, then the checkpointing operations can modify the return
                # values if they mutate the actor's state. Is this okay?
if isinstance(actor, ray.actor.Checkpointable):
# If this is the first task to execute on the actor, try to
# resume from a checkpoint.
if self._worker.actor_task_counter == 1:
if actor_imported:
self._restore_and_log_checkpoint(actor)
else:
# Save the checkpoint before returning the method's
# return values.
self._save_and_log_checkpoint(actor)
return method_returns
return actor_method_executor
def _save_and_log_checkpoint(self, actor):
"""Save an actor checkpoint if necessary and log any errors.
Args:
actor: The actor to checkpoint.
Returns:
The result of the actor's user-defined `save_checkpoint` method.
"""
actor_id = self._worker.actor_id
checkpoint_info = self._worker.actor_checkpoint_info[actor_id]
checkpoint_info.num_tasks_since_last_checkpoint += 1
now = int(1000 * time.time())
checkpoint_context = ray.actor.CheckpointContext(
actor_id, checkpoint_info.num_tasks_since_last_checkpoint,
now - checkpoint_info.last_checkpoint_timestamp)
# If we should take a checkpoint, notify raylet to prepare a checkpoint
# and then call `save_checkpoint`.
if actor.should_checkpoint(checkpoint_context):
try:
now = int(1000 * time.time())
checkpoint_id = (self._worker.raylet_client.
prepare_actor_checkpoint(actor_id))
checkpoint_info.checkpoint_ids.append(checkpoint_id)
actor.save_checkpoint(actor_id, checkpoint_id)
if (len(checkpoint_info.checkpoint_ids) >
ray._config.num_actor_checkpoints_to_keep()):
actor.checkpoint_expired(
actor_id,
checkpoint_info.checkpoint_ids.pop(0),
)
checkpoint_info.num_tasks_since_last_checkpoint = 0
checkpoint_info.last_checkpoint_timestamp = now
except Exception:
# Checkpoint save or reload failed. Notify the driver.
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
ray.utils.push_error_to_driver(
self._worker,
ray_constants.CHECKPOINT_PUSH_ERROR,
traceback_str,
driver_id=self._worker.task_driver_id)
def _restore_and_log_checkpoint(self, actor):
"""Restore an actor from a checkpoint if available and log any errors.
This should only be called on workers that have just executed an actor
creation task.
Args:
actor: The actor to restore from a checkpoint.
"""
actor_id = self._worker.actor_id
try:
checkpoints = ray.actor.get_checkpoints_for_actor(actor_id)
if len(checkpoints) > 0:
# If we found previously saved checkpoints for this actor,
# call the `load_checkpoint` callback.
checkpoint_id = actor.load_checkpoint(actor_id, checkpoints)
if checkpoint_id is not None:
# Check that the returned checkpoint id is in the
# `available_checkpoints` list.
msg = (
"`load_checkpoint` must return a checkpoint id that " +
"exists in the `available_checkpoints` list, or eone.")
assert any(checkpoint_id == checkpoint.checkpoint_id
for checkpoint in checkpoints), msg
# Notify raylet that this actor has been resumed from
# a checkpoint.
(self._worker.raylet_client.
notify_actor_resumed_from_checkpoint(
actor_id, checkpoint_id))
except Exception:
# Checkpoint save or reload failed. Notify the driver.
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
ray.utils.push_error_to_driver(
self._worker,
ray_constants.CHECKPOINT_PUSH_ERROR,
traceback_str,
driver_id=self._worker.task_driver_id)
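# A generic, standalone sketch (not part of the original module) of the
# poll-with-a-single-warning pattern used by _wait_for_function above: spin
# until a condition holds, fire the warning callback once if the timeout
# elapses, but keep waiting rather than giving up.
def _poll_until_registered(condition, warn_once, timeout=10, interval=0.001):
    start_time = time.time()
    warning_sent = False
    while not condition():
        if time.time() - start_time > timeout and not warning_sent:
            warn_once()
            warning_sent = True
        time.sleep(interval)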
| {
"content_hash": "20789a7b28bc07ba2a4dbfe6e10ba5fd",
"timestamp": "",
"source": "github",
"line_count": 901,
"max_line_length": 79,
"avg_line_length": 42.72586015538291,
"alnum_prop": 0.5807616375727348,
"repo_name": "atumanov/ray",
"id": "4914c9f870501ac22814f650ef8e295cee01a582",
"size": "38496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/function_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20715"
},
{
"name": "C++",
"bytes": "1036803"
},
{
"name": "CSS",
"bytes": "9262"
},
{
"name": "Dockerfile",
"bytes": "3411"
},
{
"name": "HTML",
"bytes": "32704"
},
{
"name": "Java",
"bytes": "517715"
},
{
"name": "JavaScript",
"bytes": "8178"
},
{
"name": "Jupyter Notebook",
"bytes": "1610"
},
{
"name": "Python",
"bytes": "3081422"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "76928"
},
{
"name": "Smarty",
"bytes": "955"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui, QtOpenGL
import time
class ProgressBar(QtGui.QProgressBar):
def __init__(self):
super(ProgressBar, self).__init__()
self.setWindowIcon(QtGui.QIcon('icons/windu_vision.png'))
self.setGeometry(200, 200, 640, 45)
self.setFixedSize(640, 45)
self.setTextVisible(True)
def progress_update(self, text_value):
if not self.isVisible():
self.show()
text, value = text_value
self.setWindowTitle(text)
self.setValue(value)
if value == 100:
time.sleep(0.5)
self.hide()
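# A minimal usage sketch (not part of the original module, assuming PyQt4 is
# installed): a QApplication must exist before the widget is created. The
# window title and value below are arbitrary.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    bar = ProgressBar()
    bar.progress_update(('Loading frames', 50))
    sys.exit(app.exec_())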
| {
"content_hash": "fbe0efcf91e0ae9cdc2914947c4c4462",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 65,
"avg_line_length": 23.884615384615383,
"alnum_prop": 0.5942028985507246,
"repo_name": "linyc74/WinduVision",
"id": "8d5205d74bee9b51c94f887b37cd384677cfcd36",
"size": "621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/gui_progress_bar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113806"
}
],
"symlink_target": ""
} |
""" Multiuser Node Chat script for x/84, https://github.com/jquast/x84
This script was coded by Jeff Quast to be a part of x/84 v1.x and it
was taken out in favour of the splitscreen chatter. It's not supported
anymore and is to be used at your own RISK.
"""
# unfortunately the hardlinemode stuff that irssi and such uses
# is not used, so each line causes a full screen refresh ..
import time
POLL_KEY = 0.15 # blocking ;; how often to poll keyboard
POLL_OUT = 0.25 # seconds elapsed before screen update, prevents flood
CHANNEL = None
NICKS = dict()
EXIT = False
def show_help():
""" return string suitable for response to /help. """
return u'\n'.join((
u' /join #channel',
u' /act mesg',
u' /part [reason]',
u' /quit [reason]',
u' /users',
u' /whois handle',))
def process(mesg):
"""
    Process a command received by the event system and return a string
    suitable for displaying in the chat window.
"""
from x84.bbs import getsession
session = getsession()
sid, tgt_channel, (handle, cmd, args) = mesg
ucs = u''
# pylint: disable=W0602
# Using global for 'NICKS' but no assignment is done
global NICKS
if (CHANNEL != tgt_channel and 'sysop' not in session.user.groups):
ucs = u''
elif cmd == 'join':
if handle not in NICKS:
NICKS[handle] = sid
ucs = show_join(handle, sid, tgt_channel)
elif handle not in NICKS:
NICKS[handle] = sid
elif cmd == 'part':
if handle in NICKS:
del NICKS[handle]
ucs = show_part(handle, sid, tgt_channel, args)
elif cmd == 'say':
ucs = show_say(handle, tgt_channel, args)
elif cmd == 'act':
ucs = show_act(handle, tgt_channel, args)
else:
ucs = u'unhandled: %r' % (mesg,)
return ucs
def show_act(handle, tgt_channel, mesg):
""" return terminal sequence for /act performed by handle. """
from x84.bbs import getsession, getterminal
session, term = getsession(), getterminal()
return u''.join((
time.strftime('%H:%M'), u' * ',
(term.bold_green(handle) if handle != session.user.handle
else term.green(handle)),
(u':%s' % (tgt_channel,)
if 'sysop' in session.user.groups
else u''), u' ',
mesg,))
def show_join(handle, sid, chan):
""" return terminal sequence for /join performed by handle. """
from x84.bbs import getsession, getterminal
session, term = getsession(), getterminal()
return u''.join((
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', term.bold_cyan(handle), u' ',
(u''.join((term.bold_black('['),
term.cyan(sid), term.bold_black(']'), u' ',))
if 'sysop' in session.user.groups else u''),
'has joined ',
term.bold(chan),))
def show_part(handle, sid, chan, reason):
""" return terminal sequence for /part performed by handle. """
from x84.bbs import getsession, getterminal
session, term = getsession(), getterminal()
return u''.join((
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', term.bold_cyan(handle), u' ',
(u''.join((term.bold_black('['),
term.cyan(sid), term.bold_black(']'), u' ',))
if 'sysop' in session.user.groups else u''),
'has left ',
term.bold(chan),
u' (%s)' % (reason,) if reason and 0 != len(reason) else u'',))
def show_whois(attrs):
""" return terminal sequence for /whois result. """
from x84.bbs import getsession, getterminal, timeago
session, term = getsession(), getterminal()
return u''.join((
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', term.bold(attrs['handle']), u' ',
(u''.join((term.bold_black('['),
term.cyan(attrs['sid']), term.bold_black(']'), u' ',))
if 'sysop' in session.user.groups else u''), u'\n',
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', u'CONNECtED ',
term.bold_cyan(timeago(time.time() - attrs['connect_time'])),
' AGO.', u'\n',
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', term.bold(u'idlE: '),
term.bold_cyan(timeago(time.time() - attrs['idle'])), u'\n',
))
def show_nicks(handles):
""" return terminal sequence for /users result. """
from x84.bbs import getterminal
term = getterminal()
return u''.join((
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', term.bold_cyan('%d' % (len(handles))), u' ',
u'user%s: ' % (u's' if len(handles) > 1 else u''),
u', '.join(handles) + u'\n',))
def show_say(handle, tgt_channel, mesg):
""" return terminal sequence for /say performed by handle. """
from x84.bbs import getsession, getterminal, get_user
session, term = getsession(), getterminal()
return u''.join((
time.strftime('%H:%M'), u' ',
term.bold_black(u'<'),
(term.bold_red(u'@') if handle != 'anonymous'
and 'sysop' in get_user(handle).groups
else u''),
(handle if handle != session.user.handle
else term.bold(handle)),
(u':%s' % (tgt_channel,)
if 'sysop' in session.user.groups
else u''),
term.bold_black(u'>'), u' ',
mesg,))
def get_inputbar(pager):
""" Return ScrollingEditor for use as inputbar. """
from x84.bbs import getterminal, ScrollingEditor
term = getterminal()
width = pager.visible_width - 2
yloc = (pager.yloc + pager.height) - 2
xloc = pager.xloc + 2
ibar = ScrollingEditor(width=width, yloc=yloc, xloc=xloc)
ibar.enable_scrolling = True
ibar.max_length = 512
ibar.colors['highlight'] = term.cyan_reverse
return ibar
def get_pager(pager=None):
""" Return Pager for use as chat window. """
from x84.bbs import getterminal, Pager
term = getterminal()
height = (term.height - 4)
width = int(term.width * .9)
yloc = term.height - height - 1
xloc = int(term.width / 2) - (width / 2)
new_pager = Pager(height, width, yloc, xloc)
if pager is not None:
content = pager.content
# little hack to keep empty lines from re-importing
for row in range(len(content)):
ucs = content[row]
if ucs.startswith(u'\x1b(B'):
ucs = ucs[len(u'\x1b(B'):]
if ucs.endswith(u'\x1b[m'):
ucs = ucs[len(u'\x1b[m'):]
content[row] = ucs
new_pager.update('\r\n'.join(content))
new_pager.enable_scrolling = True
new_pager.colors['border'] = term.cyan
new_pager.glyphs['right-vert'] = u'|'
new_pager.glyphs['left-vert'] = u'|'
new_pager.glyphs['bot-horiz'] = u''
return new_pager
def main(channel=None, caller=None):
""" Main procedure. """
# pylint: disable=R0914,R0912,W0603
# Too many local variables
# Too many branches
# Using the global statement
from x84.bbs import getsession, getterminal, getch, echo
session, term = getsession(), getterminal()
global CHANNEL, NICKS
CHANNEL = '#partyline' if channel is None else channel
NICKS = dict()
    # sysop reply_to is -1 to force user, otherwise prompt
if channel == session.sid and caller not in (-1, None):
echo(u''.join((
term.normal, u'\a',
u'\r\n', term.clear_eol,
u'\r\n', term.clear_eol,
term.bold_green(u' ** '),
caller,
u' would like to chat, accept? ',
term.bold(u'['),
term.bold_green_underline(u'yn'),
term.bold(u']'),
)))
while True:
inp = getch()
if inp in (u'y', u'Y'):
break
elif inp in (u'n', u'N'):
return False
def refresh(pager, ipb, init=False):
""" Returns terminal sequence suitable for refreshing screen. """
session.activity = 'Chatting in %s' % (
CHANNEL if not CHANNEL.startswith('#')
and not 'sysop' in session.user.groups
else u'PRiVAtE ChANNEl',) if CHANNEL is not None else (
u'WAitiNG fOR ChAt')
pager.move_end()
return u''.join((
u''.join((u'\r\n', term.clear_eol,
u'\r\n', term.clear_eol,
term.bold_cyan(u'//'),
u' CitZENS bANd'.center(term.width).rstrip(),
term.clear_eol,
(u'\r\n' + term.clear_eol) * (pager.height + 2),
pager.border())) if init else u'',
pager.title(u''.join((
term.bold_cyan(u']- '),
CHANNEL if CHANNEL is not None else u'',
term.bold_cyan(u' -['),))),
pager.refresh(),
ipb.refresh(),))
def process_cmd(pager, msg):
""" Process command recieved and display result in chat window. """
cmd, args = msg.split()[0], msg.split()[1:]
# pylint: disable=W0603
# Using the global statement
global CHANNEL, NICKS, EXIT
if cmd.lower() == '/help':
pager.append(show_help())
return True
elif cmd.lower() == '/join' and len(args) == 1:
part_chan('lEAViNG fOR ANOthER ChANNEl')
CHANNEL = args[0]
NICKS = dict()
join_chan()
return True
elif cmd.lower() in ('/act', '/me',):
act(u' '.join(args))
elif cmd.lower() == '/say':
say(u' '.join(args))
elif cmd.lower() == '/part':
part_chan(u' '.join(args))
CHANNEL = None
NICKS = dict()
return True
elif cmd.lower() == '/quit':
part_chan('quit')
EXIT = True
elif cmd.lower() == '/users':
pager.append(show_nicks(NICKS.keys()))
return True
elif cmd.lower() == '/whois' and len(args) == 1:
whois(args[0])
return False
def broadcast_cc(payload):
""" Broadcast chat even, carbon copy ourselves. """
session.send_event('global', ('chat', payload))
session.buffer_event('global', ('chat', payload))
def join_chan():
""" Bradcast chat even for /join. """
payload = (session.sid, CHANNEL, (session.user.handle, 'join', None))
broadcast_cc(payload)
def say(mesg):
""" Signal chat event for /say. """
payload = (session.sid, CHANNEL, (session.user.handle, 'say', mesg))
broadcast_cc(payload)
def act(mesg):
""" Signal chat event for /act. """
payload = (session.sid, CHANNEL, (session.user.handle, 'act', mesg))
broadcast_cc(payload)
def part_chan(reason):
""" Signal chat event for /part. """
payload = (session.sid, CHANNEL, (session.user.handle, 'part', reason))
broadcast_cc(payload)
def whois(handle):
""" Perform /whois request for ``handle``. """
        if handle not in NICKS:
return
session.send_event('route', (NICKS[handle], 'info-req', session.sid,))
def whois_response(attrs):
""" Display /whois response for given ``attrs``. """
return show_whois(attrs)
pager = get_pager(None) # output window
readline = get_inputbar(pager) # input bar
echo(refresh(pager, readline, init=True))
echo(pager.append("tYPE '/quit' tO EXit."))
dirty = time.time()
join_chan()
while not EXIT:
inp = getch(POLL_KEY)
# poll for and process screen resize
if session.poll_event('refresh') or (
inp in (term.KEY_REFRESH, unichr(12))):
pager = get_pager(pager)
saved_inp = readline.content
readline = get_inputbar(pager)
readline.content = saved_inp
echo(refresh(pager, readline, init=True))
dirty = None
# poll for and process chat events,
mesg = session.poll_event('global')
if mesg is not None:
otxt = process(mesg[1])
if 0 != len(otxt):
echo(pager.append(otxt))
dirty = None if dirty is None else time.time()
# poll for whois response
data = session.poll_event('info-ack')
if data is not None:
# session id, attributes = data
echo(pager.append(whois_response(data[1])))
dirty = None if dirty is None else time.time()
# process keystroke as input, or, failing that,
# as a command key to the pager. refresh portions of
        # input bar or act on carriage return, accordingly.
elif inp is not None:
otxt = readline.process_keystroke(inp)
if readline.carriage_returned:
if readline.content.startswith('/'):
if process_cmd(pager, readline.content):
pager = get_pager(pager)
echo(refresh(pager, readline, init=True))
elif (0 != len(readline.content.strip())
and CHANNEL is not None):
say(readline.content)
readline = get_inputbar(pager)
echo(readline.refresh())
elif 0 == len(otxt):
if type(inp) is int:
echo(pager.process_keystroke(inp))
else:
echo(u''.join((
readline.fixate(-1),
readline.colors.get('highlight', u''),
otxt, term.normal)))
        # update pager contents. It's a lot for 9600bps ..
if dirty is not None and time.time() - dirty > POLL_OUT:
echo(refresh(pager, readline))
dirty = None
echo(u''.join((term.move(term.height, 0), term.normal)))
return True
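# An illustrative sketch (not part of the original script): chat events travel
# as (sid, target_channel, (handle, command, args)) tuples, and process() above
# turns them into display strings. The sid and handle below are made up.
def _example_say_payload():
    return ('1', '#partyline', (u'jeff', 'say', u'hello, node chat'))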
| {
"content_hash": "9ae1e332034ca3b4d0c90f888bafa31f",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 79,
"avg_line_length": 35.857868020304565,
"alnum_prop": 0.5375849377123443,
"repo_name": "x84-extras/nodechat",
"id": "67cbfd0b807dc84321bfbda74eb99d77757e48f3",
"size": "14128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodechat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14128"
}
],
"symlink_target": ""
} |
import Gaffer
import GafferUI
class TextInputDialogue( GafferUI.Dialogue ) :
def __init__( self, initialText="", title="Enter text", cancelLabel="Cancel", confirmLabel="OK", **kw ) :
GafferUI.Dialogue.__init__( self, title, sizeMode=GafferUI.Window.SizeMode.Fixed, **kw )
self.__textWidget = GafferUI.TextWidget( initialText )
self.__textWidget.setSelection( None, None ) # all text
self._setWidget( self.__textWidget )
self.__textActivatedConnection = self.__textWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__textActivated ) )
self.__cancelButton = self._addButton( cancelLabel )
self.__confirmButton = self._addButton( confirmLabel )
def waitForText( self, **kw ) :
self.__textWidget.grabFocus()
button = self.waitForButton( **kw )
if button is self.__confirmButton :
return self.__textWidget.getText()
return None
def __textActivated( self, textWidget ) :
assert( textWidget is self.__textWidget )
self.__confirmButton.clickedSignal()( self.__confirmButton )
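# A minimal usage sketch (not part of the original module); it assumes a running
# GafferUI event loop, since waitForText() blocks in waitForButton(). The
# initialText and title values below are arbitrary.
def _exampleTextInput() :
	dialogue = TextInputDialogue( initialText="untitled", title="Node name" )
	name = dialogue.waitForText()
	# name is the entered text, or None if the Cancel button was pressed
	return name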
| {
"content_hash": "c59f80ad3d9043e591628d81d3bf9090",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 123,
"avg_line_length": 31.875,
"alnum_prop": 0.7147058823529412,
"repo_name": "cedriclaunay/gaffer",
"id": "fb3ffdaeb05d3a9132027dc8b5d1288a572949f0",
"size": "2885",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/GafferUI/TextInputDialogue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8478"
},
{
"name": "C++",
"bytes": "3754297"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3726245"
},
{
"name": "Shell",
"bytes": "7956"
},
{
"name": "Slash",
"bytes": "39241"
}
],
"symlink_target": ""
} |
'''
This module provides an ISO 8601:2004 duration parser.
It also provides a wrapper to strftime. This wrapper makes it easier to
format timedelta or Duration instances as ISO conforming strings.
'''
from datetime import timedelta
from decimal import Decimal
import re
from .duration import Duration
from .isoerror import ISO8601Error
from .isodatetime import parse_datetime
from .isostrf import strftime, D_DEFAULT
ISO8601_PERIOD_REGEX = re.compile(r"^(?P<sign>[+-])?"
r"P(?P<years>[0-9]+([,.][0-9]+)?Y)?"
r"(?P<months>[0-9]+([,.][0-9]+)?M)?"
r"(?P<weeks>[0-9]+([,.][0-9]+)?W)?"
r"(?P<days>[0-9]+([,.][0-9]+)?D)?"
r"((?P<separator>T)(?P<hours>[0-9]+([,.][0-9]+)?H)?"
r"(?P<minutes>[0-9]+([,.][0-9]+)?M)?"
r"(?P<seconds>[0-9]+([,.][0-9]+)?S)?)?$")
# regular expression to parse ISO duration strings.
def parse_duration(datestring):
"""
    Parses an ISO 8601 duration into a datetime.timedelta or Duration object.
If the ISO date string does not contain years or months, a timedelta
instance is returned, else a Duration instance is returned.
The following duration formats are supported:
-PnnW duration in weeks
-PnnYnnMnnDTnnHnnMnnS complete duration specification
-PYYYYMMDDThhmmss basic alternative complete date format
-PYYYY-MM-DDThh:mm:ss extended alternative complete date format
-PYYYYDDDThhmmss basic alternative ordinal date format
-PYYYY-DDDThh:mm:ss extended alternative ordinal date format
The '-' is optional.
Limitations: ISO standard defines some restrictions about where to use
fractional numbers and which component and format combinations are
allowed. This parser implementation ignores all those restrictions and
returns something when it is able to find all necessary components.
In detail:
    it does not check whether only the last component has fractions.
it allows weeks specified with all other combinations
The alternative format does not support durations with years, months or
days set to 0.
"""
if not isinstance(datestring, basestring):
raise TypeError("Expecting a string %r" % datestring)
match = ISO8601_PERIOD_REGEX.match(datestring)
if not match:
# try alternative format:
if datestring.startswith("P"):
durdt = parse_datetime(datestring[1:])
if durdt.year != 0 or durdt.month != 0:
# create Duration
ret = Duration(days=durdt.day, seconds=durdt.second,
microseconds=durdt.microsecond,
minutes=durdt.minute, hours=durdt.hour,
months=durdt.month, years=durdt.year)
else: # FIXME: currently not possible in alternative format
# create timedelta
ret = timedelta(days=durdt.day, seconds=durdt.second,
microseconds=durdt.microsecond,
minutes=durdt.minute, hours=durdt.hour)
return ret
raise ISO8601Error("Unable to parse duration string %r" % datestring)
groups = match.groupdict()
for key, val in groups.items():
if key not in ('separator', 'sign'):
if val is None:
groups[key] = "0n"
#print groups[key]
if key in ('years', 'months'):
groups[key] = Decimal(groups[key][:-1].replace(',', '.'))
else:
# these values are passed into a timedelta object, which works with floats.
groups[key] = float(groups[key][:-1].replace(',', '.'))
if groups["years"] == 0 and groups["months"] == 0:
ret = timedelta(days=groups["days"], hours=groups["hours"],
minutes=groups["minutes"], seconds=groups["seconds"],
weeks=groups["weeks"])
if groups["sign"] == '-':
ret = timedelta(0) - ret
else:
ret = Duration(years=groups["years"], months=groups["months"],
days=groups["days"], hours=groups["hours"],
minutes=groups["minutes"], seconds=groups["seconds"],
weeks=groups["weeks"])
if groups["sign"] == '-':
ret = Duration(0) - ret
return ret
def duration_isoformat(tduration, format=D_DEFAULT):
'''
Format duration strings.
This method is just a wrapper around isodate.isostrf.strftime and uses
P%P (D_DEFAULT) as default format.
'''
# TODO: implement better decision for negative Durations.
# should be done in Duration class in consistent way with timedelta.
if ((isinstance(tduration, Duration) and (tduration.years < 0 or
tduration.months < 0 or
tduration.tdelta < timedelta(0)))
or (isinstance(tduration, timedelta) and (tduration < timedelta(0)))):
ret = '-'
else:
ret = ''
ret += strftime(tduration, format)
return ret
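# A brief usage sketch (not part of the original module): parse_duration returns
# a datetime.timedelta when no years or months are present, and a Duration
# otherwise; duration_isoformat renders either back to an ISO 8601 string.
def _duration_examples():
    delta = parse_duration("P3DT12H30M")    # -> timedelta(days=3, hours=12, minutes=30)
    dur = parse_duration("P1Y2M10DT2H30M")  # -> Duration, because years/months are set
    return duration_isoformat(delta), duration_isoformat(dur)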
| {
"content_hash": "3d0d5491cf086651242adacfa8ee9141",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 91,
"avg_line_length": 43.72268907563025,
"alnum_prop": 0.5862002690755334,
"repo_name": "frankyrumple/smc",
"id": "e5430773af0389dbe04b6546c53ef73308b82720",
"size": "6749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/isodate/isoduration.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "263"
},
{
"name": "C",
"bytes": "631"
},
{
"name": "CSS",
"bytes": "48816"
},
{
"name": "HTML",
"bytes": "155252"
},
{
"name": "JavaScript",
"bytes": "339188"
},
{
"name": "Python",
"bytes": "2976944"
}
],
"symlink_target": ""
} |
from sympy import Matrix, I, Real, Integer
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.state import Bra, Ket
from sympy.physics.quantum.operator import Operator, OuterProduct
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.tensorproduct import matrix_tensor_product
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.innerproduct import InnerProduct
from sympy.physics.quantum.matrixutils import (
to_sympy, to_numpy, to_scipy_sparse, numpy_ndarray, scipy_sparse_matrix
)
Amat = Matrix([[1,I],[-I,1]])
Bmat = Matrix([[1,2],[3,4]])
Avec = Matrix([[1],[I]])
class AKet(Ket):
@property
def dual_class(self):
return ABra
def _represent_default_basis(self, **options):
return self._represent_AOp(None, **options)
def _represent_AOp(self, basis, **options):
return Avec
class ABra(Bra):
@property
def dual_class(self):
return AKet
class AOp(Operator):
def _represent_default_basis(self, **options):
return self._represent_AOp(None, **options)
def _represent_AOp(self, basis, **options):
return Amat
class BOp(Operator):
def _represent_default_basis(self, **options):
return self._represent_AOp(None, **options)
def _represent_AOp(self, basis, **options):
return Bmat
k = AKet('a')
b = ABra('a')
A = AOp('A')
B = BOp('B')
_tests = [
# Bra
(b, Dagger(Avec)),
(Dagger(b), Avec),
# Ket
(k, Avec),
(Dagger(k), Dagger(Avec)),
# Operator
(A, Amat),
(Dagger(A), Dagger(Amat)),
# OuterProduct
(OuterProduct(k,b), Avec*Avec.H),
# TensorProduct
(TensorProduct(A,B), matrix_tensor_product(Amat,Bmat)),
# Pow
(A**2, Amat**2),
# Add/Mul
(A*B + 2*A, Amat*Bmat + 2*Amat),
# Commutator
(Commutator(A,B), Amat*Bmat - Bmat*Amat),
# AntiCommutator
(AntiCommutator(A,B), Amat*Bmat + Bmat*Amat),
# InnerProduct
(InnerProduct(b,k), (Avec.H*Avec)[0])
]
def test_format_sympy():
for test in _tests:
lhs = represent(test[0], basis=A, format='sympy')
rhs = to_sympy(test[1])
assert lhs == rhs
def test_scalar_sympy():
assert represent(Integer(1)) == Integer(1)
assert represent(Real(1.0)) == Real(1.0)
assert represent(1.0+I) == 1.0+I
try:
import numpy as np
except ImportError:
pass
else:
def test_format_numpy():
for test in _tests:
lhs = represent(test[0], basis=A, format='numpy')
rhs = to_numpy(test[1])
if isinstance(lhs, numpy_ndarray):
assert (lhs == rhs).all()
else:
assert lhs == rhs
def test_scalar_numpy():
assert represent(Integer(1), format='numpy') == 1
assert represent(Real(1.0), format='numpy') == 1.0
assert represent(1.0+I, format='numpy') == 1.0+1.0j
try:
import numpy as np
from scipy import sparse
except ImportError:
pass
else:
def test_format_scipy_sparse():
for test in _tests:
lhs = represent(test[0], basis=A, format='scipy.sparse')
rhs = to_scipy_sparse(test[1])
if isinstance(lhs, scipy_sparse_matrix):
assert np.linalg.norm((lhs-rhs).todense()) == 0.0
else:
assert lhs == rhs
def test_scalar_scipy_sparse():
assert represent(Integer(1), format='scipy.sparse') == 1
assert represent(Real(1.0), format='scipy.sparse') == 1.0
assert represent(1.0+I, format='scipy.sparse') == 1.0+1.0j
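# A small illustrative sketch (not part of the original test suite): represent()
# also reduces products of the classes defined above, so an operator applied to
# a ket becomes the corresponding matrix-vector product.
def _represent_operator_on_ket_sketch():
    lhs = represent(A*k, basis=A, format='sympy')
    rhs = to_sympy(Amat*Avec)
    assert lhs == rhs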
| {
"content_hash": "3a942554134b7089b88b0c519717c27e",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 75,
"avg_line_length": 26.65,
"alnum_prop": 0.6196730099169123,
"repo_name": "tarballs-are-good/sympy",
"id": "f0634911ada56796b54228080beab3b00216b48f",
"size": "3731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/physics/quantum/tests/test_represent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import re
import requests
from bs4 import BeautifulSoup
from metro.parser.base import BaseRuDataProvider
class DataProvider(BaseRuDataProvider):
metro_data_src = "http://ru.wikipedia.org/wiki/\
Список_станций_Казанского_метрополитена"
def download_all(self):
self.parse_usual_big_table()
| {
"content_hash": "46b42c83108dfc3dfca91cfbe67c1b86",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 25.46153846153846,
"alnum_prop": 0.7129909365558912,
"repo_name": "xfenix/django-metro",
"id": "f35f61685dbde6c7f4c37ffe444e2fe951d9ddca",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metro/parser/providers/kazan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27822"
}
],
"symlink_target": ""
} |
import urllib, os, gzip, struct
import numpy as np
def parseMNIST(dataPath, labelsPath):
dataFile = gzip.GzipFile(dataPath)
labelFile = gzip.GzipFile(labelsPath)
# push out the first 4 useless bytes
struct.unpack('>i',dataFile.read(4))[0]
numImgs, width, height = struct.unpack('>III',dataFile.read(12))
# push out the first 8 bytes of the labels file
struct.unpack('>II',labelFile.read(8))
# print useful output
print "Loading %s images, each %s by %s" % (numImgs,width,height)
# allocate memory
labels = np.zeros(numImgs)
data = np.zeros((numImgs, width*height))
# load data and labels
for i in xrange(numImgs):
labels[i] = struct.unpack('B',labelFile.read(1))[0]
d = dataFile.read(width*height)
data[i,:] = np.array(struct.unpack('B'*width*height,d))
print 'Done'
return data, labels
if __name__ == "__main__":
dataDir = 'data'
try:
os.mkdir(dataDir)
except OSError:
pass
## Download all data
# MNIST Data
data = [('MNIST_Training_Data','http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'),
('MNIST_Training_Labels','http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'),
('MNIST_Test_Data','http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'),
('MNIST_Test_Labels','http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz')]
for filename, url in data:
path = os.path.join(dataDir, filename + '_Raw.' + url.split('.')[-1])
# check if the path already exists
if os.path.exists(path):
print "skipping %-25s file already exists" % filename
else:
# download data
print "Downloading %s to %s..." % (filename, path)
urllib.urlretrieve(url,path)
print "%s successfully downloaded" % filename
print "All data successfully downloaded, stored in directory: %s" % (dataDir)
print "\nUnpacking MNIST data to Numpy arrays"
MNISTData = [("MNIST_Training_Data","MNIST_Training_Labels"),("MNIST_Test_Data","MNIST_Test_Labels")]
for d,l in MNISTData:
# determine the paths
d_path_in = os.path.join(dataDir, d + '_Raw.gz')
d_path_out = os.path.join(dataDir, d)
l_path_in = os.path.join(dataDir, l + '_Raw.gz')
l_path_out = os.path.join(dataDir, l)
# don't do extra work
if os.path.exists(d_path_out) and os.path.exists(l_path_out):
print "Skipping %-19s and %-21s files already exist" % (d,l)
continue
print "Unpacking %s and %s" % (d,l)
# unpack the data
npData, npLabels = parseMNIST(d_path_in,l_path_in)
print "saving data to %s" % d_path_out
np.save(d_path_out,npData)
print "saving labels to %s" % l_path_out
np.save(l_path_out,npLabels)
print "All data successfully unpacked, stored in directory: %s" % (dataDir) | {
"content_hash": "3c7d6ec5756c5a0dd48829cbf60603a9",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 102,
"avg_line_length": 33.175,
"alnum_prop": 0.6831198191409193,
"repo_name": "pvarin/MachineLearning",
"id": "e40546e8567fdb2d9b3df8d49ece80ee10b44113",
"size": "2654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getData.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6270"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_file_copy
short_description: Manage files in datastores on a BIG-IP
description:
- Manages files on a variety of datastores on a BIG-IP.
version_added: "1.0.0"
options:
name:
description:
- The name of the file as it should reside on the BIG-IP.
- If this is not specified, then the filename provided in the C(source)
parameter is used instead.
type: str
source:
description:
- Specifies the path of the file to upload.
- This parameter is required if C(state) is C(present).
type: path
aliases:
- src
datastore:
description:
- Specifies the datastore to put the file in.
- There are several different datastores and each of them allows files
to be exposed in different ways.
- When C(external-monitor), the specified file will be stored as
an external monitor file and be available for use in external monitors.
- When C(ifile), the specified file will be stored as an iFile.
- When C(lw4o6-table), the specified file will be stored as a Lightweight 4
over 6 (lw4o6) tunnel binding table, which includes an IPv6 address for the
lwB4, public IPv4 address, and restricted port set.
type: str
choices:
- external-monitor
- ifile
- lw4o6-table
default: ifile
force:
description:
- Force overwriting a file.
- By default, files will only be overwritten if the SHA of the file is different
for the given filename. This parameter can be used to force overwriting the file
even if it already exists and its SHA matches.
- The C(lw4o6-table) datastore does not keep checksums of its file. Therefore, you
would need to provide this argument to update any of these files.
type: bool
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Upload a file as an iFile
bigip_file_copy:
name: foo
source: /path/to/file.txt
datastore: ifile
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
# Upload a directory of files
- name: Recursively upload web related files in /var/tmp/project
find:
paths: /var/tmp/project
patterns: "^.*?\\.(?:html|?:css|?:js)$"
use_regex: yes
register: f
- name: Upload a directory of files as a set of iFiles
bigip_file_copy:
source: "{{ item.path }}"
datastore: ifile
provider:
password: secret
server: lb.mydomain.com
user: admin
  loop: "{{ f.files }}"
delegate_to: localhost
# End upload a directory of files
- name: Upload a file to use in an external monitor
bigip_file_copy:
source: /path/to/files/external.sh
datastore: external-monitor
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import hashlib
import os
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec
)
from ..module_utils.icontrol import (
upload_file, tmos_version
)
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
]
returnables = [
]
updatables = [
'checksum',
]
class ApiParameters(Parameters):
@property
def checksum(self):
"""Returns a plain checksum value without the leading extra characters
        Values are stored in the REST API as follows.
``"checksum": "SHA1:77002:b84015799949ac4acad87b81691455242a31e894"``
Returns:
string: The parsed SHA1 checksum.
"""
if self._values['checksum'] is None:
return None
return str(self._values['checksum'].split(':')[2])
class ModuleParameters(Parameters):
@property
def checksum(self):
"""Return SHA1 checksum of the file on disk
Returns:
string: The SHA1 checksum of the file.
References:
- https://stackoverflow.com/a/22058673/661215
"""
if self._values['datastore'] == 'lw4o6-table':
return None
sha1 = hashlib.sha1()
with open(self._values['source'], 'rb') as f:
while True:
data = f.read(4096)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
@property
def name(self):
if self._values['name'] is not None:
return self._values['name']
if self._values['source'] is None:
return None
return os.path.basename(self._values['source'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update() and not self.want.force:
return False
if self.module.check_mode:
return True
self.remove_from_device()
self.upload_to_device()
self.create_on_device()
self.remove_uploaded_file_from_device(self.want.name)
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
if self.module.check_mode:
return True
self.upload_to_device()
self.create_on_device()
self.remove_uploaded_file_from_device(self.want.name)
return True
def absent(self):
if self.exists():
return self.remove()
return False
def upload_to_device(self):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, self.want.source, self.want.name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def remove_uploaded_file_from_device(self, name):
filepath = '/var/config/rest/downloads/{0}'.format(name)
params = {
"command": "run",
"utilCmdArgs": filepath
}
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
class IFileManager(BaseManager):
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['source-path'] = 'file:/var/config/rest/downloads/{0}'.format(self.want.name)
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ExternalMonitorManager(BaseManager):
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['source-path'] = 'file:/var/config/rest/downloads/{0}'.format(self.want.name)
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class Lw4o6Manager(BaseManager):
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['source-path'] = 'file:/var/config/rest/downloads/{0}'.format(self.want.name)
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if self.module.params['datastore'] == 'ifile':
manager = self.get_manager('v1')
elif self.module.params['datastore'] == 'external-monitor':
manager = self.get_manager('v2')
elif self.module.params['datastore'] == 'lw4o6-table':
manager = self.get_manager('v3')
else:
raise F5ModuleError(
"Unknown datastore specified."
)
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return IFileManager(**self.kwargs)
elif type == 'v2':
return ExternalMonitorManager(**self.kwargs)
elif type == 'v3':
return Lw4o6Manager(**self.kwargs)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(),
source=dict(
type='path',
aliases=['src'],
),
datastore=dict(
choices=[
'external-monitor',
'ifile',
'lw4o6-table',
],
default='ifile'
),
force=dict(type='bool', default='no'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_if = [
['state', 'present', ['source']]
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_if=spec.required_if,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| {
"content_hash": "972a5be4e4ae95a8bf33f795ededdfd5",
"timestamp": "",
"source": "github",
"line_count": 688,
"max_line_length": 94,
"avg_line_length": 31.575581395348838,
"alnum_prop": 0.5786227214141042,
"repo_name": "F5Networks/f5-ansible-modules",
"id": "3aec05593960db636ab78d0e5e8e1cdbfbb446d1",
"size": "21901",
"binary": false,
"copies": "1",
"ref": "refs/heads/doc-update",
"path": "ansible_collections/f5networks/f5_modules/plugins/modules/bigip_file_copy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "345682"
}
],
"symlink_target": ""
} |
from lia.common.LiaTestCase import LiaTestCase
from lucene import \
SimpleAnalyzer, Term, IndexSearcher, TermQuery, \
Highlighter, QueryScorer, StringReader, Version
class HighlightTest(LiaTestCase):
def testHighlighting(self):
text = "The quick brown fox jumps over the lazy dog"
query = TermQuery(Term("field", "fox"))
scorer = QueryScorer(query)
highlighter = Highlighter(scorer)
tokenStream = SimpleAnalyzer(Version.LUCENE_CURRENT).tokenStream("field", StringReader(text))
self.assertEqual("The quick brown <B>fox</B> jumps over the lazy dog",
highlighter.getBestFragment(tokenStream, text))
def testHits(self):
searcher = self.getSearcher()
query = TermQuery(Term("title", "action"))
scoreDocs = searcher.search(query, 50).scoreDocs
scorer = QueryScorer(query)
highlighter = Highlighter(scorer)
for scoreDoc in scoreDocs:
doc = searcher.doc(scoreDoc.doc)
title = doc["title"]
stream = SimpleAnalyzer(Version.LUCENE_CURRENT).tokenStream("title", StringReader(title))
fragment = highlighter.getBestFragment(stream, title)
print fragment
| {
"content_hash": "637ad1c01919bf9c225db9df277b57c6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 101,
"avg_line_length": 33.05263157894737,
"alnum_prop": 0.6544585987261147,
"repo_name": "romanchyla/pylucene-trunk",
"id": "1170aab892bebd03c95ad47ec2e0cbf6a69410ff",
"size": "1961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/LuceneInAction/lia/tools/HighlightTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "76100"
},
{
"name": "C++",
"bytes": "350489"
},
{
"name": "Java",
"bytes": "54533"
},
{
"name": "Python",
"bytes": "388266"
}
],
"symlink_target": ""
} |
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "v"
parentdir_prefix = "foobar-"
versionfile_source = "foobar/_version.py"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return rep_by_pep440(ver)
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split('/'))):
root = os.path.dirname(root)
except NameError:
return default
return rep_by_pep440(
git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
def git2pep440(ver_str):
dash_count = ver_str.count('-')
if dash_count == 0:
return ver_str
elif dash_count == 1:
return ver_str.split('-')[0] + ".post.dev1.pre"
elif dash_count == 2:
tag, commits, _ = ver_str.split('-')
return ".post.dev".join([tag, commits])
elif dash_count == 3:
tag, commits, _, _ = ver_str.split('-')
commits = str(int(commits) + 1)
return ".post.dev".join([tag, commits]) + ".pre"
else:
raise RuntimeError("Invalid version string")
def rep_by_pep440(ver):
if ver["full"]: # only if versions_from_parentdir was not used
ver["version"] = git2pep440(ver["version"])
else:
ver["version"] = ver["version"].split('-')[0]
return ver
| {
"content_hash": "87729830534fa387ea52ea8f7c73e6f0",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 87,
"avg_line_length": 38.792929292929294,
"alnum_prop": 0.5826064314542377,
"repo_name": "martin-hunt/foobar",
"id": "b82077ca7c76633c63dba63ff60bc80858d9615c",
"size": "8215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foobar/_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54037"
},
{
"name": "Shell",
"bytes": "1371"
}
],
"symlink_target": ""
} |
from pyarm.model.kinematics import finite_difference_method as kinematics
from pyarm import fig
import math
import numpy as np
import warnings
ASSERT = False
class AbstractArmModel:
"""Abstract forward dynamics arm model.
References :
[1] M. Katayama and M. Kawato.
"Virtual trajectory and stiffness ellipse during multijoint arm movement
predicted by neural inverse models".
Biological Cybernetics, 69(5):353-362, 1993.
"""
# STATE VARIABLES #########################################################
velocities = None # Angular velocity (rd/s)
angles = None # Joint angle (rd)
# CONSTANTS ###############################################################
name = 'Abstract'
joints = ('shoulder', 'elbow')
# Bound values ##############################
bounds = {
# Angular acceleration (rd/s²)
'angular_acceleration': {'min': -128. * math.pi,
'max': 128. * math.pi},
# Angular velocity (rd/s) from [3] p.19
'angular_velocity': {'min': -8. * math.pi,
'max': 8. * math.pi},
# Total torque (N.m)
'torque': {'min': -200, 'max': 200}
}
# Min and max joint angles (rd)
angle_constraints = [
# Shoulder
{'min': math.radians(-30),
'max': math.radians(140)},
# Elbow
{'min': math.radians(0),
'max': math.radians(160)}
]
# Initial joint angles
# Functional standard posture (rd) from [6] p.356-357
initial_angles = [math.radians(45), math.radians(70)]
# Arm parameters ############################
upperarm_mass = None # Upperarm mass (kg)
forearm_mass = None # Forearm mass (kg)
upperarm_length = None # Upperarm length (m)
forearm_length = None # Forearm length (m)
# Distance from the upperarm joint center to the upperarm center of mass (m)
upperarm_cog = None
# Distance from the forearm joint center to the forearm center of mass (m)
forearm_cog = None
shoulder_inertia = None # Moment of inertia at shoulder join (kg·m²)
elbow_inertia = None # Moment of inertia at elbow join (kg·m²)
g = None # Gravitational acceleration (m/s²)
friction_matrix = np.array([[0.05, 0.025], [0.025, 0.05]])
unbounded = False
###########################################################################
def __init__(self, unbounded=False):
self.unbounded = unbounded
self.velocities = np.zeros(2)
angles = np.array(self.initial_angles)
self.angles = self.constraint_joint_angles(angles)
# Init datas to plot
fig.subfig('M',
title='M',
xlabel='time (s)',
ylabel='M',
legend=('M11', 'M12', 'M21', 'M22'))
fig.subfig('C',
title='C',
xlabel='time (s)',
ylabel='C',
legend=self.joints)
fig.subfig('B',
title='B',
xlabel='time (s)',
ylabel='B',
legend=self.joints)
fig.subfig('G',
title='G',
xlabel='time (s)',
ylabel='G',
legend=self.joints)
fig.subfig('N',
title='N',
xlabel='time (s)',
ylabel='Normal force',
legend=self.joints)
fig.subfig('torque',
title='Torque',
xlabel='time (s)',
ylabel='Torque (N.m)',
legend=self.joints)
fig.subfig('tCBG',
title='torque - (C + B + G)',
xlabel='time (s)',
ylabel='Tau - (C + B + G)',
legend=self.joints)
fig.subfig('angular_acceleration',
title='Angular acceleration',
xlabel='time (s)',
ylabel='Acceleration (rad/s/s)',
legend=self.joints)
fig.subfig('angular_velocity',
title='Angular velocity',
xlabel='time (s)',
ylabel='Velocity (rad/s)',
legend=self.joints)
fig.subfig('joint_angles',
title='Angle',
xlabel='time (s)',
ylabel='Angle (rad)',
legend=self.joints)
fig.subfig('position',
title='Position',
xlabel='time (s)',
ylabel='Position (m)',
legend=('shoulder x', 'shoulder y',
'elbow x', 'elbow y',
'wrist x', 'wrist y'))
def compute_acceleration(self, torque, delta_time):
"Compute the arm dynamics."
# Load state
angles = self.angles.copy()
velocities = self.velocities.copy()
# Collision detection
if not self.unbounded:
collision_flags = self.collision_detection(angles.copy(), # TODO
velocities.copy(), # TODO
torque.copy(), # TODO
delta_time)
# Angular acceleration (rad/s²)
# From [1] p.3, [3] p.4 and [6] p.354
M = self.M(angles)
C = self.C(angles, velocities)
B = self.B(velocities)
G = self.G(angles)
normal_force = np.zeros(2)
if not self.unbounded:
filter = [float(flag) for flag in collision_flags]
normal_force = np.array(filter) * (-torque + C + B + G) # TODO
accelerations = np.dot(np.linalg.inv(M), torque - C - B - G + normal_force) # TODO
self.assert_bounds('angular_acceleration', accelerations)
# Forward kinematics
velocities, angles = kinematics.forward_kinematics(accelerations,
velocities,
angles,
delta_time)
self.assert_bounds('angular_velocity', velocities)
if not self.unbounded:
filter = [float(not flag) for flag in collision_flags]
velocities = np.array(filter) * velocities # TODO
angles = self.constraint_joint_angles(angles) # TODO # REMOVE IT #
# Plot values
fig.append('M', M.flatten())
fig.append('C', C)
fig.append('B', B)
fig.append('G', G)
fig.append('N', normal_force)
fig.append('torque', torque)
fig.append('tCBG', torque - C - B - G)
fig.append('angular_acceleration', accelerations)
fig.append('angular_velocity', velocities)
fig.append('joint_angles', angles)
fig.append('position', np.concatenate((self.joints_position())))
# Save state
self.angles = angles
self.velocities = velocities
return accelerations
def M(self, theta):
"Compute inertia matrix."
if theta.shape != (2,):
raise TypeError('Theta : shape is ' + str(theta.shape) \
+ ' ((2,) expected)')
f1 = self.shoulder_inertia + self.elbow_inertia \
+ self.forearm_mass * self.upperarm_length**2
f2 = self.forearm_mass * self.upperarm_length * self.forearm_cog
f3 = self.elbow_inertia
M = np.zeros([2, 2])
M[0, 0] = f1 + 2. * f2 * math.cos(theta[1])
M[0, 1] = f3 + f2 * math.cos(theta[1])
M[1, 0] = f3 + f2 * math.cos(theta[1])
M[1, 1] = f3
return M
def C(self, theta, omega):
"Compute centripedal and coriolis forces matrix."
if theta.shape != (2,):
raise TypeError('Theta : shape is ' + str(theta.shape) \
+ ' ((2,) expected)')
if omega.shape != (2,):
raise TypeError('Omega : shape is ' + str(omega.shape) \
+ ' ((2,) expected)')
f2 = self.forearm_mass * self.upperarm_length * self.forearm_cog
C = np.array([-omega[1] * (2. * omega[0] + omega[1]),
omega[0]**2] \
) * f2 * math.sin(theta[1])
return C
def B(self, omega):
"Compute joint friction matrix."
return np.dot(self.friction_matrix, omega)
def G(self, theta):
"Compute gravity force matrix."
if theta.shape != (2,):
raise TypeError('Theta : shape is ' + str(theta.shape) + ' ((2,) expected)')
G = np.zeros(2)
G[0] = self.upperarm_mass * self.g * self.upperarm_cog * \
math.cos(theta[0]) \
+ self.forearm_mass * self.g * \
(self.upperarm_length * math.cos(\
theta[0]) + self.forearm_cog * math.cos(theta[0] + theta[1]))
G[1] = self.forearm_mass * self.g * self.forearm_cog * math.cos(\
theta[0] + theta[1])
return G
def collision_detection(self, angles, velocities, torque, delta_time):
"""Compute angles in order to detect collisions.
Return True if angle value is out of range (collision) or False
otherwise."""
# Angular acceleration (rad/s²)
# From [1] p.3, [3] p.4 and [6] p.354
M = self.M(angles)
C = self.C(angles, velocities)
B = self.B(velocities)
G = self.G(angles)
accelerations = np.dot(np.linalg.inv(M), torque - C - B - G)
# Forward kinematics
velocities, angles = kinematics.forward_kinematics(accelerations,
velocities,
angles,
delta_time)
range_flags = self.assert_joint_angles(angles)
return [not flag for flag in range_flags]
def constraint_joint_angles(self, angles):
"Limit joint angles to respect constraint values."
for i in range(len(self.joints)):
angles[i] = max(angles[i], self.angle_constraints[i]['min'])
angles[i] = min(angles[i], self.angle_constraints[i]['max'])
return angles
def assert_joint_angles(self, angles):
"""Check if joint angles to respect constraint values.
Return True if angles values satisfy constraints or False otherwise."""
const = self.angle_constraints
return [const[i]['min'] < angles[i] < const[i]['max'] \
for i in range(len(self.joints))]
def assert_bounds(self, name, value):
"""Check if 'value' satisfy minimum and maximum value constraints
(bounds).
Arguments
- name : the key to reach constraints in 'bounds' dictionary.
- value : the values to assert (a numpy array).
"""
if ASSERT:
if name in self.bounds.keys():
assert value.min() >= self.bounds[name]['min'] \
and value.max() <= self.bounds[name]['max'], \
"%s is out of bounds values :\n" \
"- expected bounds : [%f, %f]\n" \
"- actual bounds : [%f, %f]\n" \
"\n%s" \
% (name,
self.bounds[name]['min'],
self.bounds[name]['max'],
value.min(),
value.max(),
value)
else:
warnings.warn("%s is not a valid key" % name)
def joints_position(self):
"Compute absolute position of elbow and wrist in operational space"
initial_angle = 0
shoulder_point = np.zeros(2)
shoulder_angle = self.angles[0]
elbow_angle = self.angles[1]
global_shoulder_angle = initial_angle + shoulder_angle
global_elbow_angle = global_shoulder_angle + elbow_angle
elbow_point = np.array([math.cos(global_shoulder_angle),
math.sin(global_shoulder_angle)]) \
* self.upperarm_length + shoulder_point
wrist_point = np.array([math.cos(global_elbow_angle),
math.sin(global_elbow_angle)]) \
* self.forearm_length + elbow_point
return shoulder_point, elbow_point, wrist_point
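# Hedged usage sketch (comments only, not part of the original module): a
# concrete subclass is expected to fill in the physical constants before use.
# The subclass name and numeric values below are illustrative, not from this file.
#
#   class SimpleArmModel(AbstractArmModel):
#       upperarm_mass = 1.59
#       forearm_mass = 1.44
#       upperarm_length = 0.3
#       forearm_length = 0.35
#       upperarm_cog = 0.18
#       forearm_cog = 0.21
#       shoulder_inertia = 0.0477
#       elbow_inertia = 0.0588
#       g = 9.81
#
#   arm = SimpleArmModel(unbounded=True)
#   accelerations = arm.compute_acceleration(np.zeros(2), delta_time=0.01)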
| {
"content_hash": "b0c4349bfd07e720ae2e20a7ffc88ebb",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 91,
"avg_line_length": 35.31404958677686,
"alnum_prop": 0.47866448240892423,
"repo_name": "jeremiedecock/pyarm",
"id": "4c27c1b8fed9f5effc10f6abad22176d5310fb05",
"size": "12913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyarm/model/arm/abstract_arm_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125519"
},
{
"name": "Shell",
"bytes": "1449"
}
],
"symlink_target": ""
} |
"""
This Python script calls the download script in the common folder of eurostat.
The download script takes the input URLs and a download directory as input
and downloads the files.
"""
import os
import sys
from absl import app, flags
# pylint: disable=import-error
# pylint: disable=wrong-import-position
# For import common.download
_COMMON_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(1, _COMMON_PATH)
from common import import_download_details, download
# pylint: enable=import-error
# pylint: enable=wrong-import-position
_FLAGS = flags.FLAGS
flags.DEFINE_enum("import_name", None, [
"alcohol_consumption", "tobacco_consumption", "physical_activity", "bmi",
"social_environment", "fruits_vegetables"
], "Import name for which input files to be downloaded")
flags.mark_flag_as_required("import_name")
def main(_):
download_details = import_download_details.download_details[
_FLAGS.import_name]
download_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', _FLAGS.import_name,
"input_files"))
os.makedirs(download_path, exist_ok=True)
for file in download_details["filenames"]:
download_files_urls = [
download_details["input_url"] + str(file) +
download_details["file_extension"]
]
download.download_files(download_files_urls, download_path)
if __name__ == '__main__':
app.run(main)
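# Hedged usage note (illustrative, not from the original source): the script is
# driven by the required --import_name flag, for example:
#   python3 download_eurostat_input_files.py --import_name=bmi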
| {
"content_hash": "78abc48578fb2307285677dc03f49de4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.6851471594798083,
"repo_name": "datacommonsorg/data",
"id": "9cd4c229654334e23a723fa1416484e8bff5d0ab",
"size": "2036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/eurostat/health_determinants/common/download_eurostat_input_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "Go",
"bytes": "51446"
},
{
"name": "HTML",
"bytes": "32842253"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Jupyter Notebook",
"bytes": "5088443"
},
{
"name": "Python",
"bytes": "3723204"
},
{
"name": "R",
"bytes": "28607"
},
{
"name": "Shell",
"bytes": "25468"
},
{
"name": "TypeScript",
"bytes": "13472"
}
],
"symlink_target": ""
} |
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
# open home page
self.app.open_home_page()
# complete login form
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
def is_logged_in_as(self, username):
wd = self.app.wd
return wd.find_element_by_xpath("//form/b").text == "("+username+")"
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def ensure_login(self, username, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
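# Hedged usage sketch (comments only, not part of the original fixture; it
# assumes the application fixture exposes this helper, e.g. as app.session):
#   app.session.ensure_login(username="admin", password="secret")
#   ... run test steps ...
#   app.session.ensure_logout()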
| {
"content_hash": "14fd882314727d4770967d052af119bf",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 30.931818181818183,
"alnum_prop": 0.5584129316678913,
"repo_name": "kKhilko/addressbook_python",
"id": "bda00db1b5ddedf813e07f254e71b87d00621bbe",
"size": "1363",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fixture/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38530"
}
],
"symlink_target": ""
} |
"""Queries all persons in kommittees."""
# imports
import sys
import os
import sqlalchemy
import datetime
sys.path.append(os.getcwd()) # Add . to path
sys.path.append(os.path.dirname(os.getcwd())) # Add .. to path
from backend.ldaplogin import (get_member_with_real_name,
DuplicateNamesException, PersonNotFoundException)
from backend import connect
from backend.orm import *
import common
import passwordsafe
# constants
# exception classes
# interface functions
# classes
# internal functions & classes
def main():
ps = passwordsafe.PasswordSafe()
SessionMaker = ps.connect_with_config("members")
session = SessionMaker()
groups = session.query(Group).filter(
Group.name_fld=="Styrelsen")
with open("test.csv", "w") as f:
for group in groups:
for membership in group.memberships:
f.write(str(membership.startTime_fld.date().year) + "\t"
+ membership.member.getName() + "\t" +
membership.member.contactinfo.email_fld + "\n")
if __name__ == '__main__':
status = main()
sys.exit(status)
| {
"content_hash": "8a6fa02d80e48c6c3fdfda1ec989ec7a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 25.133333333333333,
"alnum_prop": 0.6507515473032714,
"repo_name": "Teknologforeningen/svaksvat",
"id": "231198733bea065391eeb835e525a05a56b82ca5",
"size": "1131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/kommittequery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "145"
},
{
"name": "CSS",
"bytes": "86"
},
{
"name": "HTML",
"bytes": "2187"
},
{
"name": "Python",
"bytes": "423022"
},
{
"name": "Ruby",
"bytes": "2249"
}
],
"symlink_target": ""
} |
"""Implementation of various cryptographic types."""
import hashlib
import struct
from M2Crypto import BIO
from M2Crypto import EVP
from M2Crypto import RSA
from M2Crypto import util
from M2Crypto import X509
import logging
from grr.lib import rdfvalue
from grr.lib import type_info
from grr.lib import utils
from grr.lib.rdfvalues import structs
from grr.proto import jobs_pb2
DIGEST_ALGORITHM = hashlib.sha256
DIGEST_ALGORITHM_STR = "sha256"
class Certificate(structs.RDFProtoStruct):
protobuf = jobs_pb2.Certificate
class RDFX509Cert(rdfvalue.RDFString):
"""X509 certificates used to communicate with this client."""
def _GetCN(self, x509cert):
subject = x509cert.get_subject()
try:
cn_id = subject.nid["CN"]
cn = subject.get_entries_by_nid(cn_id)[0]
except IndexError:
raise rdfvalue.DecodeError("Cert has no CN")
self.common_name = rdfvalue.RDFURN(cn.get_data().as_text())
def GetX509Cert(self):
return X509.load_cert_string(str(self))
def GetPubKey(self):
return self.GetX509Cert().get_pubkey().get_rsa()
def ParseFromString(self, string):
super(RDFX509Cert, self).ParseFromString(string)
try:
self._GetCN(self.GetX509Cert())
except X509.X509Error:
raise rdfvalue.DecodeError("Cert invalid")
class PEMPublicKey(rdfvalue.RDFString):
"""A Public key encoded as a pem file."""
def GetPublicKey(self):
try:
bio = BIO.MemoryBuffer(self._value)
rsa = RSA.load_pub_key_bio(bio)
if rsa.check_key() != 1:
raise RSA.RSAError("RSA.check_key() did not succeed.")
return rsa
except RSA.RSAError as e:
      raise type_info.TypeValueError("Public key invalid: %s" % e)
def ParseFromString(self, pem_string):
super(PEMPublicKey, self).ParseFromString(pem_string)
self.GetPublicKey()
class PEMPrivateKey(rdfvalue.RDFString):
"""An RSA private key encoded as a pem file."""
def GetPrivateKey(self, callback=None):
if callback is None:
callback = lambda: ""
return RSA.load_key_string(self._value, callback=callback)
def GetPublicKey(self):
rsa = self.GetPrivateKey()
m = BIO.MemoryBuffer()
rsa.save_pub_key_bio(m)
return PEMPublicKey(m.read_all())
def Validate(self):
try:
rsa = self.GetPrivateKey()
rsa.check_key()
except RSA.RSAError as e:
raise type_info.TypeValueError("Private key invalid: %s" % e)
@classmethod
def GenKey(cls, bits=2048, exponent=65537):
return cls(RSA.gen_key(bits, exponent).as_pem(None))
class Hash(rdfvalue.RDFProtoStruct):
"""A hash object containing multiple digests."""
protobuf = jobs_pb2.Hash
class SignedBlob(rdfvalue.RDFProtoStruct):
"""A signed blob.
The client can receive and verify a signed blob (e.g. driver or executable
binary). Once verified, the client may execute this.
"""
protobuf = jobs_pb2.SignedBlob
def Verify(self, pub_key):
"""Verify the data in this blob.
Args:
pub_key: The public key to use for verification.
Returns:
True when verification succeeds.
Raises:
      rdfvalue.DecodeError if the data cannot be verified.
"""
if self.digest_type != self.HashType.SHA256:
raise rdfvalue.DecodeError("Unsupported digest.")
rsa = pub_key.GetPublicKey()
result = 0
try:
result = rsa.verify(self.digest, self.signature,
DIGEST_ALGORITHM_STR)
if result != 1:
raise rdfvalue.DecodeError("Could not verify blob.")
    except RSA.RSAError as e:
raise rdfvalue.DecodeError("Could not verify blob. Error: %s" % e)
digest = hashlib.sha256(self.data).digest()
if digest != self.digest:
raise rdfvalue.DecodeError(
"SignedBlob: Digest did not match actual data.")
if result != 1:
raise rdfvalue.DecodeError("Verification failed.")
return True
def Sign(self, data, signing_key, verify_key=None, prompt=False):
"""Use the data to sign this blob.
Args:
data: String containing the blob data.
signing_key: A key that can be loaded to sign the data as a string.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
prompt: If True we allow a password prompt to be presented.
Raises:
IOError: On bad key.
"""
callback = None
if prompt:
callback = util.passphrase_callback
else:
callback = lambda x: ""
digest = DIGEST_ALGORITHM(data).digest()
rsa = signing_key.GetPrivateKey(callback=callback)
if len(rsa) < 2048:
logging.warn("signing key is too short.")
self.signature = rsa.sign(digest, DIGEST_ALGORITHM_STR)
self.signature_type = self.SignatureType.RSA_2048
self.digest = digest
self.digest_type = self.HashType.SHA256
self.data = data
# Test we can verify before we send it off.
if verify_key is None:
verify_key = signing_key.GetPublicKey()
# Verify our own data.
self.Verify(verify_key)
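# Hedged round-trip sketch (comments only, not part of the original file):
#   signing_key = PEMPrivateKey.GenKey(2048)
#   blob = SignedBlob()
#   blob.Sign("some data", signing_key)          # signs and self-verifies
#   blob.Verify(signing_key.GetPublicKey())      # True, or raises DecodeError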
class EncryptionKey(rdfvalue.RDFBytes):
"""Base class for encryption keys."""
# Size of the key in bits.
length = 128
def ParseFromString(self, string):
# Support both hex encoded and raw serializations.
if len(string) == 2 * self.length / 8:
self._value = string.decode("hex")
elif len(string) == self.length / 8:
self._value = string
else:
raise ValueError("%s must be exactly %s bit longs." %
(self.__class__.__name__, self.length))
def __str__(self):
return self._value.encode("hex")
def Generate(self):
self._value = ""
while len(self._value) < self.length/8:
self._value += struct.pack("=L", utils.PRNG.GetULong())
self._value = self._value[:self.length/8]
return self
def RawBytes(self):
return self._value
class AES128Key(EncryptionKey):
length = 128
class Cipher(object):
"""A Cipher that accepts rdfvalue.EncryptionKey objects as key and iv."""
OP_DECRYPT = 0
OP_ENCRYPT = 1
def Update(self, data):
pass
def Final(self):
pass
class AES128CBCCipher(Cipher):
"""An aes_128_cbc cipher."""
def __init__(self, key, iv, mode=Cipher.OP_DECRYPT):
super(AES128CBCCipher, self).__init__()
self.cipher = EVP.Cipher(alg="aes_128_cbc", key=key.RawBytes(),
iv=iv.RawBytes(), op=mode)
def Update(self, data):
return self.cipher.update(data)
def Final(self):
return self.cipher.final()
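# Hedged usage sketch (comments only, not part of the original file):
#   key = AES128Key().Generate()
#   iv = AES128Key().Generate()
#   cipher = AES128CBCCipher(key, iv, mode=Cipher.OP_ENCRYPT)
#   ciphertext = cipher.Update(plaintext) + cipher.Final()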
| {
"content_hash": "732365f36443469427d653177513b710",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 76,
"avg_line_length": 25.840637450199203,
"alnum_prop": 0.6646623496762257,
"repo_name": "simsong/grr-insider",
"id": "c190297deed43cbe5c6a9150f534f95858d8a52e",
"size": "6508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/rdfvalues/crypto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36308"
},
{
"name": "JavaScript",
"bytes": "679269"
},
{
"name": "Python",
"bytes": "3553249"
},
{
"name": "Shell",
"bytes": "30813"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import json
import pytest
from pytest_mock import mocker
from seventweets.registry import Registry
@pytest.fixture(scope='function')
def node_data():
Node = namedtuple('Node', 'name, address')
node1 = Node(name='node1', address='node1.example.com')
node2 = Node(name='node2', address='node2.example.com')
node3 = {'name': 'node3', 'address': 'node3.example.com'}
known_nodes = {node1, node2}
self_node = {'name': None, 'address': None}
known_nodes_json = json.dumps([node1._asdict(), node2._asdict(), self_node])
return {'node': node3,
'node1': node1,
'known_nodes': known_nodes,
'known_nodes_json': known_nodes_json}
@pytest.mark.usefixture('node_data')
class TestRegistry:
def test_register(self, node_data):
Node = namedtuple('Node', 'name, address')
node = node_data['node']
known_nodes = node_data['known_nodes']
Registry._known_nodes = known_nodes.copy()
Registry.register(node)
known_nodes.add(Node(**node))
assert Registry._known_nodes == known_nodes
def test_known_nodes(self, node_data):
Registry._known_nodes = node_data['known_nodes']
test_nodes = json.loads(node_data['known_nodes_json'])
try:
result_nodes = json.loads(Registry.known_nodes)
except json.JSONDecodeError:
assert False
# List comparisons consider order of elements.
try:
for elem in result_nodes:
test_nodes.remove(elem)
except ValueError:
assert False
else:
assert not test_nodes
def test_delete_node(self, node_data):
Registry._known_nodes = node_data['known_nodes']
node = node_data['node1']
assert node in Registry._known_nodes
Registry.delete_node('node1')
assert node not in Registry._known_nodes
| {
"content_hash": "0052fa8d82214af6d64d5915e5e6548f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 27.52112676056338,
"alnum_prop": 0.6151484135107472,
"repo_name": "nzp/seventweets",
"id": "674912ac92fcd05559fa2609f4ff24cbf269dacd",
"size": "1954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_registry.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "34522"
}
],
"symlink_target": ""
} |
"""Provision's Contrail Config components with Open Stack as Orchestrator."""
import os
from fabric.state import env
from fabric.api import local
from fabric.context_managers import settings
from contrail_provisioning.config.common import ConfigBaseSetup
from contrail_provisioning.config.templates import contrail_plugin_ini
from contrail_provisioning.config.templates import contrail_config_nodemgr_template
from contrail_provisioning.common.templates import contrail_database_template
class ConfigOpenstackSetup(ConfigBaseSetup):
def __init__(self, config_args, args_str=None):
super(ConfigOpenstackSetup, self).__init__(config_args)
self._args = config_args
self.keystone_ssl_enabled = False
if (self._args.keystone_keyfile and
self._args.keystone_certfile and self._args.keystone_cafile):
self.keystone_ssl_enabled = True
def fixup_config_files(self):
self.fixup_cassandra_config()
self.fixup_keystone_auth_config_file(True)
self.fixup_contrail_api_config_file()
config_files = [
'/etc/contrail/contrail-api.conf',
'/etc/contrail/contrail-keystone-auth.conf',
'/etc/contrail/contrail-database.conf',
]
if self.pdist in ['Ubuntu'] and self.pdistversion == '16.04':
pass
else:
self.fixup_contrail_api_supervisor_ini(config_files)
self.fixup_contrail_api_initd()
self.fixup_contrail_plugin_ini()
self.fixup_contrail_schema_supervisor_ini()
self.fixup_contrail_device_manager_supervisor_ini()
self.fixup_contrail_svc_monitor_supervisor_ini()
self.fixup_schema_transformer_config_file()
self.fixup_device_manager_config_file()
self.fixup_svc_monitor_config_file()
self.fixup_vnc_api_lib_ini()
self.fixup_contrail_config_nodemgr()
self.fixup_contrail_sudoers()
if self._args.use_certs:
local("sudo setup-pki.sh /etc/contrail/ssl")
def fixup_contrail_api_config_file(self):
super(ConfigOpenstackSetup, self).fixup_contrail_api_config_file()
self.set_config('/etc/contrail/contrail-api.conf', 'DEFAULTS',
'auth', 'keystone')
def fixup_contrail_schema_supervisor_ini(self):
contrail_svc_ini = "/etc/contrail/supervisord_config_files/contrail-schema.ini"
config_files = [
'/etc/contrail/contrail-schema.conf',
'/etc/contrail/contrail-keystone-auth.conf',
'/etc/contrail/contrail-database.conf',
]
config_file_args = ' --conf_file '.join(config_files)
commandline = "/usr/bin/contrail-schema --conf_file %s" % config_file_args
self.set_config(contrail_svc_ini, 'program:contrail-schema',
'command', commandline)
def fixup_contrail_device_manager_supervisor_ini(self):
contrail_svc_ini = "/etc/contrail/supervisord_config_files/contrail-device-manager.ini"
config_files = [
'/etc/contrail/contrail-device-manager.conf',
'/etc/contrail/contrail-keystone-auth.conf',
'/etc/contrail/contrail-database.conf',
]
config_file_args = ' --conf_file '.join(config_files)
commandline = "/usr/bin/contrail-device-manager --conf_file %s" % config_file_args
self.set_config(contrail_svc_ini, 'program:contrail-device-manager',
'command', commandline)
def fixup_contrail_svc_monitor_supervisor_ini(self):
contrail_svc_ini = "/etc/contrail/supervisord_config_files/contrail-svc-monitor.ini"
config_files = [
'/etc/contrail/contrail-svc-monitor.conf',
'/etc/contrail/contrail-keystone-auth.conf',
'/etc/contrail/contrail-database.conf',
]
config_file_args = ' --conf_file '.join(config_files)
commandline = "/usr/bin/contrail-svc-monitor --conf_file %s" % config_file_args
self.set_config(contrail_svc_ini, 'program:contrail-svc-monitor',
'command', commandline)
def fixup_contrail_plugin_ini(self):
# quantum/neutron plugin
template_vals = {'__contrail_api_server_ip__': self.contrail_internal_vip or self._args.first_cfgm_ip or self._args.self_ip,
'__contrail_api_server_port__': '8082',
'__contrail_analytics_server_ip__': self.contrail_internal_vip or self._args.self_ip,
'__contrail_analytics_server_port__': '8081',
'__contrail_keystone_ip__': self._args.keystone_ip,
'__contrail_ks_auth_protocol__': self._args.keystone_auth_protocol,
'__contrail_ks_auth_port__': self._args.keystone_auth_port,
'__contrail_admin_user__': self._args.keystone_admin_user,
'__contrail_admin_password__': self._args.keystone_admin_passwd,
'__contrail_admin_tenant_name__': self._args.keystone_admin_tenant_name,
'__contrail_cloud_admin_role__': "cloud_admin_role=%s" % self._args.cloud_admin_role if self._args.cloud_admin_role else '',
'__contrail_aaa_mode__': "aaa_mode=%s" % self._args.aaa_mode if self._args.aaa_mode else '',
}
self._template_substitute_write(contrail_plugin_ini.template,
template_vals, self._temp_dir_name + '/contrail_plugin.ini')
if os.path.exists("/etc/neutron"):
local("sudo mkdir -p /etc/neutron/plugins/opencontrail")
local("sudo mv %s/contrail_plugin.ini /etc/neutron/plugins/opencontrail/ContrailPlugin.ini" %(self._temp_dir_name))
else:
local("sudo mv %s/contrail_plugin.ini /etc/quantum/plugins/contrail/contrail_plugin.ini" %(self._temp_dir_name))
if self.pdist == 'Ubuntu':
neutron_def_file = "/etc/default/neutron-server"
if os.path.exists(neutron_def_file):
local("sudo sed -i 's/NEUTRON_PLUGIN_CONFIG=.*/NEUTRON_PLUGIN_CONFIG=\"\/etc\/neutron\/plugins\/opencontrail\/ContrailPlugin.ini\"/g' %s" %(neutron_def_file))
if self.api_ssl_enabled:
certfile, cafile, keyfile = self._get_apiserver_certs(
'/etc/neutron/ssl/certs/')
conf_file = '/etc/neutron/plugins/opencontrail/ContrailPlugin.ini'
conf_vals = {'use_ssl' : True,
'insecure': self._args.apiserver_insecure,
'certfile' : certfile,
'keyfile' : certfile,
'cafile' : cafile,
}
for param, value in conf_vals.items():
self.set_config(conf_file, 'APISERVER', param, value)
def build_ctrl_details(self):
ctrl_infos = []
ctrl_details = "%s/ctrl-details" % self._temp_dir_name
ctrl_infos.append('SERVICE_TENANT=%s' % self._args.keystone_service_tenant_name)
ctrl_infos.append('SERVICE_TOKEN=%s' % self._args.service_token)
ctrl_infos.append('AUTH_PROTOCOL=%s' % self._args.keystone_auth_protocol)
if self._args.keystone_auth_protocol == 'https':
ctrl_infos.append('KEYSTONE_INSECURE=%s' % self._args.keystone_insecure)
ctrl_infos.append('APISERVER_INSECURE=%s' % self._args.apiserver_insecure)
ctrl_infos.append('QUANTUM_PROTOCOL=%s' % self._args.quantum_service_protocol)
ctrl_infos.append('ADMIN_TOKEN=%s' % self._args.keystone_admin_passwd)
if self._args.openstack_ctrl_ip == None:
ctrl_infos.append('CONTROLLER=%s' % self._args.keystone_ip)
else:
ctrl_infos.append('CONTROLLER=%s' % self._args.openstack_ctrl_ip)
ctrl_infos.append('KEYSTONE_SERVER=%s' % self._args.keystone_ip)
ctrl_infos.append('CONTROLLER=%s' % self._args.keystone_ip)
ctrl_infos.append('AMQP_SERVER=%s' % self.rabbit_servers)
ctrl_infos.append('NEUTRON_PASSWORD=%s' % self._args.neutron_password)
ctrl_infos.append('KEYSTONE_VERSION=%s' % self._args.keystone_version)
if self._args.haproxy:
ctrl_infos.append('QUANTUM=127.0.0.1')
else:
ctrl_infos.append('QUANTUM=%s' % self.cfgm_ip)
ctrl_infos.append('QUANTUM_PORT=%s' % self._args.quantum_port)
ctrl_infos.append('AAA_MODE=%s' % (self._args.aaa_mode or ''))
if self.keystone_ssl_enabled:
certfile, cafile, keyfile = self._get_keystone_certs(
'/etc/neutron/ssl/certs/')
ctrl_infos.append('KEYSTONE_CERTFILE=%s' % certfile)
ctrl_infos.append('KEYSTONE_KEYFILE=%s' % certfile)
ctrl_infos.append('KEYSTONE_CAFILE=%s' % cafile)
self.update_vips_in_ctrl_details(ctrl_infos)
for ctrl_info in ctrl_infos:
local ("sudo echo %s >> %s" % (ctrl_info, ctrl_details))
local("sudo cp %s /etc/contrail/ctrl-details" % ctrl_details)
local("sudo rm %s/ctrl-details" %(self._temp_dir_name))
def run_services(self):
if self.contrail_internal_vip:
quantum_ip = self.contrail_internal_vip
elif self._args.first_cfgm_ip:
quantum_ip = self._args.first_cfgm_ip
else:
quantum_ip = self.cfgm_ip
quant_args = '--ks_server_ip %s ' % self._args.keystone_ip + \
'--quant_server_ip %s ' % quantum_ip + \
'--tenant %s ' % self._args.keystone_admin_tenant_name + \
'--user %s ' % self._args.keystone_admin_user + \
'--password %s ' % self._args.keystone_admin_passwd + \
'--svc_password %s ' % self._args.neutron_password + \
'--svc_tenant_name %s ' % self._args.keystone_service_tenant_name + \
'--root_password %s ' % env.password + \
'--auth_protocol %s ' % self._args.keystone_auth_protocol
if self._args.keystone_insecure:
quant_args += '--insecure'
if self._args.region_name:
quant_args += " --region_name %s" %(self._args.region_name)
if self._args.manage_neutron == 'yes':
local("setup-quantum-in-keystone %s" %(quant_args))
super(ConfigOpenstackSetup, self).run_services()
if self._args.provision_neutron_server == 'yes':
local("sudo quantum-server-setup.sh")
def setup(self):
self.disable_selinux()
self.disable_iptables()
self.setup_coredump()
self.setup_database()
self.fixup_config_files()
self.build_ctrl_details()
self.run_services()
| {
"content_hash": "567f2bb05268864e8a77ceffd1b3bf40",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 174,
"avg_line_length": 52.73557692307692,
"alnum_prop": 0.59002643814386,
"repo_name": "Juniper/contrail-provisioning",
"id": "19c845f2e2dec8361f61044e50767365bfbd3040",
"size": "11056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrail_provisioning/config/openstack.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "483390"
},
{
"name": "Shell",
"bytes": "304085"
}
],
"symlink_target": ""
} |
import datetime
import unittest
from unittest import mock
from airflow.models.dag import DAG
from airflow.providers.microsoft.azure.transfers.file_to_wasb import FileToWasbOperator
class TestFileToWasbOperator(unittest.TestCase):
_config = {
'file_path': 'file',
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'wasb_default',
'retries': 3,
}
def setUp(self):
args = {'owner': 'airflow', 'start_date': datetime.datetime(2017, 1, 1)}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = FileToWasbOperator(task_id='wasb_operator_1', dag=self.dag, **self._config)
self.assertEqual(operator.file_path, self._config['file_path'])
self.assertEqual(operator.container_name, self._config['container_name'])
self.assertEqual(operator.blob_name, self._config['blob_name'])
self.assertEqual(operator.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(operator.load_options, {})
self.assertEqual(operator.retries, self._config['retries'])
operator = FileToWasbOperator(
task_id='wasb_operator_2', dag=self.dag, load_options={'timeout': 2}, **self._config
)
self.assertEqual(operator.load_options, {'timeout': 2})
@mock.patch('airflow.providers.microsoft.azure.transfers.file_to_wasb.WasbHook', autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = FileToWasbOperator(
task_id='wasb_sensor', dag=self.dag, load_options={'timeout': 2}, **self._config
)
operator.execute(None)
mock_instance.load_file.assert_called_once_with('file', 'container', 'blob', timeout=2)
| {
"content_hash": "4e6c50ba0c3128988d417bbc85e90e21",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 40.61363636363637,
"alnum_prop": 0.6547285954113039,
"repo_name": "airbnb/airflow",
"id": "5a4f14c729259d6d544705b384080e4b51f47fda",
"size": "2577",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/microsoft/azure/transfers/test_file_to_wasb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
} |
import logging
class UserCacheHandler(object):
@staticmethod
def getUserCacheHandler(uuid):
"""
Returns the current usercache handler configured in the config file,
or the builtin cache if nothing is configured
"""
# We put the import statement in here to avoid circular dependencies
# It is also not needed anywhere else since this is a completely static
# function otherwise
import emission.net.usercache.builtin_usercache_handler as biuc
return biuc.BuiltinUserCacheHandler(uuid)
def __init__(self, user_id):
self.user_id = user_id
def moveToLongTerm(self):
"""
Moves all messages that have arrived for the current user into long-term
storage, after converting into a platform-independent format.
"""
pass
def storeViewsToCache(self):
"""
Moves all messages that have arrived for the current user into long-term
storage, after converting into a platform-independent format.
"""
pass
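# Usage sketch (illustrative only, not part of the original module): the static
# factory returns the configured handler, which the pipeline then drives; the
# uuid argument stands for an arbitrary user id.
def _example_move_to_long_term(uuid):
    handler = UserCacheHandler.getUserCacheHandler(uuid)
    handler.moveToLongTerm()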
| {
"content_hash": "619c9df3e11a305b171e179f65f06acb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 34.774193548387096,
"alnum_prop": 0.6549165120593692,
"repo_name": "joshzarrabi/e-mission-server",
"id": "a23c929bfec91f97db979534d3f4c0107a54c353",
"size": "1078",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "emission/net/usercache/abstract_usercache_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "12835"
},
{
"name": "HTML",
"bytes": "50997"
},
{
"name": "JavaScript",
"bytes": "3507788"
},
{
"name": "Python",
"bytes": "1190346"
},
{
"name": "Shell",
"bytes": "1191"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
} |
import json
import frappe
from frappe.geo.country_info import get_country_info
from frappe.translate import get_messages_for_boot, send_translations, set_default_language
from frappe.utils import cint, strip
from frappe.utils.password import update_password
from . import install_fixtures
def get_setup_stages(args):
# App setup stage functions should not include frappe.db.commit
# That is done by frappe after successful completion of all stages
stages = [
{
"status": "Updating global settings",
"fail_msg": "Failed to update global settings",
"tasks": [
{"fn": update_global_settings, "args": args, "fail_msg": "Failed to update global settings"}
],
}
]
stages += get_stages_hooks(args) + get_setup_complete_hooks(args)
stages.append(
{
# post executing hooks
"status": "Wrapping up",
"fail_msg": "Failed to complete setup",
"tasks": [
{"fn": run_post_setup_complete, "args": args, "fail_msg": "Failed to complete setup"}
],
}
)
return stages
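# Illustrative sketch (not from the original module): the dicts assembled above
# are the shape that process_setup_stages expects. A custom app could contribute
# extra stages through the "setup_wizard_stages" hook; the hook function and the
# task callable below are hypothetical.
def _example_custom_stages(args):
    def create_sample_records(args):
        pass  # any callable that accepts the parsed wizard args
    return [
        {
            "status": "Creating sample records",
            "fail_msg": "Failed to create sample records",
            "tasks": [
                {"fn": create_sample_records, "args": args, "fail_msg": "Failed to create sample records"}
            ],
        }
    ]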
@frappe.whitelist()
def setup_complete(args):
"""Calls hooks for `setup_wizard_complete`, sets home page as `desktop`
and clears cache. If wizard breaks, calls `setup_wizard_exception` hook"""
# Setup complete: do not throw an exception, let the user continue to desk
if cint(frappe.db.get_single_value("System Settings", "setup_complete")):
return {"status": "ok"}
args = parse_args(args)
stages = get_setup_stages(args)
is_background_task = frappe.conf.get("trigger_site_setup_in_background")
if is_background_task:
process_setup_stages.enqueue(stages=stages, user_input=args, is_background_task=True)
return {"status": "registered"}
else:
return process_setup_stages(stages, args)
@frappe.task()
def process_setup_stages(stages, user_input, is_background_task=False):
try:
frappe.flags.in_setup_wizard = True
current_task = None
for idx, stage in enumerate(stages):
frappe.publish_realtime(
"setup_task",
{"progress": [idx, len(stages)], "stage_status": stage.get("status")},
user=frappe.session.user,
)
for task in stage.get("tasks"):
current_task = task
task.get("fn")(task.get("args"))
except Exception:
handle_setup_exception(user_input)
if not is_background_task:
return {"status": "fail", "fail": current_task.get("fail_msg")}
frappe.publish_realtime(
"setup_task",
{"status": "fail", "fail_msg": current_task.get("fail_msg")},
user=frappe.session.user,
)
else:
run_setup_success(user_input)
if not is_background_task:
return {"status": "ok"}
frappe.publish_realtime("setup_task", {"status": "ok"}, user=frappe.session.user)
finally:
frappe.flags.in_setup_wizard = False
def update_global_settings(args):
if args.language and args.language != "English":
set_default_language(get_language_code(args.lang))
frappe.db.commit()
frappe.clear_cache()
update_system_settings(args)
update_user_name(args)
def run_post_setup_complete(args):
disable_future_access()
frappe.db.commit()
frappe.clear_cache()
def run_setup_success(args):
for hook in frappe.get_hooks("setup_wizard_success"):
frappe.get_attr(hook)(args)
install_fixtures.install()
def get_stages_hooks(args):
stages = []
for method in frappe.get_hooks("setup_wizard_stages"):
stages += frappe.get_attr(method)(args)
return stages
def get_setup_complete_hooks(args):
stages = []
for method in frappe.get_hooks("setup_wizard_complete"):
stages.append(
{
"status": "Executing method",
"fail_msg": "Failed to execute method",
"tasks": [
{"fn": frappe.get_attr(method), "args": args, "fail_msg": "Failed to execute method"}
],
}
)
return stages
def handle_setup_exception(args):
frappe.db.rollback()
if args:
traceback = frappe.get_traceback()
print(traceback)
for hook in frappe.get_hooks("setup_wizard_exception"):
frappe.get_attr(hook)(traceback, args)
def update_system_settings(args):
number_format = get_country_info(args.get("country")).get("number_format", "#,###.##")
# replace these as float number formats, as they have 0 precision
# and are currency number formats and not for floats
if number_format == "#.###":
number_format = "#.###,##"
elif number_format == "#,###":
number_format = "#,###.##"
system_settings = frappe.get_doc("System Settings", "System Settings")
system_settings.update(
{
"country": args.get("country"),
"language": get_language_code(args.get("language")) or "en",
"time_zone": args.get("timezone"),
"float_precision": 3,
"date_format": frappe.db.get_value("Country", args.get("country"), "date_format"),
"time_format": frappe.db.get_value("Country", args.get("country"), "time_format"),
"number_format": number_format,
"enable_scheduler": 1 if not frappe.flags.in_test else 0,
"backup_limit": 3, # Default for downloadable backups
}
)
system_settings.save()
def update_user_name(args):
first_name, last_name = args.get("full_name", ""), ""
if " " in first_name:
first_name, last_name = first_name.split(" ", 1)
if args.get("email"):
if frappe.db.exists("User", args.get("email")):
# running again
return
args["name"] = args.get("email")
_mute_emails, frappe.flags.mute_emails = frappe.flags.mute_emails, True
doc = frappe.get_doc(
{
"doctype": "User",
"email": args.get("email"),
"first_name": first_name,
"last_name": last_name,
}
)
doc.flags.no_welcome_mail = True
doc.insert()
frappe.flags.mute_emails = _mute_emails
update_password(args.get("email"), args.get("password"))
elif first_name:
args.update({"name": frappe.session.user, "first_name": first_name, "last_name": last_name})
frappe.db.sql(
"""update `tabUser` SET first_name=%(first_name)s,
last_name=%(last_name)s WHERE name=%(name)s""",
args,
)
if args.get("attach_user"):
attach_user = args.get("attach_user").split(",")
if len(attach_user) == 3:
filename, filetype, content = attach_user
_file = frappe.get_doc(
{
"doctype": "File",
"file_name": filename,
"attached_to_doctype": "User",
"attached_to_name": args.get("name"),
"content": content,
"decode": True,
}
)
_file.save()
fileurl = _file.file_url
frappe.db.set_value("User", args.get("name"), "user_image", fileurl)
if args.get("name"):
add_all_roles_to(args.get("name"))
def parse_args(args):
if not args:
args = frappe.local.form_dict
if isinstance(args, str):
args = json.loads(args)
args = frappe._dict(args)
# strip the whitespace
for key, value in args.items():
if isinstance(value, str):
args[key] = strip(value)
return args
def add_all_roles_to(name):
user = frappe.get_doc("User", name)
for role in frappe.db.sql("""select name from tabRole"""):
if role[0] not in [
"Administrator",
"Guest",
"All",
"Customer",
"Supplier",
"Partner",
"Employee",
]:
d = user.append("roles")
d.role = role[0]
user.save()
def disable_future_access():
frappe.db.set_default("desktop:home_page", "workspace")
frappe.db.set_value("System Settings", "System Settings", "setup_complete", 1)
# Enable onboarding after install
frappe.db.set_value("System Settings", "System Settings", "enable_onboarding", 1)
if not frappe.flags.in_test:
# remove all roles and add 'Administrator' to prevent future access
page = frappe.get_doc("Page", "setup-wizard")
page.roles = []
page.append("roles", {"role": "Administrator"})
page.flags.do_not_update_json = True
page.flags.ignore_permissions = True
page.save()
@frappe.whitelist()
def load_messages(language):
"""Load translation messages for given language from all `setup_wizard_requires`
javascript files"""
frappe.clear_cache()
set_default_language(get_language_code(language))
frappe.db.commit()
send_translations(get_messages_for_boot())
return frappe.local.lang
@frappe.whitelist()
def load_languages():
language_codes = frappe.db.sql(
"select language_code, language_name from tabLanguage order by name", as_dict=True
)
codes_to_names = {}
for d in language_codes:
codes_to_names[d.language_code] = d.language_name
return {
"default_language": frappe.db.get_value("Language", frappe.local.lang, "language_name")
or frappe.local.lang,
"languages": sorted(frappe.db.sql_list("select language_name from tabLanguage order by name")),
"codes_to_names": codes_to_names,
}
@frappe.whitelist()
def load_country():
from frappe.sessions import get_geo_ip_country
return get_geo_ip_country(frappe.local.request_ip) if frappe.local.request_ip else None
@frappe.whitelist()
def load_user_details():
return {
"full_name": frappe.cache().hget("full_name", "signup"),
"email": frappe.cache().hget("email", "signup"),
}
def prettify_args(args):
# remove attachments
for key, val in args.items():
if isinstance(val, str) and "data:image" in val:
filename = val.split("data:image", 1)[0].strip(", ")
size = round((len(val) * 3 / 4) / 1048576.0, 2)
args[key] = f"Image Attached: '{filename}' of size {size} MB"
pretty_args = []
for key in sorted(args):
pretty_args.append(f"{key} = {args[key]}")
return pretty_args
def email_setup_wizard_exception(traceback, args):
if not frappe.conf.setup_wizard_exception_email:
return
pretty_args = prettify_args(args)
message = """
#### Traceback
<pre>{traceback}</pre>
---
#### Setup Wizard Arguments
<pre>{args}</pre>
---
#### Request Headers
<pre>{headers}</pre>
---
#### Basic Information
- **Site:** {site}
- **User:** {user}""".format(
site=frappe.local.site,
traceback=traceback,
args="\n".join(pretty_args),
user=frappe.session.user,
headers=frappe.request.headers,
)
frappe.sendmail(
recipients=frappe.conf.setup_wizard_exception_email,
sender=frappe.session.user,
subject=f"Setup failed: {frappe.local.site}",
message=message,
delayed=False,
)
def log_setup_wizard_exception(traceback, args):
with open("../logs/setup-wizard.log", "w+") as setup_log:
setup_log.write(traceback)
setup_log.write(json.dumps(args))
def get_language_code(lang):
return frappe.db.get_value("Language", {"language_name": lang})
def enable_twofactor_all_roles():
all_role = frappe.get_doc("Role", {"role_name": "All"})
all_role.two_factor_auth = True
all_role.save(ignore_permissions=True)
def make_records(records, debug=False):
from frappe import _dict
from frappe.modules import scrub
if debug:
print("make_records: in DEBUG mode")
# LOG every success and failure
for record in records:
doctype = record.get("doctype")
condition = record.get("__condition")
if condition and not condition():
continue
doc = frappe.new_doc(doctype)
doc.update(record)
# ignore mandatory for root
parent_link_field = "parent_" + scrub(doc.doctype)
if doc.meta.get_field(parent_link_field) and not doc.get(parent_link_field):
doc.flags.ignore_mandatory = True
savepoint = "setup_fixtures_creation"
try:
frappe.db.savepoint(savepoint)
doc.insert(ignore_permissions=True, ignore_if_duplicate=True)
except Exception as e:
frappe.clear_last_message()
frappe.db.rollback(save_point=savepoint)
exception = record.get("__exception")
if exception:
config = _dict(exception)
if isinstance(e, config.exception):
config.handler()
else:
show_document_insert_error()
else:
show_document_insert_error()
def show_document_insert_error():
print("Document Insert Error")
print(frappe.get_traceback())
frappe.log_error("Exception during Setup")
| {
"content_hash": "66b060f4c636341f7b974fd03cfccfb0",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 97,
"avg_line_length": 26.011286681715575,
"alnum_prop": 0.6807255055107176,
"repo_name": "frappe/frappe",
"id": "408776fcb93329ab306055dffe519289fbf303db",
"size": "11621",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/desk/page/setup_wizard/setup_wizard.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250850"
},
{
"name": "JavaScript",
"bytes": "2523337"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3618097"
},
{
"name": "SCSS",
"bytes": "261690"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
} |
import os, time
import libxml2
from sfatables.globals import sfatables_config
from sfatables.pretty import Pretty
from sfatables.command import Command
class List(Command):
def __init__(self):
self.options = [('-L','--list')]
self.help = 'List a chain'
self.key='list_rule'
self.matches = False
self.targets = False
return
def get_info(self, type, xmlextension_path):
xmldoc = libxml2.parseFile(xmlextension_path)
p = xmldoc.xpathNewContext()
ext_name_node = p.xpathEval("/%s/@name"%type)
ext_name = ext_name_node[0].content
name_nodes = p.xpathEval("//rule/argument[value!='']/name")
value_nodes = p.xpathEval("//rule/argument[value!='']/value")
element_nodes = p.xpathEval("//argument[value!='']/parent::rule/@element")
if (len(element_nodes)>1):
raise Exception("Invalid rule %s contains multiple elements."%xmlextension_path)
element = []
argument_str = ""
if element_nodes:
element = element_nodes[0].content
names = [n.content for n in name_nodes]
values = [v.content for v in value_nodes]
name_values = zip(names,values)
name_value_pairs = map(lambda (n,v):n+'='+v, name_values)
argument_str = ",".join(name_value_pairs)
p.xpathFreeContext()
xmldoc.freeDoc()
return {'name':ext_name, 'arguments':argument_str, 'element':element}
def get_rule_list(self, chain_dir_path):
broken_semantics = os.walk(chain_dir_path)
rule_numbers = {}
for (root, dirs, files) in broken_semantics:
for file in files:
if (file.startswith('sfatables')):
(magic,number,type) = file.split('-')
rule_numbers[int(number)]=1
rule_list = rule_numbers.keys()
rule_list.sort()
return rule_list
def call(self, command_options, match_options, target_options):
if (len(command_options.args) < 1):
print "Please specify the name of the chain you would like to list, e.g. sfatables -L INCOMING."
return
chain = command_options.args[0]
chain_dir = os.path.join(sfatables_config, chain)
rule_list = self.get_rule_list(chain_dir)
pretty = Pretty(['Rule','Match','Arguments','Target','Element','Arguments'])
for number in rule_list:
match_file = "sfatables-%d-%s"%(number,'match')
target_file = "sfatables-%d-%s"%(number,'target')
match_path = sfatables_config + '/' + chain + '/' + match_file
target_path = sfatables_config + '/' + chain + '/' + target_file
match_info = self.get_info ('match',match_path)
target_info = self.get_info ('target',target_path)
pretty.push_row(["%d"%number,
match_info['name'],
match_info['arguments'],
target_info['name'],
target_info['element'],
target_info['arguments']])
pretty.pprint()
| {
"content_hash": "cb249f58d95b4a164ab5be025e7866cf",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 108,
"avg_line_length": 31.54901960784314,
"alnum_prop": 0.5540708514605345,
"repo_name": "yippeecw/sfa",
"id": "cea40bb79d8a1ba1a101b149b9193935c9903b77",
"size": "3218",
"binary": false,
"copies": "2",
"ref": "refs/heads/geni-v3",
"path": "sfatables/commands/List.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "424"
},
{
"name": "Makefile",
"bytes": "14208"
},
{
"name": "Python",
"bytes": "1398912"
},
{
"name": "Shell",
"bytes": "19422"
},
{
"name": "XSLT",
"bytes": "15293"
}
],
"symlink_target": ""
} |
import os
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
"""
Example of a Sphinx Directive.
This is useful for working with large blocks of text. Leaving it here as a reference.
Roles are more useful for inline text, which is what we need for tickets.
class Ticket(Directive):
required_arguments = 1
def run(self):
ticket_url = "<a href=\"https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn=" + str(self.arguments[0]) + "\" target=\"blank\">link text</a>"
paragraph_node = nodes.paragraph(text=ticket_url)
return [paragraph_node]
def setup(app):
app.add_directive("ticket", Ticket)
"""
# Custom role reference: https://doughellmann.com/blog/2010/05/09/defining-custom-roles-in-sphinx/
def make_link_node(rawtext, app, type, slug, options):
"""Create a link to an HTCondor ticket.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param type: Link type (issue, changeset, etc.)
:param slug: ID of the ticket to link to
:param options: Options dictionary passed to role func.
"""
base = "https://htcondor-wiki.cs.wisc.edu/index.cgi/tktview?tn="
ref = base + slug
# set_classes(options)
node = nodes.reference(rawtext, "(Ticket #" + slug + ")", refuri=ref, **options)
return node
def ticket_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
try:
ticket_id = int(text)
if ticket_id <= 1000:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
            'HTCondor ticket number must be a number greater than 1000; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
app = inliner.document.settings.env.app
node = make_link_node(rawtext, app, 'issue', str(ticket_id), options)
return [node], []
def setup(app):
app.add_role("ticket", ticket_role)
| {
"content_hash": "38597e311e8f881d56e93d0e85fd99e9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 151,
"avg_line_length": 34.63793103448276,
"alnum_prop": 0.6635141861622698,
"repo_name": "htcondor/htcondor",
"id": "5a7a5cde18b1ea07fcfa442e37ce10db9ba58423",
"size": "2009",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/extensions/ticket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "71055"
},
{
"name": "Awk",
"bytes": "9454"
},
{
"name": "Batchfile",
"bytes": "146264"
},
{
"name": "C",
"bytes": "1651049"
},
{
"name": "C++",
"bytes": "31790435"
},
{
"name": "CMake",
"bytes": "468527"
},
{
"name": "CSS",
"bytes": "9738"
},
{
"name": "Dockerfile",
"bytes": "75955"
},
{
"name": "Fortran",
"bytes": "1279"
},
{
"name": "HTML",
"bytes": "59724"
},
{
"name": "Java",
"bytes": "43977"
},
{
"name": "JavaScript",
"bytes": "130293"
},
{
"name": "M4",
"bytes": "20440"
},
{
"name": "Makefile",
"bytes": "68811"
},
{
"name": "Perl",
"bytes": "3761627"
},
{
"name": "PowerShell",
"bytes": "5412"
},
{
"name": "Python",
"bytes": "1593654"
},
{
"name": "Roff",
"bytes": "2353"
},
{
"name": "Shell",
"bytes": "579393"
},
{
"name": "VBScript",
"bytes": "8734"
},
{
"name": "Yacc",
"bytes": "13532"
}
],
"symlink_target": ""
} |
from collections import deque
from enum import Enum
from typing import *
from asq.initiators import query
from asq.queryables import Queryable
from twisted.internet.defer import succeed
from mdstudio.deferred.chainable import chainable, Chainable
from mdstudio.deferred.return_value import return_value
from mdstudio.session import GlobalSession
query = query
class CursorRefreshingError(Exception):
def __init__(self):
self.message = "Yield or wait for the callback of the previous result."
class Cursor:
class Direction(Enum):
Forward = 1
Backward = -1
# type: str
_next = None
# type: str
_previous = None
# type: deque
_data = deque()
# type: bool
_alive = True
# type: int
_returned = None
# type: str
_uri = None
# type: dict
_claims = None
# type: dict
_current = 0
def __init__(self, response, claims=None, session=None):
self._session = session or GlobalSession.session
self._claims = claims
self._next = response['paging'].get('next', None)
self._previous = response['paging'].get('previous', None)
self._alive = (self._next is not None or self._previous is not None)
self._data = deque(response['results'])
self._uri = response['paging']['uri']
self._refreshing = False
self._current = 0
def __iter__(self):
return self
def next(self):
if self._refreshing:
raise CursorRefreshingError()
len_data = len(self._data)
if len_data - self._current > 1:
result = self._data[self._current]
self._current += 1
return Chainable(succeed(result))
elif self.alive and self._next:
self._refreshing = True
return self._refresh(self.Direction.Forward)
elif len_data:
result = self._data[self._current]
self._current += 1
return Chainable(succeed(result))
else:
raise StopIteration
def previous(self):
raise NotImplementedError()
# python 3 compatibility
__next__ = next
@chainable
def for_each(self, func):
# type: (Callable[[Dict[str,Any]], None]) -> None
for o in self:
o = yield o
func(o)
def query(self):
# type: () -> Queryable
return self.to_list().addCallback(lambda l: query(l))
@chainable
def to_list(self):
results = []
for doc in self:
results.append((yield doc))
return_value(results)
@property
def alive(self):
# type: () -> bool
return self._alive
@staticmethod
@chainable
def from_uri(uri, request, claims=None, session=None):
session = session or GlobalSession.session
return_value(Cursor((yield session.call(uri, request, claims)), session=session, claims=claims))
@chainable
def _refresh(self, direction):
if direction == self.Direction.Forward:
more = yield self._session.call(self._uri, {
'next': self._next
})
last_entry = self._data[self._current]
self._data = more['results']
self._current = 0
else:
more = yield self._session.call(self._uri, {
'previous': self._previous
})
last_entry = self._data[self._current - 1]
self._data = more['results'] + self._data[:self._current]
self._current += len(more['results'])
self._next = more['paging'].get('next', None)
self._previous = more['paging'].get('previous', None)
self._alive = (self._next is not None or self._previous is not None)
self._refreshing = False
return_value(last_entry)
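# Minimal usage sketch (not part of the original module): the endpoint URI and
# request payload are hypothetical, and `session` stands for an mdstudio session
# object exposing a `call` method.
@chainable
def _example_drain_cursor(session):
    cursor = yield Cursor.from_uri('mdgroup.db.endpoint.find_many',
                                   {'collection': 'results'},
                                   session=session)
    docs = yield cursor.to_list()  # follows 'next' pages until the cursor is exhausted
    return_value(len(docs))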
| {
"content_hash": "727eb49d659358c94d42a3ca8c98c185",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 104,
"avg_line_length": 27.21276595744681,
"alnum_prop": 0.5783163930153766,
"repo_name": "MD-Studio/MDStudio",
"id": "75dfb09480c00d7afc7193d3f9f5be6f9023e505",
"size": "3852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mdstudio/mdstudio/api/cursor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70059"
},
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "48489"
},
{
"name": "JavaScript",
"bytes": "2621"
},
{
"name": "Makefile",
"bytes": "6901"
},
{
"name": "Python",
"bytes": "711926"
},
{
"name": "Shell",
"bytes": "6139"
},
{
"name": "TypeScript",
"bytes": "66514"
}
],
"symlink_target": ""
} |
from datetime import datetime
class ExcludeError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return "{0}".format(self.error)
class ExcludeMixin():
"""Common functionality to handle exclusion dates
Instance Variables:
exclude -- a list of Date objects
date_format -- the format to use with strptime and strftime
"""
def __init__(self, excludes, date_format='%d/%m/%Y'):
self.date_format = date_format
if excludes:
self.exclude = []
try:
for d in excludes:
self.exclude.append(
datetime.strptime(d, self.date_format).date())
except (TypeError, ValueError):
raise ExcludeError('Bad value in excludes')
else:
self.exclude = []
def is_excluded(self, check_date):
"""Should date be excluded?
Arguments:
check_date -- date to check
Returns:
true if check_date is in the list of excludes; false otherwise
"""
try:
return check_date in self.exclude
except AttributeError:
return False
def excludes_as_string(self):
excludes = []
for d in self.exclude:
excludes.append(datetime.strftime(d, self.date_format))
return excludes
def add_exclude(self, exclude):
"""Add exclusions
Arguments:
exclude -- Date to exclude
"""
try:
self.exclude.append(
datetime.strptime(exclude, self.date_format).date())
except (TypeError, ValueError):
raise ExcludeError('Bad value adding exclusion')
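# Usage sketch (illustrative only, not part of the original module): a rota
# entity mixes ExcludeMixin in and consults is_excluded when generating dates.
class _ExampleRota(ExcludeMixin):
    def needs_cover(self, check_date):
        # a date needs cover only when it is not in the exclusion list
        return not self.is_excluded(check_date)
# e.g. _ExampleRota(['25/12/2023']).is_excluded(
#          datetime.strptime('25/12/2023', '%d/%m/%Y').date())  # -> True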
| {
"content_hash": "217f4a31d09169866b93085cff8a6bc1",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 70,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.5619596541786743,
"repo_name": "mikepyne/RotaGenerator",
"id": "b7ab9bfe1ad93419aa7e5e9ffe66ce5a1bd4dda9",
"size": "1735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rota/exclude.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38557"
}
],
"symlink_target": ""
} |
import types
from django.template import loader
from .base import AutocompleteBase
class AutocompleteTemplate(AutocompleteBase):
"""
This extension of :py:class:`~.base.AutocompleteBase` supports two new
attributes:
.. py:attribute:: choice_template
Name of the template to use to render a choice in the autocomplete. If
none is specified, then :py:class:`~.base.AutocompleteBase` will render
the choice.
.. py:attribute:: autocomplete_template
Name of the template to use to render the autocomplete. Again, fall
back on :py:class:`~.base.AutocompleteBase` if this is None.
"""
choice_template = None
autocomplete_template = None
def get_base_context(self):
"""
Return a dict to use as base context for all templates.
It contains:
- ``{{ request }}`` if available,
- ``{{ autocomplete }}`` the "self" instance.
"""
return {
'request': self.request,
'autocomplete': self,
}
def render_template_context(self, template, extra_context=None):
"""
Render ``template`` with base context and ``extra_context``.
"""
context = self.get_base_context()
context.update(extra_context or {})
return loader.render_to_string(template, context)
def autocomplete_html(self):
"""
Render :py:attr:`autocomplete_template` with base context and ``{{
choices }}``. If :py:attr:`autocomplete_template` is None then fall
back on :py:meth:`.base.AutocompleteBase.autocomplete_html`.
"""
if self.autocomplete_template:
choices = self.choices_for_request()
return self.render_template_context(self.autocomplete_template,
{'choices': choices})
else:
return super(AutocompleteTemplate, self).autocomplete_html()
def choice_html(self, choice):
"""
Render :py:attr:`choice_template` with base context and ``{{ choice
}}``. If :py:attr:`choice_template` is None then fall back on
:py:meth:`.base.AutocompleteBase.choice_html()`.
"""
if self.choice_template:
return self.render_template_context(self.choice_template,
{'choice': choice})
else:
return super(AutocompleteTemplate, self).choice_html(choice)
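# Illustrative subclass (an assumption, not part of this package): pointing
# choice_template at a project template customises how each choice is rendered.
# The template path is hypothetical, and choices_for_request would normally be
# supplied by a model or list mixin combined with this class.
class _ExampleAutocomplete(AutocompleteTemplate):
    choice_template = 'myapp/autocomplete/choice.html'
    def choices_for_request(self):
        return []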
| {
"content_hash": "803196ea3aeb41c6ab2e806b8e6f79f8",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 33.04109589041096,
"alnum_prop": 0.6135986733001658,
"repo_name": "Suite5/DataColibri",
"id": "71f2c8d67ea184ed433302027738be0ede1b6db4",
"size": "2412",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "autocomplete_light/autocomplete/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "381"
},
{
"name": "CSS",
"bytes": "944246"
},
{
"name": "HTML",
"bytes": "566711"
},
{
"name": "JavaScript",
"bytes": "1510227"
},
{
"name": "PHP",
"bytes": "972"
},
{
"name": "Python",
"bytes": "1046512"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
from lib.action import BitBucketAction
class ArchiveRepoAction(BitBucketAction):
def run(self, repo):
"""
Archive a Repository, returns path to
archived repository
"""
bb = self._get_client(repo=repo)
success, archive_path = bb.repository.archive()
return archive_path
| {
"content_hash": "756655fe04d99105cd68c75541307365",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 27.583333333333332,
"alnum_prop": 0.6374622356495468,
"repo_name": "pearsontechnology/st2contrib",
"id": "089259e48a94b48875a5dc20b9bed259a578cb0b",
"size": "331",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "packs/bitbucket/actions/archive_repo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "HTML",
"bytes": "675"
},
{
"name": "Makefile",
"bytes": "4592"
},
{
"name": "Python",
"bytes": "913112"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "16263"
}
],
"symlink_target": ""
} |
import itertools
from typing import Iterable, Iterator, Optional, TypeVar
T = TypeVar("T")
def window(
iterator: Iterable[T], behind: int = 0, ahead: int = 0
) -> Iterator[tuple[Optional[T], ...]]:
"""
Sliding window for an iterator.
Example:
        >>> for prev, i, nxt in window(range(4), 1, 1):
>>> print(prev, i, nxt)
None 0 1
0 1 2
1 2 3
2 3 None
"""
# TODO: move into utils
iters: list[Iterator[Optional[T]]] = list(
itertools.tee(iterator, behind + 1 + ahead)
)
for i in range(behind):
iters[i] = itertools.chain((behind - i) * [None], iters[i])
for i in range(ahead):
iters[-1 - i] = itertools.islice(
itertools.chain(iters[-1 - i], (ahead - i) * [None]), (ahead - i), None
)
return zip(*iters)
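# Quick usage sketch (not in the original module): materialising the window over
# a short range shows the None padding at both ends.
def _example_window():
    return list(window(range(4), behind=1, ahead=1))
    # -> [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None)]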
| {
"content_hash": "2c1e243c5b700b4497784df700d4d7c3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 83,
"avg_line_length": 26.3125,
"alnum_prop": 0.5415676959619953,
"repo_name": "mitmproxy/mitmproxy",
"id": "dca71cbd6c8a828379e5edced89b1f7e97ffdba0",
"size": "842",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "mitmproxy/utils/sliding_window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "Dockerfile",
"bytes": "618"
},
{
"name": "HTML",
"bytes": "10672"
},
{
"name": "JavaScript",
"bytes": "134086"
},
{
"name": "Kaitai Struct",
"bytes": "3670"
},
{
"name": "Less",
"bytes": "21203"
},
{
"name": "PowerShell",
"bytes": "258"
},
{
"name": "Python",
"bytes": "2374404"
},
{
"name": "Shell",
"bytes": "3013"
},
{
"name": "TypeScript",
"bytes": "279000"
}
],
"symlink_target": ""
} |
from __future__ import division
import pandas as pd
import numpy as np
import networkx as nx
import geopandas as gp
from osmnx.utils import make_str
from shapely.geometry import LineString, Point
from .osmnx_simplify_overwrite import simplify_graph as simp_g
__all__ = ['prepare_centroids_network',
           'prepare_centroids_network2',
           'prepare_gdf_network',
           'gdf_to_simplified_multidigraph',
           'multigraph_to_graph',
           'graph_to_df',
           'prepare_adm_background',
           'create_link_capacity',
           'check_connected_components',
           'read_dotfile']
def prepare_centroids_network2(centroids, network, net_from='', net_to=''):
'''
Take transport network and centroids shapefiles as inputs
then returns a geodataframe of the transport network with
indicated centroid nodes
Parameters
------------
    centroids: list
        list of strings of the centroid shapefiles' addresses+filenames
    network: str
        string of network shapefile's address+name
    net_from: str
        name of the column combined with net_to to build a 'road' identifier when the network has none
    net_to: str
        name of the column combined with net_from to build a 'road' identifier when the network has none
    Returns
    ------------
    points: list
        list of geodataframes (Points), one per centroid shapefile
gdf_node_pos: GeoDataFrame
geodataframe (Points) of nodes obtained from all links in the network shapefile.
The IsCentroid information is attached if a node is the closest node from a centroid.
gdf: GeoDataFrame
geodataframe (LineString) of the original network, containing information about the start
node (FNODE) and end node (TNODE). The IsCentroid information is attached as well.
'''
#read the network shapefile into geodataframe
gdf = prepare_gdf_network(network)
if not 'road' in gdf.columns:
gdf['road'] = gdf[net_from] + gdf[net_to]
#take all nodes from the network geodataframe into dataframe
df_node_pos = gdf[['Start_pos', 'FNODE_', 'road']].rename(columns={'Start_pos': 'pos', 'FNODE_': 'Node', 'road': 'road' }).append(
gdf[['End_pos', 'TNODE_', 'road']].rename(columns={'End_pos': 'pos', 'TNODE_': 'Node', 'road': 'road' }))
#drop all duplicate nodes
df_node_pos = df_node_pos.drop_duplicates(subset='Node')
#change the column name
df_node_pos.columns = ['geometry', 'Node', 'road']
#check if there is any penalty, store it in df_node_pos
if 'penalty' in gdf.columns:
df_node_pos['penalty'] = df_node_pos.Node.apply(lambda node: max(gdf.loc[(gdf['TNODE_']==node) | (gdf['FNODE_']==node) ]['penalty']))
#add column of POINT type for the geometry
df_node_pos['pointgeo'] = [Point(xy) for xy in df_node_pos.geometry]
#reindex the dataframe
df_node_pos.index = range(len(df_node_pos))
#save the longitude (x) and latitude(y) separately
xy = np.array(df_node_pos['geometry'].tolist())
x = [xy[i,0] for i in range(len(xy))]
y = [xy[i,1] for i in range(len(xy))]
df_node_pos['x'] = x
df_node_pos['y'] = y
#create geodataframe of the network points from dataframe
gdf_node_pos = gp.GeoDataFrame(df_node_pos, crs=gdf.crs, geometry=df_node_pos.pointgeo)
gdf_node_pos['osmid'] = gdf_node_pos.index
centroid_points = {}
points = []
for n, centroid in enumerate(centroids):
#read the centroid shapefile into geodataframe
gdf_points = gp.read_file(centroid)
#reference the Node ID of the network to the centroids by selecting the nearest node from the centroid points
if 'mode' in gdf.columns:
gdf_node_pos_road = gdf_node_pos.loc[gdf_node_pos['road'].isnull()==False]
gdf_node_pos_road.index = np.arange(0,len(gdf_node_pos_road),1)
gdf_points['Node'] = gdf_points.geometry.apply(lambda g: gdf_node_pos_road.iloc[gdf_node_pos_road.distance(g).idxmin()].Node)
else:
gdf_points['Node'] = gdf_points.geometry.apply(lambda g: gdf_node_pos.iloc[gdf_node_pos.distance(g).idxmin()].Node)
nodes = gdf_points['Node'].tolist()
numbers = [n+1] * len(nodes)
centroid_points.update(dict(zip(nodes, numbers)))
points.append(gdf_points)
gdf_node_pos['IsCentroid'] = gdf_node_pos.Node.apply(lambda g: centroid_points[g] if g in centroid_points.keys() else 0)
#adding Centroid information to the gdf
gdf['IsCentroid1'] = gdf.TNODE_.apply(lambda g: centroid_points[g] if g in centroid_points.keys() else 0)
gdf['IsCentroid2'] = gdf.FNODE_.apply(lambda g: centroid_points[g] if g in centroid_points.keys() else 0)
gdf['IsCentroid'] = gdf[['IsCentroid1','IsCentroid2']].max(axis=1)
del gdf['IsCentroid1']
del gdf['IsCentroid2']
#create unique osmid for the network LineString GeoDataFrame
gdf['osmid'] = gdf.index.map(lambda x: x + 10000)
return points, gdf_node_pos, gdf
def prepare_gdf_network(network):
'''
Converting transport network shapefile into GeoDataFrame
Parameters
------------
network: str
string of network shapefile's address+filename
Returns
------------
gdf: GeoDataFrame
geodataframe of network with linestring, coordinate of start position, and
coordinate of end position recorded
'''
# Load network shapefile into GeoDataFrame
gdf = gp.read_file(network)
# !!! Add column capacity for min max cut flow algorithm
# gdf['capacity'] = gdf['RD_CLASS']
# shapefile needs to include minimal: geometry linestring and the length computed (e.g. in QGIS)
if 'length' not in gdf.columns:
print('Shapefile is invalid: length not in attributes:\n{}'.format(gdf.columns))
print("length will be automatically generated based on LineString's length")
gdf['length'] = gdf['geometry'].apply(lambda line: line.length)
if not gdf.geometry.map(lambda x: type(x) == LineString).all():
s_invalid_geo = gdf.geometry[gdf.geometry.map(lambda x: type(x) == LineString)]
raise Exception('Shapefile is invalid: geometry not all linestring \n{}'.format(s_invalid_geo))
# Compute the start- and end-position based on linestring
gdf['Start_pos'] = gdf.geometry.apply(lambda x: x.coords[0])
gdf['End_pos'] = gdf.geometry.apply(lambda x: x.coords[-1])
# Create Series of unique nodes and their associated position
s_points = gdf.Start_pos.append(gdf.End_pos).reset_index(drop=True)
s_points = s_points.drop_duplicates()
# Add index of start and end node of linestring to geopandas DataFrame
df_points = pd.DataFrame(s_points, columns=['Start_pos'])
df_points['FNODE_'] = df_points.index
gdf = pd.merge(gdf, df_points, on='Start_pos', how='inner')
df_points = pd.DataFrame(s_points, columns=['End_pos'])
df_points['TNODE_'] = df_points.index
gdf = pd.merge(gdf, df_points, on='End_pos', how='inner')
return gdf
def prepare_centroids_network(centroid, network):
'''
Take transport network and centroids shapefiles as inputs
then returns a geodataframe of the transport network with
indicated centroid nodes
Parameters
------------
centroid: str
string of centroid shapefile's address+filename
network: str
string of network shapefile's address+name
Returns
------------
gdf_points: GeoDataFrame
geodataframe (Points) of centroids shapefile
gdf_node_pos: GeoDataFrame
geodataframe (Points) of nodes obtained from all links in the network shapefile.
The IsCentroid information is attached if a node is the closest node from a centroid.
gdf: GeoDataFrame
geodataframe (LineString) of the original network, containing information about the start
node (FNODE) and end node (TNODE). The IsCentroid information is attached as well.
'''
#read the centroid shapefile into geodataframe
gdf_points = gp.read_file(centroid)
#read the network shapefile into geodataframe
gdf = prepare_gdf_network(network)
#take all nodes from the network geodataframe into dataframe
df_node_pos = gdf[['Start_pos', 'FNODE_', 'road']].rename(columns={'Start_pos': 'pos', 'FNODE_': 'Node', 'road': 'road' }).append(
gdf[['End_pos', 'TNODE_', 'road']].rename(columns={'End_pos': 'pos', 'TNODE_': 'Node', 'road': 'road' }))
#drop all duplicate nodes
df_node_pos = df_node_pos.drop_duplicates(subset='Node')
#change the column name
df_node_pos.columns = ['geometry', 'Node', 'road']
#check if there is any penalty, store it in df_node_pos
if 'penalty' in gdf.columns:
df_node_pos['penalty'] = df_node_pos.Node.apply(lambda node: max(gdf.loc[(gdf['TNODE_']==node) | (gdf['FNODE_']==node) ]['penalty']))
#add column of POINT type for the geometry
df_node_pos['pointgeo'] = [Point(xy) for xy in df_node_pos.geometry]
#reindex the dataframe
df_node_pos.index = range(len(df_node_pos))
#save the longitude (x) and latitude(y) separately
xy = np.array(df_node_pos['geometry'].tolist())
x = [xy[i,0] for i in range(len(xy))]
y = [xy[i,1] for i in range(len(xy))]
df_node_pos['x'] = x
df_node_pos['y'] = y
#create geodataframe of the network points from dataframe
gdf_node_pos = gp.GeoDataFrame(df_node_pos, crs=gdf.crs, geometry=df_node_pos.pointgeo)
gdf_node_pos['osmid'] = gdf_node_pos.index
#reference the Node ID of the network to the centroids by selecting the nearest node from the centroid points
if 'mode' in gdf.columns:
gdf_node_pos_road = gdf_node_pos.loc[gdf_node_pos['road'].isnull()==False]
gdf_node_pos_road.index = np.arange(0,len(gdf_node_pos_road),1)
gdf_points['Node'] = gdf_points.geometry.apply(lambda g: gdf_node_pos_road.iloc[gdf_node_pos_road.distance(g).idxmin()].Node)
else:
gdf_points['Node'] = gdf_points.geometry.apply(lambda g: gdf_node_pos.iloc[gdf_node_pos.distance(g).idxmin()].Node)
OD = gdf_points['Node'].tolist()
gdf_node_pos['IsCentroid'] = gdf_node_pos.Node.apply(lambda g: 1 if g in OD else 0)
#adding Centroid information to the gdf
gdf['IsCentroid1'] = gdf.TNODE_.apply(lambda g: 1 if g in OD else 0)
gdf['IsCentroid2'] = gdf.FNODE_.apply(lambda g: 1 if (g in OD) else 0)
gdf['IsCentroid'] = gdf['IsCentroid1'] + gdf['IsCentroid2']
del gdf['IsCentroid1']
del gdf['IsCentroid2']
#create unique osmid for the network LineString GeoDataFrame
gdf['osmid'] = gdf.index.map(lambda x: x + 10000)
return gdf_points, gdf_node_pos, gdf
def gdf_to_simplified_multidigraph(gdf_node_pos, gdf, undirected = True):
'''
Simplifying transport network (in GeoDataFrame format) by removing all nodes which are
neither intersections, end/start nodes, nor centroids. This reduces the computation time
needed to conduct any analysis later.
Parameters
------------
gdf_node_pos: GeoDataFrame
geodataframe (Points) of nodes obtained from all links in the network shapefile.
The IsCentroid information is attached if a node is the closest node from a centroid.
gdf: GeoDataFrame
geodataframe (LineString) of the original transport network.
Returns
------------
G2: MultiGraph, MultiDiGraph
Multi(Di)Graph Networkx object of simplified transport network. Multi(Di)Graph network type
is chosen because the graph simplification algorithm only works with this type of network.
'''
#create a MultiDiGraph object
G2 = nx.MultiDiGraph(crs=gdf.crs)
#create nodes on the MultiDiGraph
for index, row in gdf_node_pos.iterrows():
if 'penalty' in row.index:
c = {'x': row.x, 'y': row.y, 'IsCentroid' : row.IsCentroid, 'ID' : row.Node, 'osmid': row.osmid, 'penalty': row.penalty}
G2.add_node(row.Node, **c)
else:
c = {'x': row.x, 'y': row.y, 'IsCentroid' : row.IsCentroid, 'ID' : row.Node, 'osmid': row.osmid}
G2.add_node(row.Node, **c)
#create bidirectional edges on top of the MultiDiGraph nodes
#based on the FNODE and TNODE information of the transport network GeoDataFrame
for index, row in gdf.iterrows():
dict_row = row.to_dict()
if 'geometry' in dict_row:
del dict_row['geometry']
G2.add_edge(dict_row['FNODE_'], dict_row['TNODE_'], **dict_row)
#simplify the MultiDiGraph using OSMNX's overwritten function
#there should be no NaN values in the geodataframe, else it will return an error
G2 = simp_g(G2)
#make a name
G2.graph['name'] = 'graph'
if undirected:
G2 = G2.to_undirected()
return G2
def multigraph_to_graph(digraph):
'''
Change Multi(Di)Graph object to Graph. Graph is undirected, simple graph type without parallel edges
(while Multi(Di)Graph may have parallel edges). This code removes duplicate edges.
Parameters
------------
G: MultiGraph, MultiDiGraph
Multi(Di)Graph Networkx object of simplified transport network
Returns
------------
G2_new_tograph: Graph
Graph Networkx object
'''
#create empty Graph object
graph = nx.Graph()
#transform the nodes into Graph, preserving all attributes
for u,v in digraph.nodes(data=True):
graph.add_node(u, **v)
#transform the edges into Graph, preserving all attributes
c = set()
for u, v, data in digraph.edges(data=True):
d = (u, v)
#check if the edge that connects (u,v) exists in the graph
if d in c:
# replace the old edge with the new edge if
# the new edge has longer length
edge = graph.edges[u,v]
if data['length'] > edge['length']:
graph.remove_edge(u,v)
graph.add_edge(u,v,**data)
else:
graph.add_edge(u,v,**data)
c.add(d)
return graph
def graph_to_df(G2_simplified):
'''
    Change a Graph Networkx object back to a GeoDataFrame. This helps for visualization purposes,
as GeoDataFrame has more flexibility in displaying the transport network.
Parameters
------------
G2_simplified: Graph
(Simplified) Graph Networkx object
Returns
------------
gdf_edges: GeoDataFrame
GeoDataFrame (Linestring) of the Graph Networkx object
'''
#get undirected Graph from MultiDiGraph
G2_simplified3 = G2_simplified.copy()
#create a copy for safer operation
G_save = G2_simplified3.copy()
#create dictionaries of nodes from the undirected Graph
nodes = {node:data for node, data in G_save.nodes(data=True)}
#create GeoDataFrame of nodes
gdf_nodes = gp.GeoDataFrame(nodes).T
#change the CRS (coordinate reference system) into EPSG:4326
gdf_nodes.crs = {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'}
#define the geometry attribute of the GeoDataFrame
gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
gdf_nodes = gdf_nodes.drop(['x', 'y'], axis=1)
#ensure the osmid is in integer
gdf_nodes['osmid'] = gdf_nodes['osmid'].astype(np.int64)
#remove all nodes that do not have geometry information
for col in [c for c in gdf_nodes.columns if not c == 'geometry']:
gdf_nodes[col] = gdf_nodes[col].fillna('').map(make_str)
#create list of edges
edges = []
for u, v, data in G_save.edges(data=True):
edge_details = data
edge_details['FNODE_'] = u
edge_details['TNODE_'] = v
# if edge doesn't already have a geometry attribute, create one now
if not 'geometry' in data:
point_u = Point((G_save.node[u]['x'], G_save.node[u]['y']))
point_v = Point((G_save.node[v]['x'], G_save.node[v]['y']))
edge_details['geometry'] = LineString([point_u, point_v])
edges.append(edge_details)
#create GeoDataFrame of edges
gdf_edges = gp.GeoDataFrame(edges)
#change the CRS into EPSG:4326
gdf_edges.crs = {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'}
#if any element of the GeoDataFrame contains more than one information (due to the graph simplification)
for i in gdf_edges.columns:
#select only one element which is most relevant
gdf_edges[i] = gdf_edges[i].apply(lambda x: _last_list_element(x))
#delete all irrelevant columns
del gdf_edges['End_pos']
# del gdf_edges['FNODE_']
# del gdf_edges['TNODE_']
# del gdf_edges['osmid']
del gdf_edges['Start_pos']
# del gdf_edges['capacity']
return gdf_edges
def _last_list_element(x):
#utility for multidigraph_to_shapefile function
#take the last element of a list
if type(x) == list:
x = x[-1]
return x
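def _example_pipeline(centroid_shp, network_shp):
    # Illustrative end-to-end sketch (not part of the original module): the two
    # shapefile paths are supplied by the caller, and the network shapefile is
    # assumed to carry the attributes these functions expect (e.g. a 'road' column).
    gdf_points, gdf_node_pos, gdf = prepare_centroids_network(centroid_shp, network_shp)
    multidigraph = gdf_to_simplified_multidigraph(gdf_node_pos, gdf, undirected=True)
    graph = multigraph_to_graph(multidigraph)
    # back to a GeoDataFrame of simplified edges, e.g. for plotting
    return graph_to_df(graph)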
def prepare_adm_background(adm_csv, adm_shp, csv_column_list):
'''
Preparing geospatial administrative area background in GeoDataFrame. Merge various socioeconomic
information from another csv. The csv and the shp should have at least one column with identical values
to map to each other.
Parameters
------------
adm_csv: str
string of socioeconomic csv file address+filename
adm_shp: str
string of administrative area csv file address+filename
csv_column_list: list
list of string of column names from the adm_csv file that want to be added into the
resulting GeoDataFrame
Returns
------------
district_gdf2: GeoDataFrame
GeoDataFrame (Polygon) of administrative area and its corresponding socioeconomic data
'''
#read district data statistics
district_df = pd.read_csv(adm_csv)
#read adm level 2 district shapefile
district_gdf = gp.read_file(adm_shp)
#extract only the intended columns
district_df2 = district_df[csv_column_list]
    #rename the 'Code' column to 'HASC_2' so that it can be merged with the shp file
district_df2.rename(columns={'Code':'HASC_2'}, inplace=True)
#combine gdf and df
district_gdf2 = pd.merge(district_gdf, district_df2, on='HASC_2')
return district_gdf2
def create_link_capacity(G, item1, item2='length', calctype='multiplication'):
'''
    Preparing capacity of a link for a unimodal transport network (i.e. road or waterway or railway separately).
    This function (currently) only performs a simple multiplication or division between the two items
    in order to generate the capacity attribute.
Parameters
------------
G: Graph
Networkx Graph object of a unimodal transport network
item1: str
        string of the Graph's data attribute to be used as the first component of the capacity calculation
    item2: str
        string of the Graph's data attribute to be used as the second component of the capacity calculation
    calctype: str
        operation applied to item1 and item2; can be either 'multiplication' or 'division'
Returns
------------
G1: Graph
Networkx Graph object of a unimodal transport network with capacity attribute embedded in its edges
'''
capacity_dict = {}
G1 = G.copy()
for u,v,data in G.edges(data=True):
if type(data[item1]) != list:
component1 = data[item1]
else:
component1 = min(data[item1])
if type(data[item2]) != list:
component2 = data[item2]
else:
component2 = min(data[item2])
edge = tuple([u,v])
if calctype == 'multiplication':
capacity = component1 * component2
elif calctype == 'division':
capacity = component1 / component2
capacity_dict.update({edge:capacity})
nx.set_edge_attributes(G1, 'lcapacity', capacity_dict)
return G1
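def _example_capacity(G):
    # Illustrative call (not part of the original module): 'lanes' is a
    # hypothetical numeric edge attribute; any attribute present on the edges
    # can serve as item1.
    return create_link_capacity(G, item1='lanes', item2='length', calctype='multiplication')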
def check_connected_components(gdf):
'''
print the number of connected components of network from a GeoDataFrame
'''
gdf['start_point'] = gdf.geometry.apply(lambda x: str(x.coords[0]))
gdf['end_point'] = gdf.geometry.apply(lambda x: str(x.coords[-1]))
graph = nx.Graph(crs=gdf.crs)
for index, row in gdf.iterrows():
graph.add_edge(row.start_point, row.end_point)
n = nx.number_connected_components(graph)
del gdf['start_point']
del gdf['end_point']
print(n)
def read_dotfile(dotfile,
output='dataframe',
crs={'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'},
redundancy=True):
'''
Read network data in a dot file format into a DataFrame or a GeoDataFrame.
Has only been tested for the output files from Bangladesh transport simulation model (https://github.com/quaquel/bangladesh_roadtransport).
Parameters
------------
dotfile: str
File location of the dot file
output: str
if 'dataframe', return pandas DataFrame, if 'geodataframe', return GeoDataFrame
redundancy: bool
if False, remove redundant links, if True, retain redundant links
Returns
------------
links_df/links_gdf: DataFrame/GeoDataFrame
(Geo)DataFrame of the links in the network
nodes_df/nodes_gdf: DataFrame/GeoDataFrame
(Geo)DataFrame of the nodes in the network
'''
#attributes for links dataframe
FNODE = []
TNODE = []
length = []
FLRP = []
TLRP = []
#attribues for nodes dataframe
node_id = []
LRP = []
x_s = [] #identical to longitude
y_s = [] #identical to latitude
f=open(dotfile,'r')
tot_line = sum(1 for _ in f)
f=open(dotfile,'r')
for i, line in enumerate(f):
#skip the first row
if i > 0 and i<tot_line-1:
#for the nodes dataframe
if '->' in line:
#take from and to nodes
split_1a = line.split('->')
from_node = int(split_1a[0].replace(" ",""))
split_1b = split_1a[1].split('[')
to_node = int(split_1b[0].replace(" ",""))
#take length information
split_2 = line.split('length=')
dist = float(split_2[1].replace('"','').replace(" ","").replace(']','').replace(';',''))
#take LRP information
if 'link=' in line:
split_3a = line.split('link=')
split_3b = split_3a[1].split(']')
split_3c = split_3b[0].split('-')
lrpfrom = split_3c[0]
lrpto = split_3c[1]
FLRP.append(lrpfrom)
TLRP.append(lrpto)
FNODE.append(from_node)
TNODE.append(to_node)
length.append(dist)
else:
#take node id
split_1 = line.split('[')
try:
n_id = int(split_1[0].replace(' ',''))
except:
print(line,i)
#take lrp
if 'id=' in line:
split_2a = line.split('id=')
split_2b = split_2a[1].split(',')
n_lrp = split_2b[0]
#take x and y coordinate
split_3a = line.split('point=')
for ch in ['(',')',']','"',';',' ']:
split_3a[1] = split_3a[1].replace(ch,'')
split_3b = split_3a[1].split(',')
x = float(split_3b[0])
y = float(split_3b[1])
node_id.append(n_id)
LRP.append(n_lrp)
x_s.append(x)
y_s.append(y)
#create dataframes
nodes_df = pd.DataFrame({'id':node_id, 'LRP':LRP, 'x':x_s, 'y':y_s})
links_df = pd.DataFrame({'FNODE_':FNODE, 'TNODE_':TNODE, 'length':length, 'from':FLRP, 'to':TLRP})
if not redundancy: #remove redundant links, should have 'from' and 'to' columns
links_df['identifier'] = links_df.apply(lambda row: sorted([row['from'], row['to']]), axis=1)
links_df['identifier'] = links_df['identifier'].apply(lambda idt: str(idt[0])+str(idt[1]))
links_df.drop_duplicates(subset=['identifier', 'length'], inplace=True)
del links_df['identifier']
if output=='dataframe':
return nodes_df, links_df
elif output=='geodataframe':
#modify nodes_df
nodes_df['geometry'] = nodes_df.apply(lambda row: Point(row['x'], row['y']), axis=1)
nodes_gdf = gp.GeoDataFrame(nodes_df, crs=crs, geometry=nodes_df['geometry'])
#modify links_df
links_df['from_p'] = links_df['FNODE_'].apply(lambda f:
nodes_gdf.loc[nodes_gdf['id']==f]['geometry'].iloc[0])
links_df['to_p'] = links_df['TNODE_'].apply(lambda t:
nodes_gdf.loc[nodes_gdf['id']==t]['geometry'].iloc[0])
links_df['geometry'] = links_df.apply(lambda row: LineString([row['from_p'], row['to_p']]), axis=1)
links_gdf = gp.GeoDataFrame(links_df, crs=crs, geometry=links_df['geometry'])
#delete unnecessary column
del links_gdf['from_p']
del links_gdf['to_p']
        return nodes_gdf, links_gdf
| {
"content_hash": "e4f7220b3dfabb2cb694e4b74a6a8d55",
"timestamp": "",
"source": "github",
"line_count": 669,
"max_line_length": 143,
"avg_line_length": 38.70254110612855,
"alnum_prop": 0.6019233740151398,
"repo_name": "bramkaarga/transcrit",
"id": "879ad9137a640fcdf1d46f1fc048bd025901c69d",
"size": "26303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transport_network_modeling/network_prep.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9971810"
},
{
"name": "Python",
"bytes": "225619"
}
],
"symlink_target": ""
} |
from .axpositioning import PositioningAxes
from .gui import adjust_figure_layout
from .subplots import hsubplots, xyshared_plots
| {
"content_hash": "21d31104293178798392e6a74df4fc19",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 47,
"avg_line_length": 42.666666666666664,
"alnum_prop": 0.8515625,
"repo_name": "johndamen/axpositioning",
"id": "157fa0966fae89f2a233002d566581c176aab6af",
"size": "128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axpositioning/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54811"
}
],
"symlink_target": ""
} |
author = 'J. Michael Caine'
version = '0.1'
license = 'MIT'
| {
"content_hash": "66fc853dec02e4c4f907ad8e94bcb959",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 27,
"avg_line_length": 20,
"alnum_prop": 0.6333333333333333,
"repo_name": "jmcaine/threeslides",
"id": "f408083b7bc4f62b4b65ff00366a7dbf584237a2",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threeslides/_meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11161"
},
{
"name": "HTML",
"bytes": "41380"
},
{
"name": "JavaScript",
"bytes": "361143"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "66575"
},
{
"name": "Shell",
"bytes": "431"
}
],
"symlink_target": ""
} |
from collections import defaultdict, deque, OrderedDict
def _sort_transactions_by_freq(transactions, key_func, reverse_int=False,
reverse_ext=False, sort_ext=True):
key_seqs = [{key_func(i) for i in sequence} for sequence in transactions]
frequencies = get_frequencies(key_seqs)
asorted_seqs = []
for key_seq in key_seqs:
if not key_seq:
continue
# Sort each transaction (infrequent key first)
l = [(frequencies[i], i) for i in key_seq]
l.sort(reverse=reverse_int)
asorted_seqs.append(tuple(l))
# Sort all transactions. Those with infrequent key first, first
if sort_ext:
asorted_seqs.sort(reverse=reverse_ext)
return (asorted_seqs, frequencies)
def get_frequencies(transactions):
'''Computes a dictionary, {key:frequencies} containing the frequency of
each key in all transactions. Duplicate keys in a transaction are
counted twice.
:param transactions: a sequence of sequences. [ [transaction items...]]
'''
frequencies = defaultdict(int)
for transaction in transactions:
for item in transaction:
frequencies[item] += 1
return frequencies
def get_sam_input(transactions, key_func=None):
'''Given a list of transactions and a key function, returns a data
structure used as the input of the sam algorithm.
:param transactions: a sequence of sequences. [ [transaction items...]]
:param key_func: a function that returns a comparable key for a
transaction item.
'''
if key_func is None:
key_func = lambda e: e
(asorted_seqs, _) = _sort_transactions_by_freq(transactions, key_func)
# Group same transactions together
sam_input = deque()
visited = {}
current = 0
for seq in asorted_seqs:
if seq not in visited:
sam_input.append((1, seq))
visited[seq] = current
current += 1
else:
i = visited[seq]
(count, oldseq) = sam_input[i]
sam_input[i] = (count + 1, oldseq)
return sam_input
def sam(sam_input, min_support=2):
'''Finds frequent item sets of items appearing in a list of transactions
based on the Split and Merge algorithm by Christian Borgelt.
:param sam_input: The input of the algorithm. Must come from
`get_sam_input`.
:param min_support: The minimal support of a set to be included.
:rtype: A set containing the frequent item sets and their support.
'''
fis = set()
report = {}
_sam(sam_input, fis, report, min_support)
return report
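# Minimal usage sketch (not part of the original module): build the SAM input
# from plain transactions and mine item sets with support >= 2.
def _example_sam():
    transactions = [['a', 'b', 'c'], ['a', 'b'], ['b', 'c'], ['a', 'b', 'c']]
    sam_input = get_sam_input(transactions)
    # returns a dict mapping frozensets of items to their support
    return sam(sam_input, min_support=2)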
def _sam(sam_input, fis, report, min_support):
n = 0
a = deque(sam_input)
while len(a) > 0 and len(a[0][1]) > 0:
b = deque()
s = 0
i = a[0][1][0]
while len(a) > 0 and len(a[0][1]) > 0 and a[0][1][0] == i:
s = s + a[0][0]
a[0] = (a[0][0], a[0][1][1:])
if len(a[0][1]) > 0:
b.append(a.popleft())
else:
a.popleft()
c = deque(b)
d = deque()
while len(a) > 0 and len(b) > 0:
if a[0][1] > b[0][1]:
d.append(b.popleft())
elif a[0][1] < b[0][1]:
d.append(a.popleft())
else:
b[0] = (b[0][0] + a[0][0], b[0][1])
d.append(b.popleft())
a.popleft()
while len(a) > 0:
d.append(a.popleft())
while len(b) > 0:
d.append(b.popleft())
a = d
if s >= min_support:
fis.add(i[1])
report[frozenset(fis)] = s
#print('{0} with support {1}'.format(fis, s))
n = n + 1 + _sam(c, fis, report, min_support)
fis.remove(i[1])
return n
def _new_relim_input(size, key_map):
i = 0
l = []
for key in key_map:
if i >= size:
break
l.append(((0, key), []))
i = i + 1
return l
def _get_key_map(frequencies):
l = [(frequencies[k], k) for k in frequencies]
l.sort(reverse=True)
key_map = OrderedDict()
for i, v in enumerate(l):
key_map[v] = i
return key_map
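# Illustrative sketch of `_get_key_map`: (frequency, key) pairs are ordered
# from most to least frequent and mapped to their position.
#
#   >>> list(_get_key_map({'a': 3, 'b': 1}).items())
#   [((3, 'a'), 0), ((1, 'b'), 1)]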
def get_relim_input(transactions, key_func=None):
'''Given a list of transactions and a key function, returns a data
structure used as the input of the relim algorithm.
:param transactions: a sequence of sequences. [ [transaction items...]]
:param key_func: a function that returns a comparable key for a
transaction item.
'''
# Data Structure
# relim_input[x][0] = (count, key_freq)
    # relim_input[x][1] = [(count, (key_freq, ...)), ...]
#
# in other words:
# relim_input[x][0][0] = count of trans with prefix key_freq
# relim_input[x][0][1] = prefix key_freq
# relim_input[x][1] = lists of transaction rests
# relim_input[x][1][x][0] = number of times a rest of transaction appears
# relim_input[x][1][x][1] = rest of transaction prefixed by key_freq
if key_func is None:
key_func = lambda e: e
(asorted_seqs, frequencies) = _sort_transactions_by_freq(transactions,
key_func)
key_map = _get_key_map(frequencies)
relim_input = _new_relim_input(len(key_map), key_map)
for seq in asorted_seqs:
if not seq:
continue
index = key_map[seq[0]]
((count, char), lists) = relim_input[index]
rest = seq[1:]
found = False
for i, (rest_count, rest_seq) in enumerate(lists):
if rest_seq == rest:
lists[i] = (rest_count + 1, rest_seq)
found = True
break
if not found:
lists.append((1, rest))
relim_input[index] = ((count + 1, char), lists)
return (relim_input, key_map)
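# Usage sketch for `get_relim_input` (toy transactions, illustrative only):
# each slot of `relim_input` holds the (count, key_freq) prefix header and the
# list of transaction rests that share that prefix, as described in the
# structure comment above.
#
#   >>> ts = [('a', 'b', 'c'), ('a', 'b'), ('b', 'd')]
#   >>> relim_input, key_map = get_relim_input(ts)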
def relim(rinput, min_support=2):
'''Finds frequent item sets of items appearing in a list of transactions
based on Recursive Elimination algorithm by Christian Borgelt.
In my synthetic tests, Relim outperforms other algorithms by a large
margin. This is unexpected as FP-Growth is supposed to be superior, but
this may be due to my implementation of these algorithms.
:param rinput: The input of the algorithm. Must come from
`get_relim_input`.
:param min_support: The minimal support of a set to be included.
    :rtype: A dict mapping each frequent item set (a frozenset of keys) to its support.
'''
fis = set()
report = {}
_relim(rinput, fis, report, min_support)
return report
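# Usage sketch for the Relim pipeline above (toy transactions, illustrative
# only); like `sam`, the report maps frozensets of keys to support counts.
#
#   >>> ts = [('a', 'b', 'c'), ('a', 'b'), ('b', 'd'), ('a', 'b', 'd')]
#   >>> report = relim(get_relim_input(ts), min_support=2)
#   >>> all(isinstance(k, frozenset) for k in report)
#   True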
def _relim(rinput, fis, report, min_support):
(relim_input, key_map) = rinput
n = 0
# Maybe this one isn't necessary
#a = deque(relim_input)
a = relim_input
while len(a) > 0:
item = a[-1][0][1]
s = a[-1][0][0]
if s >= min_support:
fis.add(item[1])
#print('Report {0} with support {1}'.format(fis, s))
report[frozenset(fis)] = s
b = _new_relim_input(len(a) - 1, key_map)
rest_lists = a[-1][1]
for (count, rest) in rest_lists:
if not rest:
continue
k = rest[0]
index = key_map[k]
new_rest = rest[1:]
# Only add this rest if it's not empty!
((k_count, k), lists) = b[index]
if len(new_rest) > 0:
lists.append((count, new_rest))
b[index] = ((k_count + count, k), lists)
n = n + 1 + _relim((b, key_map), fis, report, min_support)
fis.remove(item[1])
rest_lists = a[-1][1]
for (count, rest) in rest_lists:
if not rest:
continue
k = rest[0]
index = key_map[k]
new_rest = rest[1:]
((k_count, k), lists) = a[index]
if len(new_rest) > 0:
lists.append((count, new_rest))
a[index] = ((k_count + count, k), lists)
a.pop()
return n
class FPNode(object):
root_key = object()
def __init__(self, key, parent):
self.children = {}
self.parent = parent
self.key = key
self.count = 0
self.next_node = None
def add_path(self, path, index, length, heads, last_insert):
if index >= length:
return
child_key = path[index]
index += 1
try:
child = self.children[child_key]
except Exception:
child = self._create_child(child_key, heads, last_insert)
child.count += 1
heads[child_key][1] += 1
child.add_path(path, index, length, heads, last_insert)
def _create_child(self, child_key, heads, last_insert):
child = FPNode(child_key, self)
self.children[child_key] = child
try:
last_child = last_insert[child_key]
last_child.next_node = child
except Exception:
heads[child_key] = [child, 0]
last_insert[child_key] = child
return child
def get_cond_tree(self, child, count, visited, heads, last_insert,
dont_create=False):
key = self.key
if dont_create:
# This is a head, we don't want to copy it.
cond_node = None
else:
try:
cond_node = visited[self]
except Exception:
cond_node = self._create_cond_child(visited, heads,
last_insert)
if self.parent is not None:
# Recursion
parent_node = self.parent.get_cond_tree(cond_node, count, visited,
heads, last_insert, False)
if cond_node is not None:
cond_node.count += count
heads[key][1] += count
cond_node.parent = parent_node
return cond_node
def _create_cond_child(self, visited, heads, last_insert):
key = self.key
cond_node = FPNode(key, None)
visited[self] = cond_node
try:
last_cond_node = last_insert[key]
last_cond_node.next_node = cond_node
except Exception:
# Don't add root!
if self.parent is not None:
heads[key] = [cond_node, 0]
last_insert[key] = cond_node
return cond_node
def _find_ancestor(self, heads, min_support):
ancestor = self.parent
while ancestor.key != FPNode.root_key:
support = heads[ancestor.key][1]
if support >= min_support:
break
else:
ancestor = ancestor.parent
return ancestor
def prune_me(self, from_head_list, visited_parents, merged_before,
merged_now, heads, min_support):
try:
# Parent was merged
new_parent = merged_before[self.parent]
self.parent = new_parent
except KeyError:
# Ok, no need to change parent
pass
ancestor = self._find_ancestor(heads, min_support)
self.parent = ancestor
try:
# Oh, we visited another child of this parent!
other_node = visited_parents[ancestor]
merged_now[self] = other_node
other_node.count += self.count
# Remove yourself from the list
if from_head_list is not None:
from_head_list.next_node = self.next_node
self.next_node = None
except KeyError:
# We are a new child!
visited_parents[ancestor] = self
def __str__(self):
child_str = ','.join([str(key) for key in self.children])
return '{0} ({1}) [{2}] {3}'.format(self.key, self.count, child_str,
self.next_node is not None)
def __repr__(self):
return self.__str__()
def get_fptree(transactions, key_func=None, min_support=2):
'''Given a list of transactions and a key function, returns a data
    structure used as the input of the fpgrowth algorithm.
:param transactions: a sequence of sequences. [ [transaction items...]]
:param key_func: a function that returns a comparable key for a
transaction item.
:param min_support: minimum support.
'''
if key_func is None:
key_func = lambda e: e
asorted_seqs, frequencies = _sort_transactions_by_freq(transactions,
key_func, True, False, False)
transactions = [[item[1] for item in aseq if item[0] >= min_support] for
aseq in asorted_seqs]
root = FPNode(FPNode.root_key, None)
heads = {}
last_insert = {}
for transaction in transactions:
root.add_path(transaction, 0, len(transaction), heads, last_insert)
    # Here, v[1] is equal to the frequency
sorted_heads = sorted(heads.values(), key=lambda v: (v[1], v[0].key))
new_heads = OrderedDict()
for (head, head_support) in sorted_heads:
new_heads[head.key] = (head, head_support)
#new_heads = tuple(heads.values())
return (root, new_heads)
def _init_heads(orig_heads):
new_heads = OrderedDict()
for key in orig_heads:
new_heads[key] = (None, 0)
return new_heads
def _create_cond_tree(head_node, new_heads, pruning):
visited = {}
last_insert = {}
while head_node is not None:
head_node.get_cond_tree(None, head_node.count, visited, new_heads,
last_insert, True)
head_node = head_node.next_node
return new_heads
def _prune_cond_tree(heads, min_support):
merged_before = {}
merged_now = {}
for key in reversed(heads):
(node, head_support) = heads[key]
if head_support > 0:
visited_parents = {}
previous_node = None
while node is not None:
# If the node is merged, we lose the next_node
next_node = node.next_node
node.prune_me(previous_node, visited_parents, merged_before,
merged_now, heads, min_support)
if node.next_node is not None:
# Only change the previous node if it wasn't merged.
previous_node = node
node = next_node
merged_before = merged_now
merged_now = {}
def fpgrowth(fptree, min_support=2, pruning=False):
'''Finds frequent item sets of items appearing in a list of transactions
based on FP-Growth by Han et al.
:param fptree: The input of the algorithm. Must come from
`get_fptree`.
:param min_support: The minimal support of a set.
    :param pruning: Perform a pruning operation. Defaults to False.
    :rtype: A dict mapping each frequent item set (a frozenset of keys) to its support.
'''
fis = set()
report = {}
_fpgrowth(fptree, fis, report, min_support, pruning)
return report
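# Usage sketch for the FP-Growth pipeline above (toy transactions, illustrative
# only): `get_fptree` already drops items below `min_support` while building
# the tree, and `fpgrowth` mines it into the same frozenset -> support report
# produced by `sam` and `relim`.
#
#   >>> ts = [('a', 'b', 'c'), ('a', 'b'), ('b', 'd'), ('a', 'b', 'd')]
#   >>> tree = get_fptree(ts, min_support=2)
#   >>> report = fpgrowth(tree, min_support=2)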
def _fpgrowth(fptree, fis, report, min_support=2, pruning=True):
(_, heads) = fptree
n = 0
for (head_node, head_support) in heads.values():
if head_support < min_support:
continue
fis.add(head_node.key)
#print('Report {0} with support {1}'.format(fis, head_support))
report[frozenset(fis)] = head_support
new_heads = _init_heads(heads)
_create_cond_tree(head_node, new_heads, pruning)
if pruning:
_prune_cond_tree(new_heads, min_support)
n = n + 1 + _fpgrowth((None, new_heads), fis, report, min_support,
pruning)
fis.remove(head_node.key)
return n
| {
"content_hash": "9d36fa75f62d66daee069b5f62fa2798",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 78,
"avg_line_length": 32.34647302904564,
"alnum_prop": 0.5561541915207492,
"repo_name": "wwdxfa/pymining",
"id": "fc8c680519f20a03964f68baa9018512af543b5d",
"size": "15591",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymining/itemmining.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29468"
}
],
"symlink_target": ""
} |
"""Copyright 2019-2022 XGBoost contributors"""
import asyncio
import json
import os
import pickle
import socket
import subprocess
import tempfile
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from itertools import starmap
from math import ceil
from operator import attrgetter, getitem
from pathlib import Path
from typing import Any, Dict, Generator, Optional, Tuple, Type, Union
import hypothesis
import numpy as np
import pytest
import scipy
import sklearn
from hypothesis import HealthCheck, given, note, settings
from sklearn.datasets import make_classification, make_regression
from xgboost.data import _is_cudf_df
from xgboost.testing.params import hist_parameter_strategy
from xgboost.testing.shared import (
get_feature_weights,
validate_data_initialization,
validate_leaf_output,
)
import xgboost as xgb
from xgboost import testing as tm
pytestmark = [tm.timeout(30), pytest.mark.skipif(**tm.no_dask())]
import dask
import dask.array as da
import dask.dataframe as dd
from distributed import Client, LocalCluster
from toolz import sliding_window # dependency of dask
from xgboost.dask import DaskDMatrix
dask.config.set({"distributed.scheduler.allowed-failures": False})
if hasattr(HealthCheck, 'function_scoped_fixture'):
suppress = [HealthCheck.function_scoped_fixture]
else:
suppress = hypothesis.utils.conventions.not_set # type:ignore
@pytest.fixture(scope="module")
def cluster() -> Generator:
with LocalCluster(
n_workers=2, threads_per_worker=2, dashboard_address=":0"
) as dask_cluster:
yield dask_cluster
@pytest.fixture
def client(cluster: "LocalCluster") -> Generator:
with Client(cluster) as dask_client:
yield dask_client
kRows = 1000
kCols = 10
kWorkers = 5
def make_categorical(
client: Client,
n_samples: int,
n_features: int,
n_categories: int,
onehot: bool = False,
) -> Tuple[dd.DataFrame, dd.Series]:
workers = tm.get_client_workers(client)
n_workers = len(workers)
dfs = []
def pack(**kwargs: Any) -> dd.DataFrame:
X, y = tm.make_categorical(**kwargs)
X["label"] = y
return X
meta = pack(
n_samples=1, n_features=n_features, n_categories=n_categories, onehot=False
)
for i, worker in enumerate(workers):
l_n_samples = min(
n_samples // n_workers, n_samples - i * (n_samples // n_workers)
)
future = client.submit(
pack,
n_samples=l_n_samples,
n_features=n_features,
n_categories=n_categories,
onehot=False,
workers=[worker],
)
dfs.append(future)
df = dd.from_delayed(dfs, meta=meta)
y = df["label"]
X = df[df.columns.difference(["label"])]
if onehot:
return dd.get_dummies(X), y
return X, y
def generate_array(
with_weights: bool = False,
) -> Tuple[da.Array, da.Array, Optional[da.Array]]:
chunk_size = 20
rng = da.random.RandomState(1994)
X = rng.random_sample((kRows, kCols), chunks=(chunk_size, -1))
y = rng.random_sample(kRows, chunks=chunk_size)
if with_weights:
w = rng.random_sample(kRows, chunks=chunk_size)
return X, y, w
return X, y, None
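# Minimal usage sketch of how `generate_array` feeds the dask API in the tests
# below (assumes an active `client`; mirrors `test_from_dask_array`):
#
#   X, y, w = generate_array(with_weights=True)
#   dtrain = DaskDMatrix(client, X, y, weight=w)
#   output = xgb.dask.train(client, {"tree_method": "hist"}, dtrain, num_boost_round=2)
#   predt = xgb.dask.predict(client, output, X)  # lazy da.Array with kRows entries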
def deterministic_persist_per_worker(df: dd.DataFrame, client: "Client") -> dd.DataFrame:
# Got this script from https://github.com/dmlc/xgboost/issues/7927
# Query workers
n_workers = len(client.cluster.workers)
workers = map(attrgetter("worker_address"), client.cluster.workers.values())
# Slice data into roughly equal partitions
subpartition_size = ceil(df.npartitions / n_workers)
subpartition_divisions = range(
0, df.npartitions + subpartition_size, subpartition_size
)
subpartition_slices = starmap(slice, sliding_window(2, subpartition_divisions))
subpartitions = map(partial(getitem, df.partitions), subpartition_slices)
# Persist each subpartition on each worker
# Rebuild dataframe from persisted subpartitions
df2 = dd.concat(
[
sp.persist(workers=w, allow_other_workers=False)
for sp, w in zip(subpartitions, workers)
]
)
return df2
def deterministic_repartition(
client: Client,
X: dd.DataFrame,
y: dd.Series,
m: Optional[Union[dd.DataFrame, dd.Series]],
) -> Tuple[dd.DataFrame, dd.Series, Optional[Union[dd.DataFrame, dd.Series]]]:
# force repartition the data to avoid non-deterministic result
if any(X.map_partitions(lambda x: _is_cudf_df(x)).compute()):
# dask_cudf seems to be doing fine for now
return X, y, m
X["_y"] = y
if m is not None:
if isinstance(m, dd.DataFrame):
m_columns = m.columns
X = dd.concat([X, m], join="outer", axis=1)
else:
m_columns = ["_m"]
X["_m"] = m
X = deterministic_persist_per_worker(X, client)
y = X["_y"]
X = X[X.columns.difference(["_y"])]
if m is not None:
m = X[m_columns]
X = X[X.columns.difference(m_columns)]
return X, y, m
def test_from_dask_dataframe() -> None:
with LocalCluster(n_workers=kWorkers, dashboard_address=":0") as cluster:
with Client(cluster) as client:
X, y, _ = generate_array()
X = dd.from_dask_array(X)
y = dd.from_dask_array(y)
dtrain = DaskDMatrix(client, X, y)
booster = xgb.dask.train(client, {}, dtrain, num_boost_round=2)['booster']
prediction = xgb.dask.predict(client, model=booster, data=dtrain)
assert prediction.ndim == 1
assert isinstance(prediction, da.Array)
assert prediction.shape[0] == kRows
with pytest.raises(TypeError):
# evals_result is not supported in dask interface.
xgb.dask.train( # type:ignore
client, {}, dtrain, num_boost_round=2, evals_result={})
# force prediction to be computed
from_dmatrix = prediction.compute()
prediction = xgb.dask.predict(client, model=booster, data=X)
from_df = prediction.compute()
assert isinstance(prediction, dd.Series)
assert np.all(prediction.compute().values == from_dmatrix)
assert np.all(from_dmatrix == from_df.to_numpy())
series_predictions = xgb.dask.inplace_predict(client, booster, X)
assert isinstance(series_predictions, dd.Series)
np.testing.assert_allclose(series_predictions.compute().values,
from_dmatrix)
# Make sure the output can be integrated back to original dataframe
X["predict"] = prediction
X["inplace_predict"] = series_predictions
assert bool(X.isnull().values.any().compute()) is False
def test_from_dask_array() -> None:
with LocalCluster(
n_workers=kWorkers, threads_per_worker=5, dashboard_address=":0"
) as cluster:
with Client(cluster) as client:
X, y, _ = generate_array()
dtrain = DaskDMatrix(client, X, y)
            # result is {'booster': Booster, 'history': {...}}
result = xgb.dask.train(client, {}, dtrain)
prediction = xgb.dask.predict(client, result, dtrain)
assert prediction.shape[0] == kRows
assert isinstance(prediction, da.Array)
# force prediction to be computed
prediction = prediction.compute()
booster: xgb.Booster = result["booster"]
single_node_predt = booster.predict(xgb.DMatrix(X.compute()))
np.testing.assert_allclose(prediction, single_node_predt)
config = json.loads(booster.save_config())
assert int(config["learner"]["generic_param"]["nthread"]) == 5
from_arr = xgb.dask.predict(client, model=booster, data=X)
assert isinstance(from_arr, da.Array)
assert np.all(single_node_predt == from_arr.compute())
def test_dask_sparse(client: "Client") -> None:
X_, y_ = make_classification(n_samples=1000, n_informative=5, n_classes=3)
rng = np.random.default_rng(seed=0)
idx = rng.integers(low=0, high=X_.shape[0], size=X_.shape[0] // 4)
X_[idx, :] = np.nan
# numpy
X, y = da.from_array(X_), da.from_array(y_)
clf = xgb.dask.DaskXGBClassifier(tree_method="hist", n_estimators=10)
clf.client = client
clf.fit(X, y, eval_set=[(X, y)])
dense_results = clf.evals_result()
# scipy sparse
X, y = da.from_array(X_).map_blocks(scipy.sparse.csr_matrix), da.from_array(y_)
clf = xgb.dask.DaskXGBClassifier(tree_method="hist", n_estimators=10)
clf.client = client
clf.fit(X, y, eval_set=[(X, y)])
sparse_results = clf.evals_result()
np.testing.assert_allclose(
dense_results["validation_0"]["mlogloss"], sparse_results["validation_0"]["mlogloss"]
)
def run_categorical(client: "Client", tree_method: str, X, X_onehot, y) -> None:
parameters = {"tree_method": tree_method, "max_cat_to_onehot": 9999} # force onehot
rounds = 10
m = xgb.dask.DaskDMatrix(client, X_onehot, y, enable_categorical=True)
by_etl_results = xgb.dask.train(
client,
parameters,
m,
num_boost_round=rounds,
evals=[(m, "Train")],
)["history"]
m = xgb.dask.DaskDMatrix(client, X, y, enable_categorical=True)
output = xgb.dask.train(
client,
parameters,
m,
num_boost_round=rounds,
evals=[(m, "Train")],
)
by_builtin_results = output["history"]
np.testing.assert_allclose(
np.array(by_etl_results["Train"]["rmse"]),
np.array(by_builtin_results["Train"]["rmse"]),
rtol=1e-3,
)
assert tm.non_increasing(by_builtin_results["Train"]["rmse"])
def check_model_output(model: xgb.dask.Booster) -> None:
with tempfile.TemporaryDirectory() as tempdir:
path = os.path.join(tempdir, "model.json")
model.save_model(path)
with open(path, "r") as fd:
categorical = json.load(fd)
categories_sizes = np.array(
categorical["learner"]["gradient_booster"]["model"]["trees"][-1][
"categories_sizes"
]
)
assert categories_sizes.shape[0] != 0
np.testing.assert_allclose(categories_sizes, 1)
check_model_output(output["booster"])
reg = xgb.dask.DaskXGBRegressor(
enable_categorical=True,
n_estimators=10,
tree_method=tree_method,
# force onehot
max_cat_to_onehot=9999
)
reg.fit(X, y)
check_model_output(reg.get_booster())
reg = xgb.dask.DaskXGBRegressor(
enable_categorical=True, n_estimators=10
)
with pytest.raises(ValueError):
reg.fit(X, y)
# check partition based
reg = xgb.dask.DaskXGBRegressor(
enable_categorical=True, n_estimators=10, tree_method=tree_method
)
reg.fit(X, y, eval_set=[(X, y)])
assert tm.non_increasing(reg.evals_result()["validation_0"]["rmse"])
booster = reg.get_booster()
predt = xgb.dask.predict(client, booster, X).compute().values
inpredt = xgb.dask.inplace_predict(client, booster, X).compute().values
if hasattr(predt, "get"):
predt = predt.get()
if hasattr(inpredt, "get"):
inpredt = inpredt.get()
np.testing.assert_allclose(predt, inpredt)
def test_categorical(client: "Client") -> None:
X, y = make_categorical(client, 10000, 30, 13)
X_onehot, _ = make_categorical(client, 10000, 30, 13, True)
run_categorical(client, "approx", X, X_onehot, y)
run_categorical(client, "hist", X, X_onehot, y)
ft = ["c"] * X.shape[1]
reg = xgb.dask.DaskXGBRegressor(
tree_method="hist", feature_types=ft, enable_categorical=True
)
reg.fit(X, y)
assert reg.get_booster().feature_types == ft
def test_dask_predict_shape_infer(client: "Client") -> None:
X, y = make_classification(n_samples=kRows, n_informative=5, n_classes=3)
X_ = dd.from_array(X, chunksize=100)
y_ = dd.from_array(y, chunksize=100)
dtrain = xgb.dask.DaskDMatrix(client, data=X_, label=y_)
model = xgb.dask.train(
client, {"objective": "multi:softprob", "num_class": 3}, dtrain=dtrain
)
preds = xgb.dask.predict(client, model, dtrain)
assert preds.shape[0] == preds.compute().shape[0]
assert preds.shape[1] == preds.compute().shape[1]
prediction = xgb.dask.predict(client, model, X_, output_margin=True)
assert isinstance(prediction, dd.DataFrame)
prediction = prediction.compute()
assert prediction.ndim == 2
assert prediction.shape[0] == kRows
assert prediction.shape[1] == 3
prediction = xgb.dask.inplace_predict(client, model, X_, predict_type="margin")
assert isinstance(prediction, dd.DataFrame)
prediction = prediction.compute()
assert prediction.ndim == 2
assert prediction.shape[0] == kRows
assert prediction.shape[1] == 3
def run_boost_from_prediction_multi_class(
X: dd.DataFrame,
y: dd.Series,
tree_method: str,
client: "Client",
) -> None:
model_0 = xgb.dask.DaskXGBClassifier(
learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=768
)
X, y, _ = deterministic_repartition(client, X, y, None)
model_0.fit(X=X, y=y)
margin = xgb.dask.inplace_predict(
client, model_0.get_booster(), X, predict_type="margin"
)
margin.columns = [f"m_{i}" for i in range(margin.shape[1])]
model_1 = xgb.dask.DaskXGBClassifier(
learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=768
)
X, y, margin = deterministic_repartition(client, X, y, margin)
model_1.fit(X=X, y=y, base_margin=margin)
predictions_1 = xgb.dask.predict(
client,
model_1.get_booster(),
xgb.dask.DaskDMatrix(client, X, base_margin=margin),
output_margin=True,
)
model_2 = xgb.dask.DaskXGBClassifier(
learning_rate=0.3, n_estimators=8, tree_method=tree_method, max_bin=768
)
X, y, _ = deterministic_repartition(client, X, y, None)
model_2.fit(X=X, y=y)
predictions_2 = xgb.dask.inplace_predict(
client, model_2.get_booster(), X, predict_type="margin"
)
a = predictions_1.compute()
b = predictions_2.compute()
# cupy/cudf
if hasattr(a, "get"):
a = a.get()
if hasattr(b, "values"):
b = b.values
if hasattr(b, "get"):
b = b.get()
np.testing.assert_allclose(a, b, atol=1e-5)
def run_boost_from_prediction(
X: dd.DataFrame,
y: dd.Series,
tree_method: str,
client: "Client",
) -> None:
X, y = client.persist([X, y])
model_0 = xgb.dask.DaskXGBClassifier(
learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=512
)
X, y, _ = deterministic_repartition(client, X, y, None)
model_0.fit(X=X, y=y)
margin: dd.Series = model_0.predict(X, output_margin=True)
model_1 = xgb.dask.DaskXGBClassifier(
learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=512
)
X, y, margin = deterministic_repartition(client, X, y, margin)
model_1.fit(X=X, y=y, base_margin=margin)
X, y, margin = deterministic_repartition(client, X, y, margin)
predictions_1: dd.Series = model_1.predict(X, base_margin=margin)
cls_2 = xgb.dask.DaskXGBClassifier(
learning_rate=0.3, n_estimators=8, tree_method=tree_method, max_bin=512
)
X, y, _ = deterministic_repartition(client, X, y, None)
cls_2.fit(X=X, y=y)
predictions_2: dd.Series = cls_2.predict(X)
assert np.all(predictions_1.compute() == predictions_2.compute())
margined = xgb.dask.DaskXGBClassifier(n_estimators=4)
X, y, margin = deterministic_repartition(client, X, y, margin)
margined.fit(
X=X, y=y, base_margin=margin, eval_set=[(X, y)], base_margin_eval_set=[margin]
)
unmargined = xgb.dask.DaskXGBClassifier(n_estimators=4)
X, y, margin = deterministic_repartition(client, X, y, margin)
unmargined.fit(X=X, y=y, eval_set=[(X, y)], base_margin=margin)
margined_res = margined.evals_result()["validation_0"]["logloss"]
unmargined_res = unmargined.evals_result()["validation_0"]["logloss"]
assert len(margined_res) == len(unmargined_res)
for i in range(len(margined_res)):
# margined is correct one, so smaller error.
assert margined_res[i] < unmargined_res[i]
@pytest.mark.parametrize("tree_method", ["hist", "approx"])
def test_boost_from_prediction(tree_method: str, client: "Client") -> None:
from sklearn.datasets import load_breast_cancer, load_digits
X_, y_ = load_breast_cancer(return_X_y=True)
X, y = dd.from_array(X_, chunksize=200), dd.from_array(y_, chunksize=200)
run_boost_from_prediction(X, y, tree_method, client)
X_, y_ = load_digits(return_X_y=True)
X, y = dd.from_array(X_, chunksize=100), dd.from_array(y_, chunksize=100)
run_boost_from_prediction_multi_class(X, y, tree_method, client)
def test_inplace_predict(client: "Client") -> None:
from sklearn.datasets import load_diabetes
X_, y_ = load_diabetes(return_X_y=True)
X, y = dd.from_array(X_, chunksize=32), dd.from_array(y_, chunksize=32)
reg = xgb.dask.DaskXGBRegressor(n_estimators=4).fit(X, y)
booster = reg.get_booster()
base_margin = y
inplace = xgb.dask.inplace_predict(
client, booster, X, base_margin=base_margin
).compute()
Xy = xgb.dask.DaskDMatrix(client, X, base_margin=base_margin)
copied = xgb.dask.predict(client, booster, Xy).compute()
np.testing.assert_allclose(inplace, copied)
def test_dask_missing_value_reg(client: "Client") -> None:
X_0 = np.ones((20 // 2, kCols))
X_1 = np.zeros((20 // 2, kCols))
X = np.concatenate([X_0, X_1], axis=0)
np.random.shuffle(X)
X = da.from_array(X)
X = X.rechunk(20, 1)
y = da.random.randint(0, 3, size=20)
    y = y.rechunk(20)
regressor = xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2,
missing=0.0)
regressor.client = client
regressor.set_params(tree_method='hist')
regressor.fit(X, y, eval_set=[(X, y)])
dd_predt = regressor.predict(X).compute()
np_X = X.compute()
np_predt = regressor.get_booster().predict(
xgb.DMatrix(np_X, missing=0.0))
np.testing.assert_allclose(np_predt, dd_predt)
def test_dask_missing_value_cls(client: "Client") -> None:
X_0 = np.ones((kRows // 2, kCols))
X_1 = np.zeros((kRows // 2, kCols))
X = np.concatenate([X_0, X_1], axis=0)
np.random.shuffle(X)
X = da.from_array(X)
X = X.rechunk(20, None)
y = da.random.randint(0, 3, size=kRows)
y = y.rechunk(20, 1)
cls = xgb.dask.DaskXGBClassifier(verbosity=1, n_estimators=2,
tree_method='hist',
missing=0.0)
cls.client = client
cls.fit(X, y, eval_set=[(X, y)])
dd_pred_proba = cls.predict_proba(X).compute()
np_X = X.compute()
np_pred_proba = cls.get_booster().predict(
xgb.DMatrix(np_X, missing=0.0))
np.testing.assert_allclose(np_pred_proba, dd_pred_proba)
cls = xgb.dask.DaskXGBClassifier()
assert hasattr(cls, 'missing')
@pytest.mark.parametrize("model", ["boosting", "rf"])
def test_dask_regressor(model: str, client: "Client") -> None:
X, y, w = generate_array(with_weights=True)
if model == "boosting":
regressor = xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2)
else:
regressor = xgb.dask.DaskXGBRFRegressor(verbosity=1, n_estimators=2)
assert regressor._estimator_type == "regressor"
assert sklearn.base.is_regressor(regressor)
regressor.set_params(tree_method='hist')
regressor.client = client
regressor.fit(X, y, sample_weight=w, eval_set=[(X, y)])
prediction = regressor.predict(X)
assert prediction.ndim == 1
assert prediction.shape[0] == kRows
history = regressor.evals_result()
assert isinstance(prediction, da.Array)
assert isinstance(history, dict)
assert list(history['validation_0'].keys())[0] == 'rmse'
forest = int(
json.loads(regressor.get_booster().save_config())["learner"][
"gradient_booster"
]["gbtree_model_param"]["num_parallel_tree"]
)
if model == "boosting":
assert len(history['validation_0']['rmse']) == 2
assert forest == 1
else:
assert len(history['validation_0']['rmse']) == 1
assert forest == 2
def run_dask_classifier(
X: xgb.dask._DaskCollection,
y: xgb.dask._DaskCollection,
w: xgb.dask._DaskCollection,
model: str,
tree_method: Optional[str],
client: "Client",
n_classes,
) -> None:
metric = "merror" if n_classes > 2 else "logloss"
if model == "boosting":
classifier = xgb.dask.DaskXGBClassifier(
verbosity=1, n_estimators=2, eval_metric=metric, tree_method=tree_method
)
else:
classifier = xgb.dask.DaskXGBRFClassifier(
verbosity=1, n_estimators=2, eval_metric=metric, tree_method=tree_method
)
assert classifier._estimator_type == "classifier"
assert sklearn.base.is_classifier(classifier)
classifier.client = client
classifier.fit(X, y, sample_weight=w, eval_set=[(X, y)])
prediction = classifier.predict(X).compute()
assert prediction.ndim == 1
assert prediction.shape[0] == kRows
history = classifier.evals_result()
assert isinstance(history, dict)
assert list(history.keys())[0] == "validation_0"
assert list(history["validation_0"].keys())[0] == metric
assert len(list(history["validation_0"])) == 1
config = json.loads(classifier.get_booster().save_config())
n_threads = int(config["learner"]["generic_param"]["nthread"])
assert n_threads != 0 and n_threads != os.cpu_count()
forest = int(
config["learner"]["gradient_booster"]["gbtree_model_param"]["num_parallel_tree"]
)
if model == "boosting":
assert len(history["validation_0"][metric]) == 2
assert forest == 1
else:
assert len(history["validation_0"][metric]) == 1
assert forest == 2
# Test .predict_proba()
probas = classifier.predict_proba(X).compute()
assert classifier.n_classes_ == n_classes
assert probas.ndim == 2
assert probas.shape[0] == kRows
assert probas.shape[1] == n_classes
if n_classes > 2:
cls_booster = classifier.get_booster()
single_node_proba = cls_booster.inplace_predict(X.compute())
# test shared by CPU and GPU
if isinstance(single_node_proba, np.ndarray):
np.testing.assert_allclose(single_node_proba, probas)
else:
import cupy
cupy.testing.assert_allclose(single_node_proba, probas)
# Test with dataframe, not shared with GPU as cupy doesn't work well with da.unique.
if isinstance(X, da.Array) and n_classes > 2:
X_d: dd.DataFrame = X.to_dask_dataframe()
assert classifier.n_classes_ == n_classes
prediction_df = classifier.predict(X_d).compute()
assert prediction_df.ndim == 1
assert prediction_df.shape[0] == kRows
np.testing.assert_allclose(prediction_df, prediction)
probas = classifier.predict_proba(X).compute()
np.testing.assert_allclose(single_node_proba, probas)
@pytest.mark.parametrize("model", ["boosting", "rf"])
def test_dask_classifier(model: str, client: "Client") -> None:
X, y, w = generate_array(with_weights=True)
y = (y * 10).astype(np.int32)
run_dask_classifier(X, y, w, model, None, client, 10)
y_bin = y.copy()
y_bin[y > 5] = 1.0
y_bin[y <= 5] = 0.0
run_dask_classifier(X, y_bin, w, model, None, client, 2)
def test_empty_dmatrix_training_continuation(client: "Client") -> None:
kRows, kCols = 1, 97
X = dd.from_array(np.random.randn(kRows, kCols))
y = dd.from_array(np.random.rand(kRows))
X.columns = ['X' + str(i) for i in range(0, kCols)]
dtrain = xgb.dask.DaskDMatrix(client, X, y)
kRows += 1000
X = dd.from_array(np.random.randn(kRows, kCols), chunksize=10)
X.columns = ['X' + str(i) for i in range(0, kCols)]
y = dd.from_array(np.random.rand(kRows), chunksize=10)
valid = xgb.dask.DaskDMatrix(client, X, y)
out = xgb.dask.train(client, {'tree_method': 'hist'},
dtrain=dtrain, num_boost_round=2,
evals=[(valid, 'validation')])
out = xgb.dask.train(client, {'tree_method': 'hist'},
dtrain=dtrain, xgb_model=out['booster'],
num_boost_round=2,
evals=[(valid, 'validation')])
assert xgb.dask.predict(client, out, dtrain).compute().shape[0] == 1
def run_empty_dmatrix_reg(client: "Client", parameters: dict) -> None:
def _check_outputs(out: xgb.dask.TrainReturnT, predictions: np.ndarray) -> None:
assert isinstance(out['booster'], xgb.dask.Booster)
for _, v in out['history']['validation'].items():
assert len(v) == 2
assert isinstance(predictions, np.ndarray)
assert predictions.shape[0] == 1
kRows, kCols = 1, 97
X = dd.from_array(np.random.randn(kRows, kCols))
y = dd.from_array(np.random.rand(kRows))
dtrain = xgb.dask.DaskDMatrix(client, X, y)
out = xgb.dask.train(client, parameters,
dtrain=dtrain,
evals=[(dtrain, 'validation')],
num_boost_round=2)
predictions = xgb.dask.predict(client=client, model=out,
data=dtrain).compute()
_check_outputs(out, predictions)
# valid has more rows than train
kRows += 1
X = dd.from_array(np.random.randn(kRows, kCols))
y = dd.from_array(np.random.rand(kRows))
valid = xgb.dask.DaskDMatrix(client, X, y)
out = xgb.dask.train(client, parameters,
dtrain=dtrain,
evals=[(valid, 'validation')],
num_boost_round=2)
predictions = xgb.dask.predict(client=client, model=out,
data=dtrain).compute()
_check_outputs(out, predictions)
# train has more rows than evals
valid = dtrain
kRows += 1
X = dd.from_array(np.random.randn(kRows, kCols))
y = dd.from_array(np.random.rand(kRows))
dtrain = xgb.dask.DaskDMatrix(client, X, y)
out = xgb.dask.train(client, parameters,
dtrain=dtrain,
evals=[(valid, 'validation')],
num_boost_round=2)
predictions = xgb.dask.predict(client=client, model=out,
data=valid).compute()
_check_outputs(out, predictions)
def run_empty_dmatrix_cls(client: "Client", parameters: dict) -> None:
n_classes = 4
def _check_outputs(out: xgb.dask.TrainReturnT, predictions: np.ndarray) -> None:
assert isinstance(out['booster'], xgb.dask.Booster)
assert len(out['history']['validation']['merror']) == 2
assert isinstance(predictions, np.ndarray)
assert predictions.shape[1] == n_classes, predictions.shape
kRows, kCols = 1, 97
X = dd.from_array(np.random.randn(kRows, kCols))
y = dd.from_array(np.random.randint(low=0, high=n_classes, size=kRows))
dtrain = xgb.dask.DaskDMatrix(client, X, y)
parameters['objective'] = 'multi:softprob'
parameters['eval_metric'] = 'merror'
parameters['num_class'] = n_classes
out = xgb.dask.train(client, parameters,
dtrain=dtrain,
evals=[(dtrain, 'validation')],
num_boost_round=2)
predictions = xgb.dask.predict(client=client, model=out,
data=dtrain)
assert predictions.shape[1] == n_classes
predictions = predictions.compute()
_check_outputs(out, predictions)
# train has more rows than evals
valid = dtrain
kRows += 1
X = dd.from_array(np.random.randn(kRows, kCols))
y = dd.from_array(np.random.randint(low=0, high=n_classes, size=kRows))
dtrain = xgb.dask.DaskDMatrix(client, X, y)
out = xgb.dask.train(client, parameters,
dtrain=dtrain,
evals=[(valid, 'validation')],
num_boost_round=2)
predictions = xgb.dask.predict(client=client, model=out,
data=valid).compute()
_check_outputs(out, predictions)
def run_empty_dmatrix_auc(client: "Client", tree_method: str, n_workers: int) -> None:
from sklearn import datasets
n_samples = 100
n_features = 7
rng = np.random.RandomState(1994)
make_classification = partial(
datasets.make_classification,
n_features=n_features,
random_state=rng
)
# binary
X_, y_ = make_classification(n_samples=n_samples, random_state=rng)
X = dd.from_array(X_, chunksize=10)
y = dd.from_array(y_, chunksize=10)
n_samples = n_workers - 1
valid_X_, valid_y_ = make_classification(n_samples=n_samples, random_state=rng)
valid_X = dd.from_array(valid_X_, chunksize=n_samples)
valid_y = dd.from_array(valid_y_, chunksize=n_samples)
cls = xgb.dask.DaskXGBClassifier(tree_method=tree_method, n_estimators=2)
cls.fit(X, y, eval_metric=["auc", "aucpr"], eval_set=[(valid_X, valid_y)])
# multiclass
X_, y_ = make_classification(
n_samples=n_samples,
n_classes=n_workers,
n_informative=n_features,
n_redundant=0,
n_repeated=0
)
for i in range(y_.shape[0]):
y_[i] = i % n_workers
X = dd.from_array(X_, chunksize=10)
y = dd.from_array(y_, chunksize=10)
n_samples = n_workers - 1
valid_X_, valid_y_ = make_classification(
n_samples=n_samples,
n_classes=n_workers,
n_informative=n_features,
n_redundant=0,
n_repeated=0
)
for i in range(valid_y_.shape[0]):
valid_y_[i] = i % n_workers
valid_X = dd.from_array(valid_X_, chunksize=n_samples)
valid_y = dd.from_array(valid_y_, chunksize=n_samples)
cls = xgb.dask.DaskXGBClassifier(tree_method=tree_method, n_estimators=2)
cls.fit(X, y, eval_metric=["auc", "aucpr"], eval_set=[(valid_X, valid_y)])
def test_empty_dmatrix_auc() -> None:
with LocalCluster(n_workers=4, dashboard_address=":0") as cluster:
with Client(cluster) as client:
run_empty_dmatrix_auc(client, "hist", 4)
def run_auc(client: "Client", tree_method: str) -> None:
from sklearn import datasets
n_samples = 100
n_features = 97
rng = np.random.RandomState(1994)
X_, y_ = datasets.make_classification(
n_samples=n_samples, n_features=n_features, random_state=rng
)
X = dd.from_array(X_, chunksize=10)
y = dd.from_array(y_, chunksize=10)
valid_X_, valid_y_ = datasets.make_classification(
n_samples=n_samples, n_features=n_features, random_state=rng
)
valid_X = dd.from_array(valid_X_, chunksize=10)
valid_y = dd.from_array(valid_y_, chunksize=10)
cls = xgb.XGBClassifier(tree_method=tree_method, n_estimators=2)
cls.fit(X_, y_, eval_metric="auc", eval_set=[(valid_X_, valid_y_)])
dcls = xgb.dask.DaskXGBClassifier(tree_method=tree_method, n_estimators=2)
dcls.fit(X, y, eval_metric="auc", eval_set=[(valid_X, valid_y)])
approx = dcls.evals_result()["validation_0"]["auc"]
exact = cls.evals_result()["validation_0"]["auc"]
for i in range(2):
# approximated test.
assert np.abs(approx[i] - exact[i]) <= 0.06
def test_auc(client: "Client") -> None:
run_auc(client, "hist")
# No test for Exact, as empty DMatrix handling is mostly for distributed
# environments and Exact doesn't support it.
@pytest.mark.parametrize("tree_method", ["hist", "approx"])
def test_empty_dmatrix(tree_method) -> None:
with LocalCluster(n_workers=kWorkers, dashboard_address=":0") as cluster:
with Client(cluster) as client:
parameters = {'tree_method': tree_method}
run_empty_dmatrix_reg(client, parameters)
run_empty_dmatrix_cls(client, parameters)
parameters = {'tree_method': tree_method, "objective": "reg:absoluteerror"}
run_empty_dmatrix_reg(client, parameters)
async def run_from_dask_array_asyncio(scheduler_address: str) -> xgb.dask.TrainReturnT:
async with Client(scheduler_address, asynchronous=True) as client:
X, y, _ = generate_array()
m = await DaskDMatrix(client, X, y)
output = await xgb.dask.train(client, {}, dtrain=m)
with_m = await xgb.dask.predict(client, output, m)
with_X = await xgb.dask.predict(client, output, X)
inplace = await xgb.dask.inplace_predict(client, output, X)
assert isinstance(with_m, da.Array)
assert isinstance(with_X, da.Array)
assert isinstance(inplace, da.Array)
np.testing.assert_allclose(await client.compute(with_m),
await client.compute(with_X))
np.testing.assert_allclose(await client.compute(with_m),
await client.compute(inplace))
return output
async def run_dask_regressor_asyncio(scheduler_address: str) -> None:
async with Client(scheduler_address, asynchronous=True) as client:
X, y, _ = generate_array()
regressor = await xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2)
regressor.set_params(tree_method='hist')
regressor.client = client
await regressor.fit(X, y, eval_set=[(X, y)])
prediction = await regressor.predict(X)
assert prediction.ndim == 1
assert prediction.shape[0] == kRows
history = regressor.evals_result()
assert isinstance(prediction, da.Array)
assert isinstance(history, dict)
assert list(history['validation_0'].keys())[0] == 'rmse'
assert len(history['validation_0']['rmse']) == 2
awaited = await client.compute(prediction)
assert awaited.shape[0] == kRows
async def run_dask_classifier_asyncio(scheduler_address: str) -> None:
async with Client(scheduler_address, asynchronous=True) as client:
X, y, _ = generate_array()
y = (y * 10).astype(np.int32)
classifier = await xgb.dask.DaskXGBClassifier(
verbosity=1, n_estimators=2, eval_metric='merror')
classifier.client = client
await classifier.fit(X, y, eval_set=[(X, y)])
prediction = await classifier.predict(X)
assert prediction.ndim == 1
assert prediction.shape[0] == kRows
history = classifier.evals_result()
assert isinstance(prediction, da.Array)
assert isinstance(history, dict)
assert list(history.keys())[0] == 'validation_0'
assert list(history['validation_0'].keys())[0] == 'merror'
assert len(list(history['validation_0'])) == 1
assert len(history['validation_0']['merror']) == 2
# Test .predict_proba()
probas = await classifier.predict_proba(X)
assert classifier.n_classes_ == 10
assert probas.ndim == 2
assert probas.shape[0] == kRows
assert probas.shape[1] == 10
# Test with dataframe.
X_d = dd.from_dask_array(X)
y_d = dd.from_dask_array(y)
await classifier.fit(X_d, y_d)
assert classifier.n_classes_ == 10
prediction = await client.compute(await classifier.predict(X_d))
assert prediction.ndim == 1
assert prediction.shape[0] == kRows
def test_with_asyncio() -> None:
with LocalCluster(n_workers=2, dashboard_address=":0") as cluster:
with Client(cluster) as client:
address = client.scheduler.address
output = asyncio.run(run_from_dask_array_asyncio(address))
assert isinstance(output['booster'], xgb.Booster)
assert isinstance(output['history'], dict)
asyncio.run(run_dask_regressor_asyncio(address))
asyncio.run(run_dask_classifier_asyncio(address))
async def generate_concurrent_trainings() -> None:
async def train() -> None:
async with LocalCluster(
n_workers=2, threads_per_worker=1, asynchronous=True, dashboard_address=":0"
) as cluster:
async with Client(cluster, asynchronous=True) as client:
X, y, w = generate_array(with_weights=True)
dtrain = await DaskDMatrix(client, X, y, weight=w)
dvalid = await DaskDMatrix(client, X, y, weight=w)
output = await xgb.dask.train(client, {}, dtrain=dtrain)
await xgb.dask.predict(client, output, data=dvalid)
await asyncio.gather(train(), train())
def test_concurrent_trainings() -> None:
asyncio.run(generate_concurrent_trainings())
def test_predict(client: "Client") -> None:
X, y, _ = generate_array()
dtrain = DaskDMatrix(client, X, y)
booster = xgb.dask.train(client, {}, dtrain, num_boost_round=2)["booster"]
predt_0 = xgb.dask.predict(client, model=booster, data=dtrain)
assert predt_0.ndim == 1
assert predt_0.shape[0] == kRows
margin = xgb.dask.predict(client, model=booster, data=dtrain, output_margin=True)
assert margin.ndim == 1
assert margin.shape[0] == kRows
shap = xgb.dask.predict(client, model=booster, data=dtrain, pred_contribs=True)
assert shap.ndim == 2
assert shap.shape[0] == kRows
assert shap.shape[1] == kCols + 1
booster_f = client.scatter(booster, broadcast=True)
predt_1 = xgb.dask.predict(client, booster_f, X).compute()
predt_2 = xgb.dask.inplace_predict(client, booster_f, X).compute()
np.testing.assert_allclose(predt_0, predt_1)
np.testing.assert_allclose(predt_0, predt_2)
def test_predict_with_meta(client: "Client") -> None:
X, y, w = generate_array(with_weights=True)
assert w is not None
partition_size = 20
margin = da.random.random(kRows, partition_size) + 1e4
dtrain = DaskDMatrix(client, X, y, weight=w, base_margin=margin)
booster: xgb.Booster = xgb.dask.train(
client, {}, dtrain, num_boost_round=4)['booster']
prediction = xgb.dask.predict(client, model=booster, data=dtrain)
assert prediction.ndim == 1
assert prediction.shape[0] == kRows
prediction = client.compute(prediction).result()
assert np.all(prediction > 1e3)
m = xgb.DMatrix(X.compute())
m.set_info(label=y.compute(), weight=w.compute(), base_margin=margin.compute())
single = booster.predict(m) # Make sure the ordering is correct.
assert np.all(prediction == single)
def run_aft_survival(client: "Client", dmatrix_t: Type) -> None:
df = dd.read_csv(
os.path.join(tm.data_dir(__file__), "veterans_lung_cancer.csv")
)
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound',
'Survival_label_upper_bound'], axis=1)
m = dmatrix_t(client, X, label_lower_bound=y_lower_bound,
label_upper_bound=y_upper_bound)
base_params = {'verbosity': 0,
'objective': 'survival:aft',
'eval_metric': 'aft-nloglik',
'learning_rate': 0.05,
'aft_loss_distribution_scale': 1.20,
'max_depth': 6,
'lambda': 0.01,
'alpha': 0.02}
nloglik_rec = {}
dists = ['normal', 'logistic', 'extreme']
for dist in dists:
params = base_params
params.update({'aft_loss_distribution': dist})
evals_result = {}
out = xgb.dask.train(client, params, m, num_boost_round=100,
evals=[(m, 'train')])
evals_result = out['history']
nloglik_rec[dist] = evals_result['train']['aft-nloglik']
        # AFT metric (negative log likelihood) improves monotonically
        assert all(p >= q for p, q in zip(nloglik_rec[dist],
                                          nloglik_rec[dist][1:]))
# For this data, normal distribution works the best
assert nloglik_rec['normal'][-1] < 4.9
assert nloglik_rec['logistic'][-1] > 4.9
assert nloglik_rec['extreme'][-1] > 4.9
def test_dask_aft_survival() -> None:
with LocalCluster(n_workers=kWorkers, dashboard_address=":0") as cluster:
with Client(cluster) as client:
run_aft_survival(client, DaskDMatrix)
def test_dask_ranking(client: "Client") -> None:
dpath = "demo/rank/"
mq2008 = tm.get_mq2008(dpath)
data = []
for d in mq2008:
if isinstance(d, scipy.sparse.csr_matrix):
d[d == 0] = np.inf
d = d.toarray()
d[d == 0] = np.nan
d[np.isinf(d)] = 0
data.append(dd.from_array(d, chunksize=32))
else:
data.append(dd.from_array(d, chunksize=32))
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = data
qid_train = qid_train.astype(np.uint32)
qid_valid = qid_valid.astype(np.uint32)
qid_test = qid_test.astype(np.uint32)
rank = xgb.dask.DaskXGBRanker(n_estimators=2500)
rank.fit(
x_train,
y_train,
qid=qid_train,
eval_set=[(x_test, y_test), (x_train, y_train)],
eval_qid=[qid_test, qid_train],
eval_metric=["ndcg"],
verbose=True,
early_stopping_rounds=10,
)
assert rank.n_features_in_ == 46
assert rank.best_score > 0.98
@pytest.mark.parametrize("booster", ["dart", "gbtree"])
def test_dask_predict_leaf(booster: str, client: "Client") -> None:
from sklearn.datasets import load_digits
X_, y_ = load_digits(return_X_y=True)
num_parallel_tree = 4
X, y = dd.from_array(X_, chunksize=32), dd.from_array(y_, chunksize=32)
rounds = 4
cls = xgb.dask.DaskXGBClassifier(
n_estimators=rounds, num_parallel_tree=num_parallel_tree, booster=booster
)
cls.client = client
cls.fit(X, y)
leaf = xgb.dask.predict(
client,
cls.get_booster(),
X.to_dask_array(), # we can't map_blocks on dataframe when output is 4-dim.
pred_leaf=True,
strict_shape=True,
validate_features=False,
).compute()
assert leaf.shape[0] == X_.shape[0]
assert leaf.shape[1] == rounds
assert leaf.shape[2] == cls.n_classes_
assert leaf.shape[3] == num_parallel_tree
leaf_from_apply = cls.apply(X).reshape(leaf.shape).compute()
np.testing.assert_allclose(leaf_from_apply, leaf)
validate_leaf_output(leaf, num_parallel_tree)
def test_dask_iteration_range(client: "Client"):
X, y, _ = generate_array()
n_rounds = 10
Xy = xgb.DMatrix(X.compute(), y.compute())
dXy = xgb.dask.DaskDMatrix(client, X, y)
booster = xgb.dask.train(
client, {"tree_method": "hist"}, dXy, num_boost_round=n_rounds
)["booster"]
for i in range(0, n_rounds):
iter_range = (0, i)
native_predt = booster.predict(Xy, iteration_range=iter_range)
with_dask_dmatrix = xgb.dask.predict(
client, booster, dXy, iteration_range=iter_range
)
with_dask_collection = xgb.dask.predict(
client, booster, X, iteration_range=iter_range
)
with_inplace = xgb.dask.inplace_predict(
client, booster, X, iteration_range=iter_range
)
np.testing.assert_allclose(native_predt, with_dask_dmatrix.compute())
np.testing.assert_allclose(native_predt, with_dask_collection.compute())
np.testing.assert_allclose(native_predt, with_inplace.compute())
full_predt = xgb.dask.predict(client, booster, X, iteration_range=(0, n_rounds))
default = xgb.dask.predict(client, booster, X)
np.testing.assert_allclose(full_predt.compute(), default.compute())
class TestWithDask:
def test_dmatrix_binary(self, client: "Client") -> None:
def save_dmatrix(rabit_args: Dict[str, Union[int, str]], tmpdir: str) -> None:
with xgb.dask.CommunicatorContext(**rabit_args):
rank = xgb.collective.get_rank()
X, y = tm.make_categorical(100, 4, 4, False)
Xy = xgb.DMatrix(X, y, enable_categorical=True)
path = os.path.join(tmpdir, f"{rank}.bin")
Xy.save_binary(path)
def load_dmatrix(rabit_args: Dict[str, Union[int,str]], tmpdir: str) -> None:
with xgb.dask.CommunicatorContext(**rabit_args):
rank = xgb.collective.get_rank()
path = os.path.join(tmpdir, f"{rank}.bin")
Xy = xgb.DMatrix(path)
assert Xy.num_row() == 100
assert Xy.num_col() == 4
with tempfile.TemporaryDirectory() as tmpdir:
workers = tm.get_client_workers(client)
rabit_args = client.sync(
xgb.dask._get_rabit_args, len(workers), None, client
)
futures = []
for w in workers:
                # same arguments for each worker; `pure` must be set to False,
                # otherwise dask will try to reuse the result from the first
                # worker and hang waiting for it.
f = client.submit(
save_dmatrix, rabit_args, tmpdir, workers=[w], pure=False
)
futures.append(f)
client.gather(futures)
rabit_args = client.sync(
xgb.dask._get_rabit_args, len(workers), None, client
)
futures = []
for w in workers:
f = client.submit(
load_dmatrix, rabit_args, tmpdir, workers=[w], pure=False
)
futures.append(f)
client.gather(futures)
@pytest.mark.parametrize('config_key,config_value', [('verbosity', 0), ('use_rmm', True)])
def test_global_config(
self,
client: "Client",
config_key: str,
config_value: Any
) -> None:
X, y, _ = generate_array()
xgb.config.set_config(**{config_key: config_value})
dtrain = DaskDMatrix(client, X, y)
before_fname = './before_training-test_global_config'
after_fname = './after_training-test_global_config'
class TestCallback(xgb.callback.TrainingCallback):
def write_file(self, fname: str) -> None:
with open(fname, 'w') as fd:
fd.write(str(xgb.config.get_config()[config_key]))
def before_training(self, model: xgb.Booster) -> xgb.Booster:
self.write_file(before_fname)
assert xgb.config.get_config()[config_key] == config_value
return model
def after_training(self, model: xgb.Booster) -> xgb.Booster:
assert xgb.config.get_config()[config_key] == config_value
return model
def before_iteration(
self, model: xgb.Booster, epoch: int, evals_log: Dict
) -> bool:
assert xgb.config.get_config()[config_key] == config_value
return False
def after_iteration(
self, model: xgb.Booster, epoch: int, evals_log: Dict
) -> bool:
self.write_file(after_fname)
assert xgb.config.get_config()[config_key] == config_value
return False
xgb.dask.train(client, {}, dtrain, num_boost_round=4, callbacks=[TestCallback()])[
'booster']
with open(before_fname, 'r') as before, open(after_fname, 'r') as after:
assert before.read() == str(config_value)
assert after.read() == str(config_value)
os.remove(before_fname)
os.remove(after_fname)
with dask.config.set({'xgboost.foo': "bar"}):
with pytest.raises(ValueError, match=r"Unknown configuration.*"):
xgb.dask.train(client, {}, dtrain, num_boost_round=4)
with dask.config.set({'xgboost.scheduler_address': "127.0.0.1:foo"}):
with pytest.raises(socket.gaierror, match=r".*not known.*"):
xgb.dask.train(client, {}, dtrain, num_boost_round=1)
def run_updater_test(
self,
client: "Client",
params: Dict,
num_rounds: int,
dataset: tm.TestDataset,
tree_method: str
) -> None:
params['tree_method'] = tree_method
params = dataset.set_params(params)
# It doesn't make sense to distribute a completely
# empty dataset.
if dataset.X.shape[0] == 0:
return
chunk = 128
y_chunk = chunk if len(dataset.y.shape) == 1 else (chunk, dataset.y.shape[1])
X = da.from_array(dataset.X, chunks=(chunk, dataset.X.shape[1]))
y = da.from_array(dataset.y, chunks=y_chunk)
if dataset.w is not None:
w = da.from_array(dataset.w, chunks=(chunk,))
else:
w = None
m = xgb.dask.DaskDMatrix(
client, data=X, label=y, weight=w)
history = xgb.dask.train(client, params=params, dtrain=m,
num_boost_round=num_rounds,
evals=[(m, 'train')])['history']
note(history)
history = history['train'][dataset.metric]
def is_stump() -> bool:
return params["max_depth"] == 1 or params["max_leaves"] == 1
def minimum_bin() -> bool:
return "max_bin" in params and params["max_bin"] == 2
# See note on `ObjFunction::UpdateTreeLeaf`.
update_leaf = dataset.name.endswith("-l1")
if update_leaf and len(history) >= 2:
assert history[0] >= history[-1]
return
elif minimum_bin() and is_stump():
assert tm.non_increasing(history, tolerance=1e-3)
else:
assert tm.non_increasing(history)
# Make sure that it's decreasing
assert history[-1] < history[0]
@given(params=hist_parameter_strategy,
dataset=tm.dataset_strategy)
@settings(deadline=None, max_examples=10, suppress_health_check=suppress, print_blob=True)
def test_hist(
self, params: Dict, dataset: tm.TestDataset, client: "Client"
) -> None:
num_rounds = 10
self.run_updater_test(client, params, num_rounds, dataset, 'hist')
def test_quantile_dmatrix(self, client: Client) -> None:
X, y = make_categorical(client, 10000, 30, 13)
Xy = xgb.dask.DaskDMatrix(client, X, y, enable_categorical=True)
valid_Xy = xgb.dask.DaskDMatrix(client, X, y, enable_categorical=True)
output = xgb.dask.train(
client,
{"tree_method": "hist"},
Xy,
num_boost_round=10,
evals=[(Xy, "Train"), (valid_Xy, "Valid")]
)
dmatrix_hist = output["history"]
Xy = xgb.dask.DaskQuantileDMatrix(client, X, y, enable_categorical=True)
valid_Xy = xgb.dask.DaskQuantileDMatrix(
client, X, y, enable_categorical=True, ref=Xy
)
output = xgb.dask.train(
client,
{"tree_method": "hist"},
Xy,
num_boost_round=10,
evals=[(Xy, "Train"), (valid_Xy, "Valid")]
)
quantile_hist = output["history"]
np.testing.assert_allclose(
quantile_hist["Train"]["rmse"], dmatrix_hist["Train"]["rmse"]
)
np.testing.assert_allclose(
quantile_hist["Valid"]["rmse"], dmatrix_hist["Valid"]["rmse"]
)
@given(params=hist_parameter_strategy, dataset=tm.dataset_strategy)
@settings(
deadline=None, max_examples=10, suppress_health_check=suppress, print_blob=True
)
def test_approx(
self, client: "Client", params: Dict, dataset: tm.TestDataset
) -> None:
num_rounds = 10
self.run_updater_test(client, params, num_rounds, dataset, 'approx')
def run_quantile(self, name: str) -> None:
exe: Optional[str] = None
for possible_path in {'./testxgboost', './build/testxgboost',
'../build/cpubuild/testxgboost',
'../cpu-build/testxgboost'}:
if os.path.exists(possible_path):
exe = possible_path
if exe is None:
return
test = "--gtest_filter=Quantile." + name
def runit(
worker_addr: str, rabit_args: Dict[str, Union[int, str]]
) -> subprocess.CompletedProcess:
# setup environment for running the c++ part.
env = os.environ.copy()
env['DMLC_TRACKER_PORT'] = str(rabit_args['DMLC_TRACKER_PORT'])
env["DMLC_TRACKER_URI"] = str(rabit_args["DMLC_TRACKER_URI"])
return subprocess.run([str(exe), test], env=env, capture_output=True)
with LocalCluster(n_workers=4, dashboard_address=":0") as cluster:
with Client(cluster) as client:
workers = tm.get_client_workers(client)
rabit_args = client.sync(
xgb.dask._get_rabit_args, len(workers), None, client
)
futures = client.map(runit,
workers,
pure=False,
workers=workers,
rabit_args=rabit_args)
results = client.gather(futures)
for ret in results:
msg = ret.stdout.decode('utf-8')
assert msg.find('1 test from Quantile') != -1, msg
assert ret.returncode == 0, msg
@pytest.mark.skipif(**tm.no_dask())
@pytest.mark.gtest
def test_quantile_basic(self) -> None:
self.run_quantile('DistributedBasic')
self.run_quantile('SortedDistributedBasic')
@pytest.mark.skipif(**tm.no_dask())
@pytest.mark.gtest
def test_quantile(self) -> None:
self.run_quantile('Distributed')
self.run_quantile('SortedDistributed')
@pytest.mark.skipif(**tm.no_dask())
@pytest.mark.gtest
def test_quantile_same_on_all_workers(self) -> None:
self.run_quantile("SameOnAllWorkers")
def test_adaptive(self) -> None:
def get_score(config: Dict) -> float:
return float(config["learner"]["learner_model_param"]["base_score"])
def local_test(rabit_args: Dict[str, Union[int, str]], worker_id: int) -> bool:
with xgb.dask.CommunicatorContext(**rabit_args):
if worker_id == 0:
y = np.array([0.0, 0.0, 0.0])
x = np.array([[0.0]] * 3)
else:
y = np.array([1000.0])
x = np.array(
[
[0.0],
]
)
Xy = xgb.DMatrix(x, y)
booster = xgb.train(
{"tree_method": "hist", "objective": "reg:absoluteerror"},
Xy,
num_boost_round=1,
)
config = json.loads(booster.save_config())
base_score = get_score(config)
assert base_score == 250.0
return True
with LocalCluster(n_workers=2, dashboard_address=":0") as cluster:
with Client(cluster) as client:
workers = tm.get_client_workers(client)
rabit_args = client.sync(
xgb.dask._get_rabit_args, len(workers), None, client
)
futures = []
for i, _ in enumerate(workers):
f = client.submit(local_test, rabit_args, i)
futures.append(f)
results = client.gather(futures)
assert all(results)
def test_n_workers(self) -> None:
with LocalCluster(n_workers=2, dashboard_address=":0") as cluster:
with Client(cluster) as client:
workers = tm.get_client_workers(client)
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
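                # Pin the training partitions to the first worker and the
                # validation partitions to the second; the DaskDMatrix pair
                # should then report both workers as holding data.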
dX = client.submit(da.from_array, X, workers=[workers[0]]).result()
dy = client.submit(da.from_array, y, workers=[workers[0]]).result()
train = xgb.dask.DaskDMatrix(client, dX, dy)
dX = dd.from_array(X)
dX = client.persist(dX, workers=workers[1])
dy = dd.from_array(y)
dy = client.persist(dy, workers=workers[1])
valid = xgb.dask.DaskDMatrix(client, dX, dy)
merged = xgb.dask._get_workers_from_data(train, evals=[(valid, 'Valid')])
assert len(merged) == 2
@pytest.mark.skipif(**tm.no_dask())
def test_feature_weights(self, client: "Client") -> None:
kRows = 1024
kCols = 64
rng = da.random.RandomState(1994)
X = rng.random_sample((kRows, kCols), chunks=(32, -1))
y = rng.random_sample(kRows, chunks=32)
fw = np.ones(shape=(kCols,))
for i in range(kCols):
fw[i] *= float(i)
fw = da.from_array(fw)
parser = os.path.join(tm.demo_dir(__file__), "json-model", "json_parser.py")
poly_increasing = get_feature_weights(
X, y, fw, parser, "approx", model=xgb.dask.DaskXGBRegressor
)
fw = np.ones(shape=(kCols,))
for i in range(kCols):
fw[i] *= float(kCols - i)
fw = da.from_array(fw)
poly_decreasing = get_feature_weights(
X, y, fw, parser, "approx", model=xgb.dask.DaskXGBRegressor
)
        # Approximate test; the outcome depends on the implementation of the
        # random number generator in the standard library.
assert poly_increasing[0] > 0.08
assert poly_decreasing[0] < -0.08
@pytest.mark.skipif(**tm.no_dask())
@pytest.mark.skipif(**tm.no_sklearn())
def test_custom_objective(self, client: "Client") -> None:
from sklearn.datasets import fetch_california_housing
X, y = fetch_california_housing(return_X_y=True)
X, y = da.from_array(X), da.from_array(y)
rounds = 20
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'log')
def sqr(
labels: np.ndarray, predts: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
with open(path, 'a') as fd:
print('Running sqr', file=fd)
grad = predts - labels
hess = np.ones(shape=labels.shape[0])
return grad, hess
reg = xgb.dask.DaskXGBRegressor(n_estimators=rounds, objective=sqr,
tree_method='hist')
reg.fit(X, y, eval_set=[(X, y)])
            # Check that the custom objective was called once per boosting round.
with open(path, 'r') as fd:
out = fd.readlines()
assert len(out) == rounds
results_custom = reg.evals_result()
reg = xgb.dask.DaskXGBRegressor(n_estimators=rounds, tree_method='hist')
reg.fit(X, y, eval_set=[(X, y)])
results_native = reg.evals_result()
np.testing.assert_allclose(results_custom['validation_0']['rmse'],
results_native['validation_0']['rmse'])
tm.non_increasing(results_native['validation_0']['rmse'])
def test_no_duplicated_partition(self) -> None:
'''Assert each worker has the correct amount of data, and DMatrix initialization doesn't
generate unnecessary copies of data.
'''
with LocalCluster(n_workers=2, dashboard_address=":0") as cluster:
with Client(cluster) as client:
X, y, _ = generate_array()
n_partitions = X.npartitions
m = xgb.dask.DaskDMatrix(client, X, y)
workers = tm.get_client_workers(client)
rabit_args = client.sync(
xgb.dask._get_rabit_args, len(workers), None, client
)
n_workers = len(workers)
def worker_fn(worker_addr: str, data_ref: Dict) -> None:
with xgb.dask.CommunicatorContext(**rabit_args):
local_dtrain = xgb.dask._dmatrix_from_list_of_parts(
**data_ref, nthread=7
)
total = np.array([local_dtrain.num_row()])
total = xgb.collective.allreduce(total, xgb.collective.Op.SUM)
assert total[0] == kRows
futures = []
for i in range(len(workers)):
futures.append(
client.submit(
worker_fn, workers[i],
m._create_fn_args(workers[i]), pure=False,
workers=[workers[i]])
)
client.gather(futures)
has_what = client.has_what()
cnt = 0
data = set()
for k, v in has_what.items():
for d in v:
cnt += 1
data.add(d)
assert len(data) == cnt
# Subtract the on disk resource from each worker
assert cnt - n_workers == n_partitions
def test_data_initialization(self, client: "Client") -> None:
"""assert that we don't create duplicated DMatrix"""
from sklearn.datasets import load_digits
X, y = load_digits(return_X_y=True)
X, y = dd.from_array(X, chunksize=32), dd.from_array(y, chunksize=32)
validate_data_initialization(
xgb.dask.DaskDMatrix, xgb.dask.DaskXGBClassifier, X, y
)
def run_shap(self, X: Any, y: Any, params: Dict[str, Any], client: "Client") -> None:
rows = X.shape[0]
cols = X.shape[1]
def assert_shape(shape: Tuple[int, ...]) -> None:
assert shape[0] == rows
if "num_class" in params.keys():
assert shape[1] == params["num_class"]
assert shape[2] == cols + 1
else:
assert shape[1] == cols + 1
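        # pred_contribs yields one column per feature plus a trailing bias
        # column (with an extra per-class block for multi-class objectives);
        # summing the contributions over the last axis reproduces the raw
        # margin, which is what the allclose checks below verify.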
X, y = da.from_array(X, chunks=(32, -1)), da.from_array(y, chunks=32)
Xy = xgb.dask.DaskDMatrix(client, X, y)
booster = xgb.dask.train(client, params, Xy, num_boost_round=10)['booster']
test_Xy = xgb.dask.DaskDMatrix(client, X, y)
shap = xgb.dask.predict(client, booster, test_Xy, pred_contribs=True).compute()
margin = xgb.dask.predict(client, booster, test_Xy, output_margin=True).compute()
assert_shape(shap.shape)
assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5)
shap = xgb.dask.predict(client, booster, X, pred_contribs=True).compute()
margin = xgb.dask.predict(client, booster, X, output_margin=True).compute()
assert_shape(shap.shape)
assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5)
if "num_class" not in params.keys():
X = dd.from_dask_array(X).repartition(npartitions=32)
y = dd.from_dask_array(y).repartition(npartitions=32)
shap_df = xgb.dask.predict(
client, booster, X, pred_contribs=True, validate_features=False
).compute()
assert_shape(shap_df.shape)
assert np.allclose(
np.sum(shap_df, axis=len(shap_df.shape) - 1), margin, 1e-5, 1e-5
)
def run_shap_cls_sklearn(self, X: Any, y: Any, client: "Client") -> None:
X, y = da.from_array(X, chunks=(32, -1)), da.from_array(y, chunks=32)
cls = xgb.dask.DaskXGBClassifier(n_estimators=4)
cls.client = client
cls.fit(X, y)
booster = cls.get_booster()
test_Xy = xgb.dask.DaskDMatrix(client, X, y)
shap = xgb.dask.predict(client, booster, test_Xy, pred_contribs=True).compute()
margin = xgb.dask.predict(client, booster, test_Xy, output_margin=True).compute()
assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5)
shap = xgb.dask.predict(client, booster, X, pred_contribs=True).compute()
margin = xgb.dask.predict(client, booster, X, output_margin=True).compute()
assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5)
def test_shap(self, client: "Client") -> None:
from sklearn.datasets import load_diabetes, load_iris
X, y = load_diabetes(return_X_y=True)
params: Dict[str, Any] = {'objective': 'reg:squarederror'}
self.run_shap(X, y, params, client)
X, y = load_iris(return_X_y=True)
params = {'objective': 'multi:softmax', 'num_class': 3}
self.run_shap(X, y, params, client)
params = {'objective': 'multi:softprob', 'num_class': 3}
self.run_shap(X, y, params, client)
self.run_shap_cls_sklearn(X, y, client)
def run_shap_interactions(
self,
X: Any,
y: Any,
params: Dict[str, Any],
client: "Client"
) -> None:
rows = X.shape[0]
cols = X.shape[1]
X, y = da.from_array(X, chunks=(32, -1)), da.from_array(y, chunks=32)
Xy = xgb.dask.DaskDMatrix(client, X, y)
booster = xgb.dask.train(client, params, Xy, num_boost_round=10)['booster']
test_Xy = xgb.dask.DaskDMatrix(client, X, y)
shap = xgb.dask.predict(
client, booster, test_Xy, pred_interactions=True
).compute()
assert len(shap.shape) == 3
assert shap.shape[0] == rows
assert shap.shape[1] == cols + 1
assert shap.shape[2] == cols + 1
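        # Interaction values form an (n_features + 1) x (n_features + 1) matrix
        # per row (features plus bias); summing over the last two axes recovers
        # the raw margin, which is compared against output_margin below.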
margin = xgb.dask.predict(client, booster, test_Xy, output_margin=True).compute()
assert np.allclose(np.sum(shap, axis=(len(shap.shape) - 1, len(shap.shape) - 2)),
margin,
1e-5, 1e-5)
def test_shap_interactions(self, client: "Client") -> None:
from sklearn.datasets import load_diabetes
X, y = load_diabetes(return_X_y=True)
params = {'objective': 'reg:squarederror'}
self.run_shap_interactions(X, y, params, client)
@pytest.mark.skipif(**tm.no_sklearn())
def test_sklearn_io(self, client: 'Client') -> None:
from sklearn.datasets import load_digits
X_, y_ = load_digits(return_X_y=True)
X, y = da.from_array(X_), da.from_array(y_)
cls = xgb.dask.DaskXGBClassifier(n_estimators=10)
cls.client = client
cls.fit(X, y)
predt_0 = cls.predict(X)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, "model.pkl")
with open(path, "wb") as fd:
pickle.dump(cls, fd)
with open(path, "rb") as fd:
cls = pickle.load(fd)
predt_1 = cls.predict(X)
np.testing.assert_allclose(predt_0.compute(), predt_1.compute())
path = os.path.join(tmpdir, 'cls.json')
cls.save_model(path)
cls = xgb.dask.DaskXGBClassifier()
cls.load_model(path)
assert cls.n_classes_ == 10
predt_2 = cls.predict(X)
np.testing.assert_allclose(predt_0.compute(), predt_2.compute())
# Use single node to load
cls = xgb.XGBClassifier()
cls.load_model(path)
assert cls.n_classes_ == 10
predt_3 = cls.predict(X_)
np.testing.assert_allclose(predt_0.compute(), predt_3)
def test_dask_unsupported_features(client: "Client") -> None:
X, y, _ = generate_array()
# gblinear doesn't support distributed training.
with pytest.raises(NotImplementedError, match="gblinear"):
xgb.dask.train(
client, {"booster": "gblinear"}, xgb.dask.DaskDMatrix(client, X, y)
)
def test_parallel_submits(client: "Client") -> None:
"""Test for running multiple train simultaneously from single clients."""
try:
from distributed import MultiLock # NOQA
except ImportError:
pytest.skip("`distributed.MultiLock' is not available")
from sklearn.datasets import load_digits
futures = []
workers = tm.get_client_workers(client)
n_submits = len(workers)
for i in range(n_submits):
X_, y_ = load_digits(return_X_y=True)
X = dd.from_array(X_, chunksize=32)
y = dd.from_array(y_, chunksize=32)
cls = xgb.dask.DaskXGBClassifier(
verbosity=1,
n_estimators=i + 1,
eval_metric="merror",
)
f = client.submit(cls.fit, X, y, pure=False)
futures.append(f)
classifiers = client.gather(futures)
assert len(classifiers) == n_submits
for i, cls in enumerate(classifiers):
assert cls.get_booster().num_boosted_rounds() == i + 1
def run_tree_stats(client: Client, tree_method: str) -> str:
"""assert that different workers count dosn't affect summ statistic's on root"""
def dask_train(X, y, num_obs, num_features):
chunk_size = 100
X = da.from_array(X, chunks=(chunk_size, num_features))
y = da.from_array(y.reshape(num_obs, 1), chunks=(chunk_size, 1))
dtrain = xgb.dask.DaskDMatrix(client, X, y)
output = xgb.dask.train(
client,
{
"verbosity": 0,
"tree_method": tree_method,
"objective": "reg:squarederror",
"max_depth": 3,
},
dtrain,
num_boost_round=1,
)
dump_model = output["booster"].get_dump(with_stats=True, dump_format="json")[0]
return json.loads(dump_model)
num_obs = 1000
num_features = 10
X, y = make_regression(num_obs, num_features, random_state=777)
model = dask_train(X, y, num_obs, num_features)
    # Assert that each internal node's cover equals the sum of its children's cover.
stack = [model]
while stack:
node: dict = stack.pop()
if "leaf" in node.keys():
continue
cover = 0
for c in node["children"]:
cover += c["cover"]
stack.append(c)
assert cover == node["cover"]
return model["cover"]
@pytest.mark.parametrize("tree_method", ["hist", "approx"])
def test_tree_stats(tree_method: str) -> None:
with LocalCluster(n_workers=1, dashboard_address=":0") as cluster:
with Client(cluster) as client:
local = run_tree_stats(client, tree_method)
with LocalCluster(n_workers=2, dashboard_address=":0") as cluster:
with Client(cluster) as client:
distributed = run_tree_stats(client, tree_method)
assert local == distributed
def test_parallel_submit_multi_clients() -> None:
"""Test for running multiple train simultaneously from multiple clients."""
try:
from distributed import MultiLock # NOQA
except ImportError:
pytest.skip("`distributed.MultiLock' is not available")
from sklearn.datasets import load_digits
with LocalCluster(n_workers=4, dashboard_address=":0") as cluster:
with Client(cluster) as client:
workers = tm.get_client_workers(client)
n_submits = len(workers)
assert n_submits == 4
futures = []
for i in range(n_submits):
client = Client(cluster)
X_, y_ = load_digits(return_X_y=True)
X_ += 1.0
X = dd.from_array(X_, chunksize=32)
y = dd.from_array(y_, chunksize=32)
cls = xgb.dask.DaskXGBClassifier(
verbosity=1,
n_estimators=i + 1,
eval_metric="merror",
)
f = client.submit(cls.fit, X, y, pure=False)
futures.append((client, f))
t_futures = []
with ThreadPoolExecutor(max_workers=16) as e:
for i in range(n_submits):
def _() -> xgb.dask.DaskXGBClassifier:
return futures[i][0].compute(futures[i][1]).result()
f = e.submit(_)
t_futures.append(f)
for i, f in enumerate(t_futures):
assert f.result().get_booster().num_boosted_rounds() == i + 1
class TestDaskCallbacks:
@pytest.mark.skipif(**tm.no_sklearn())
def test_early_stopping(self, client: "Client") -> None:
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
X, y = da.from_array(X), da.from_array(y)
m = xgb.dask.DaskDMatrix(client, X, y)
valid = xgb.dask.DaskDMatrix(client, X, y)
early_stopping_rounds = 5
booster = xgb.dask.train(client, {'objective': 'binary:logistic',
'eval_metric': 'error',
'tree_method': 'hist'}, m,
evals=[(valid, 'Valid')],
num_boost_round=1000,
early_stopping_rounds=early_stopping_rounds)['booster']
assert hasattr(booster, 'best_score')
dump = booster.get_dump(dump_format='json')
assert len(dump) - booster.best_iteration == early_stopping_rounds + 1
valid_X, valid_y = load_breast_cancer(return_X_y=True)
valid_X, valid_y = da.from_array(valid_X), da.from_array(valid_y)
cls = xgb.dask.DaskXGBClassifier(objective='binary:logistic', tree_method='hist',
n_estimators=1000)
cls.client = client
cls.fit(X, y, early_stopping_rounds=early_stopping_rounds,
eval_set=[(valid_X, valid_y)])
booster = cls.get_booster()
dump = booster.get_dump(dump_format='json')
assert len(dump) - booster.best_iteration == early_stopping_rounds + 1
# Specify the metric
cls = xgb.dask.DaskXGBClassifier(objective='binary:logistic', tree_method='hist',
n_estimators=1000)
cls.client = client
cls.fit(X, y, early_stopping_rounds=early_stopping_rounds,
eval_set=[(valid_X, valid_y)], eval_metric='error')
assert tm.non_increasing(cls.evals_result()['validation_0']['error'])
booster = cls.get_booster()
dump = booster.get_dump(dump_format='json')
assert len(cls.evals_result()['validation_0']['error']) < 20
assert len(dump) - booster.best_iteration == early_stopping_rounds + 1
@pytest.mark.skipif(**tm.no_sklearn())
def test_early_stopping_custom_eval(self, client: "Client") -> None:
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
X, y = da.from_array(X), da.from_array(y)
m = xgb.dask.DaskDMatrix(client, X, y)
valid = xgb.dask.DaskDMatrix(client, X, y)
early_stopping_rounds = 5
booster = xgb.dask.train(
client, {'objective': 'binary:logistic',
'eval_metric': 'error',
'tree_method': 'hist'}, m,
evals=[(m, 'Train'), (valid, 'Valid')],
feval=tm.eval_error_metric,
num_boost_round=1000,
early_stopping_rounds=early_stopping_rounds)['booster']
assert hasattr(booster, 'best_score')
dump = booster.get_dump(dump_format='json')
assert len(dump) - booster.best_iteration == early_stopping_rounds + 1
valid_X, valid_y = load_breast_cancer(return_X_y=True)
valid_X, valid_y = da.from_array(valid_X), da.from_array(valid_y)
cls = xgb.dask.DaskXGBClassifier(
objective='binary:logistic',
tree_method='hist',
n_estimators=1000,
eval_metric=tm.eval_error_metric_skl
)
cls.client = client
cls.fit(
X, y, early_stopping_rounds=early_stopping_rounds, eval_set=[(valid_X, valid_y)]
)
booster = cls.get_booster()
dump = booster.get_dump(dump_format='json')
assert len(dump) - booster.best_iteration == early_stopping_rounds + 1
@pytest.mark.skipif(**tm.no_sklearn())
def test_callback(self, client: "Client") -> None:
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
X, y = da.from_array(X), da.from_array(y)
cls = xgb.dask.DaskXGBClassifier(objective='binary:logistic', tree_method='hist',
n_estimators=10)
cls.client = client
with tempfile.TemporaryDirectory() as tmpdir:
cls.fit(X, y, callbacks=[xgb.callback.TrainingCheckPoint(
directory=Path(tmpdir),
iterations=1,
name='model'
)])
for i in range(1, 10):
assert os.path.exists(
os.path.join(tmpdir, 'model_' + str(i) + '.json'))
| {
"content_hash": "2cc658fc8c81486ec71cc8bc93257e3e",
"timestamp": "",
"source": "github",
"line_count": 2095,
"max_line_length": 96,
"avg_line_length": 37.03723150357995,
"alnum_prop": 0.5858389287693477,
"repo_name": "dmlc/xgboost",
"id": "fbe5607a1240272bff9110bf3232e4cfc7737c4e",
"size": "77593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_distributed/test_with_dask/test_with_dask.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1383"
},
{
"name": "C",
"bytes": "23067"
},
{
"name": "C++",
"bytes": "2182522"
},
{
"name": "CMake",
"bytes": "52394"
},
{
"name": "CSS",
"bytes": "3812"
},
{
"name": "Cuda",
"bytes": "855374"
},
{
"name": "Dockerfile",
"bytes": "2364"
},
{
"name": "Groovy",
"bytes": "1251"
},
{
"name": "Java",
"bytes": "206549"
},
{
"name": "M4",
"bytes": "2131"
},
{
"name": "Makefile",
"bytes": "8179"
},
{
"name": "PowerShell",
"bytes": "4308"
},
{
"name": "Python",
"bytes": "1189411"
},
{
"name": "R",
"bytes": "342898"
},
{
"name": "Scala",
"bytes": "471040"
},
{
"name": "Shell",
"bytes": "45815"
},
{
"name": "TeX",
"bytes": "913"
}
],
"symlink_target": ""
} |
from warnings import warn
try: # python 3+
from inspect import signature
except ImportError:
from funcsigs import signature
try:
from typing import Any, Tuple, Union, Dict, TypeVar, Callable, Iterable, Sized
try:
from typing import Type
except ImportError:
pass
T = TypeVar('T')
except ImportError:
pass
from autoclass.utils import is_attr_selected, method_already_there, check_known_decorators, read_fields, \
__AUTOCLASS_OVERRIDE_ANNOTATION, iterate_on_vars
from decopatch import class_decorator, DECORATED
@class_decorator
def autorepr(include=None, # type: Union[str, Tuple[str]]
exclude=None, # type: Union[str, Tuple[str]]
only_known_fields=True, # type: bool
only_public_fields=True, # type: bool
curly_string_repr=False, # type: bool
cls=DECORATED
):
"""
    A decorator to generate the __str__ and __repr__ methods for class `cls` if they are not already implemented.
    Parameters allow customizing the list of fields that will be visible in the representation.
:param include: a tuple of explicit attribute names to include (None means all)
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
:param only_known_fields: if True (default), only known fields (constructor arguments or pyfields fields) will be
exposed through the str/repr view, not any other field that would be created in the constructor or
dynamically. If set to False, the representation is a direct view of *all* public object fields. This view can be
filtered with include/exclude and private fields can be made visible by setting only_public_fields to false
:param only_public_fields: this parameter is only used when only_constructor_args is set to False. If
only_public_fields is set to False, all fields are visible. Otherwise (default), class-private fields will be
hidden
:param curly_string_repr: turn this to `True` to get the curly string representation `{%r: %r, ...}` instead of
the default one `(%s=%r, ...)`
:return:
"""
return autorepr_decorate(cls, include=include, exclude=exclude, curly_string_repr=curly_string_repr,
only_public_fields=only_public_fields, only_known_fields=only_known_fields)
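# Illustrative usage sketch (the class and field names below are hypothetical):
#
#     @autorepr
#     class Point(object):
#         def __init__(self, x, y):
#             self.x = x
#             self.y = y
#
#     repr(Point(1, 2))   # -> "Point(x=1, y=2)"
#
# With curly_string_repr=True the same object would render as
# "Point(**{'x': 1, 'y': 2})".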
def autorepr_decorate(cls, # type: Type[T]
include=None, # type: Union[str, Tuple[str]]
exclude=None, # type: Union[str, Tuple[str]]
only_known_fields=True, # type: bool
only_public_fields=True, # type: bool
curly_string_repr=False, # type: bool
):
# type: (...) -> Type[T]
"""
    To automatically generate the appropriate __str__ and __repr__ methods without using the @autorepr decorator syntax.
:param cls: the class on which to execute. Note that it won't be wrapped.
:param include: a tuple of explicit attribute names to include (None means all)
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
:param only_known_fields: if True (default), only known fields (constructor arguments or pyfields fields) will be
exposed through the str/repr view, not any other field that would be created in the constructor or
dynamically. If set to False, the representation is a direct view of *all* public object fields. This view can be
filtered with include/exclude and private fields can be made visible by setting only_public_fields to false
:param only_public_fields: this parameter is only used when only_constructor_args is set to False. If
only_public_fields is set to False, all fields are visible. Otherwise (default), class-private fields will be
hidden
:param curly_string_repr: turn this to `True` to get the curly string representation `{%r: %r, ...}` instead of
the default one `(%s=%r, ...)`
:return:
"""
# first check that we do not conflict with other known decorators
check_known_decorators(cls, '@autorepr')
# perform the class mod
if only_known_fields:
# retrieve the list of fields from pyfields or constructor signature
selected_names, source = read_fields(cls, include=include, exclude=exclude, caller="@autorepr")
        # add autorepr with the explicit list of field names
execute_autorepr_on_class(cls, selected_names=selected_names, curly_string_repr=curly_string_repr)
else:
# no explicit list
execute_autorepr_on_class(cls, include=include, exclude=exclude, public_fields_only=only_public_fields,
curly_string_repr=curly_string_repr)
return cls
def execute_autorepr_on_class(cls, # type: Type[T]
selected_names=None, # type: Iterable[str]
include=None, # type: Union[str, Tuple[str]]
exclude=None, # type: Union[str, Tuple[str]]
public_fields_only=True, # type: bool
curly_string_repr=False, # type: bool
):
"""
    This function overrides the __str__ and __repr__ methods if they are not already implemented.
    Parameters allow customizing the list of fields that will be visible.
:param cls: the class on which to execute.
:param selected_names: an explicit list of attribute names that should be used in the dict. If this is provided,
`include`, `exclude` and `public_fields_only` should be left as default as they are not used.
:param include: a tuple of explicit attribute names to include (None means all). This parameter is only used when
`selected_names` is not provided.
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None. This
parameter is only used when `selected_names` is not provided.
:param public_fields_only: this parameter is only used when `selected_names` is not provided. If
public_fields_only is set to False, all fields are visible. Otherwise (default), class-private fields will be
hidden from the exposed str/repr view.
:param curly_string_repr: turn this to `True` to get the curly string representation `{%r: %r, ...}` instead of
the default one `(%s=%r, ...)`
:return:
"""
if selected_names is not None:
# case (a) hardcoded list - easy: we know the exact list of fields to make visible
if include is not None or exclude is not None or public_fields_only is not True:
raise ValueError("`selected_names` can not be used together with `include`, `exclude` or "
"`public_fields_only`")
str_repr_methods = create_repr_methods_for_hardcoded_list(selected_names, curly_mode=curly_string_repr)
else:
# case (b) the list of fields is not predetermined, it will depend on vars(self)
if include is None and exclude is None and not public_fields_only:
# easy: all vars() are exposed
str_repr_methods = create_repr_methods_for_object_vars(curly_mode=curly_string_repr)
else:
# harder: all fields are allowed, but there are filters on this dynamic list
# private_name_prefix = '_' + object_type.__name__ + '_'
private_name_prefix = '_' if public_fields_only else None
str_repr_methods = create_repr_methods_for_object_vars_with_filters(curly_mode=curly_string_repr,
include=include, exclude=exclude,
private_name_prefix=private_name_prefix)
if method_already_there(cls, '__str__', this_class_only=True):
if not hasattr(cls.__str__, __AUTOCLASS_OVERRIDE_ANNOTATION):
warn('__str__ is already defined on class %s, it will be overridden with the one generated by '
'@autorepr/@autoclass ! If you want to use your version, annotate it with @autoclass_override'
% cls)
cls.__str__ = str_repr_methods.str
else:
cls.__str__ = str_repr_methods.str
if method_already_there(cls, '__repr__', this_class_only=True):
if not hasattr(cls.__repr__, __AUTOCLASS_OVERRIDE_ANNOTATION):
warn('__repr__ is already defined on class %s, it will be overridden with the one generated by '
'@autorepr/@autoclass ! If you want to use your version, annotate it with @autoclass_override'
% cls)
cls.__repr__ = str_repr_methods.repr
else:
cls.__repr__ = str_repr_methods.repr
class ReprMethods(object):
"""
Container used in @autodict to exchange the various methods created
"""
__slots__ = 'str', 'repr'
def __init__(self, str, repr):
self.str = str
self.repr = repr
def create_repr_methods_for_hardcoded_list(selected_names, # type: Union[Sized, Iterable[str]]
curly_mode # type: bool
):
# type: (...) -> ReprMethods
"""
:param selected_names:
:param curly_mode:
:return:
"""
if not curly_mode:
def __repr__(self):
"""
Generated by @autorepr. Relies on the hardcoded list of field names and "getattr" (object) for the value.
"""
return '%s(%s)' % (self.__class__.__name__,
', '.join('%s=%r' % (k, getattr(self, k)) for k in selected_names))
else:
def __repr__(self):
"""
Generated by @autorepr. Relies on the hardcoded list of field names and "getattr" (object) for the value.
"""
return '%s(**{%s})' % (self.__class__.__name__,
', '.join('%r: %r' % (k, getattr(self, k)) for k in selected_names))
return ReprMethods(str=__repr__, repr=__repr__)
def create_repr_methods_for_object_vars(curly_mode # type: bool
):
# type: (...) -> ReprMethods
"""
:param curly_mode:
:return:
"""
if not curly_mode:
def __repr__(self):
"""
Generated by @autorepr. Relies on the list of vars() and "getattr" (object) for the value.
"""
return '%s(%s)' % (self.__class__.__name__, ', '.join('%s=%r' % (k, getattr(self, k))
for k in iterate_on_vars(self)))
else:
def __repr__(self):
"""
Generated by @autorepr. Relies on the list of vars() and "getattr" (object) for the value.
"""
return '%s(**{%s})' % (self.__class__.__name__, ', '.join('%r: %r' % (k, getattr(self, k))
for k in iterate_on_vars(self)))
return ReprMethods(str=__repr__, repr=__repr__)
def create_repr_methods_for_object_vars_with_filters(curly_mode, # type: bool
include, # type: Union[str, Tuple[str]]
exclude, # type: Union[str, Tuple[str]]
private_name_prefix=None # type: str
):
# type: (...) -> ReprMethods
"""
:param curly_mode:
:param include:
:param exclude:
:param private_name_prefix:
:return:
"""
public_fields_only = private_name_prefix is not None
def _vars_iterator(self):
"""
Filters the vars(self) according to include/exclude/public_fields_only
:param self:
:return:
"""
for att_name in iterate_on_vars(self):
# filter based on the name (include/exclude + private/public)
if is_attr_selected(att_name, include=include, exclude=exclude) and \
(not public_fields_only or not att_name.startswith(private_name_prefix)):
# use it
yield att_name, getattr(self, att_name)
if not curly_mode:
def __repr__(self):
"""
Generated by @autorepr. Relies on the filtered list of vars() and "getattr" (object) for the value.
"""
return '%s(%s)' % (self.__class__.__name__, ', '.join('%s=%r' % (k, v) for k, v in _vars_iterator(self)))
else:
def __repr__(self):
"""
Generated by @autorepr. Relies on the filtered list of vars() and "getattr" (object) for the value.
"""
return '%s(**{%s})' % (self.__class__.__name__,
', '.join('%r: %r' % (k, v) for k, v in _vars_iterator(self)))
return ReprMethods(str=__repr__, repr=__repr__)
| {
"content_hash": "128bc5597ad140bcc34e4caf72e78c38",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 121,
"avg_line_length": 48.167883211678834,
"alnum_prop": 0.5757690559175632,
"repo_name": "smarie/python-classtools-autocode",
"id": "b5e0b3a11ed2f55e09a4e8f584b07d85ac6c7200",
"size": "13324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoclass/autorepr_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60848"
}
],
"symlink_target": ""
} |
"""
Support for MQTT cover devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components import mqtt, cover
from homeassistant.components.cover import (
CoverDevice, ATTR_TILT_POSITION, SUPPORT_OPEN_TILT,
SUPPORT_CLOSE_TILT, SUPPORT_STOP_TILT, SUPPORT_SET_TILT_POSITION,
SUPPORT_OPEN, SUPPORT_CLOSE, SUPPORT_STOP, SUPPORT_SET_POSITION,
ATTR_POSITION)
from homeassistant.exceptions import TemplateError
from homeassistant.const import (
CONF_NAME, CONF_VALUE_TEMPLATE, CONF_OPTIMISTIC, STATE_OPEN,
STATE_CLOSED, STATE_UNKNOWN, CONF_DEVICE)
from homeassistant.components.mqtt import (
ATTR_DISCOVERY_HASH, CONF_AVAILABILITY_TOPIC, CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC, CONF_PAYLOAD_AVAILABLE, CONF_PAYLOAD_NOT_AVAILABLE,
CONF_QOS, CONF_RETAIN, valid_publish_topic, valid_subscribe_topic,
MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from homeassistant.components.mqtt.discovery import MQTT_DISCOVERY_NEW
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType, ConfigType
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
CONF_GET_POSITION_TOPIC = 'position_topic'
CONF_TILT_COMMAND_TOPIC = 'tilt_command_topic'
CONF_TILT_STATUS_TOPIC = 'tilt_status_topic'
CONF_SET_POSITION_TOPIC = 'set_position_topic'
CONF_SET_POSITION_TEMPLATE = 'set_position_template'
CONF_PAYLOAD_OPEN = 'payload_open'
CONF_PAYLOAD_CLOSE = 'payload_close'
CONF_PAYLOAD_STOP = 'payload_stop'
CONF_STATE_OPEN = 'state_open'
CONF_STATE_CLOSED = 'state_closed'
CONF_POSITION_OPEN = 'position_open'
CONF_POSITION_CLOSED = 'position_closed'
CONF_TILT_CLOSED_POSITION = 'tilt_closed_value'
CONF_TILT_OPEN_POSITION = 'tilt_opened_value'
CONF_TILT_MIN = 'tilt_min'
CONF_TILT_MAX = 'tilt_max'
CONF_TILT_STATE_OPTIMISTIC = 'tilt_optimistic'
CONF_TILT_INVERT_STATE = 'tilt_invert_state'
CONF_UNIQUE_ID = 'unique_id'
TILT_PAYLOAD = "tilt"
COVER_PAYLOAD = "cover"
DEFAULT_NAME = 'MQTT Cover'
DEFAULT_PAYLOAD_OPEN = 'OPEN'
DEFAULT_PAYLOAD_CLOSE = 'CLOSE'
DEFAULT_PAYLOAD_STOP = 'STOP'
DEFAULT_POSITION_OPEN = 100
DEFAULT_POSITION_CLOSED = 0
DEFAULT_OPTIMISTIC = False
DEFAULT_RETAIN = False
DEFAULT_TILT_CLOSED_POSITION = 0
DEFAULT_TILT_OPEN_POSITION = 100
DEFAULT_TILT_MIN = 0
DEFAULT_TILT_MAX = 100
DEFAULT_TILT_OPTIMISTIC = False
DEFAULT_TILT_INVERT_STATE = False
OPEN_CLOSE_FEATURES = (SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP)
TILT_FEATURES = (SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_STOP_TILT |
SUPPORT_SET_TILT_POSITION)
def validate_options(value):
"""Validate options.
    If the set position topic is configured, the get position topic must be configured as well.
"""
if (CONF_SET_POSITION_TOPIC in value and
CONF_GET_POSITION_TOPIC not in value):
raise vol.Invalid(
"set_position_topic must be set together with position_topic.")
return value
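# Illustration with hypothetical topic names: a config defining both topics
# passes validation, one defining only the set-position topic does not.
#
#     validate_options({'set_position_topic': 'home/cover/set_pos',
#                       'position_topic': 'home/cover/pos'})          # returned unchanged
#     validate_options({'set_position_topic': 'home/cover/set_pos'})  # raises vol.Invalid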
PLATFORM_SCHEMA = vol.All(mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_SET_POSITION_TOPIC): valid_publish_topic,
vol.Optional(CONF_SET_POSITION_TEMPLATE): cv.template,
vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_GET_POSITION_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_OPEN, default=DEFAULT_PAYLOAD_OPEN): cv.string,
vol.Optional(CONF_PAYLOAD_CLOSE, default=DEFAULT_PAYLOAD_CLOSE): cv.string,
vol.Optional(CONF_PAYLOAD_STOP, default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(CONF_STATE_OPEN, default=STATE_OPEN): cv.string,
vol.Optional(CONF_STATE_CLOSED, default=STATE_CLOSED): cv.string,
vol.Optional(CONF_POSITION_OPEN,
default=DEFAULT_POSITION_OPEN): int,
vol.Optional(CONF_POSITION_CLOSED,
default=DEFAULT_POSITION_CLOSED): int,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_TILT_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_TILT_STATUS_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_TILT_CLOSED_POSITION,
default=DEFAULT_TILT_CLOSED_POSITION): int,
vol.Optional(CONF_TILT_OPEN_POSITION,
default=DEFAULT_TILT_OPEN_POSITION): int,
vol.Optional(CONF_TILT_MIN, default=DEFAULT_TILT_MIN): int,
vol.Optional(CONF_TILT_MAX, default=DEFAULT_TILT_MAX): int,
vol.Optional(CONF_TILT_STATE_OPTIMISTIC,
default=DEFAULT_TILT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_TILT_INVERT_STATE,
default=DEFAULT_TILT_INVERT_STATE): cv.boolean,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema), validate_options)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities, discovery_info=None):
"""Set up MQTT cover through configuration.yaml."""
await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT cover dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add an MQTT cover."""
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(config, async_add_entities,
discovery_payload[ATTR_DISCOVERY_HASH])
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(cover.DOMAIN, 'mqtt'),
async_discover)
async def _async_setup_entity(config, async_add_entities, discovery_hash=None):
"""Set up the MQTT Cover."""
async_add_entities([MqttCover(config, discovery_hash)])
class MqttCover(MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo,
CoverDevice):
"""Representation of a cover that can be controlled using MQTT."""
def __init__(self, config, discovery_hash):
"""Initialize the cover."""
self._unique_id = config.get(CONF_UNIQUE_ID)
self._position = None
self._state = None
self._sub_state = None
self._optimistic = None
self._tilt_value = None
self._tilt_optimistic = None
# Load config
self._setup_from_config(config)
availability_topic = config.get(CONF_AVAILABILITY_TOPIC)
payload_available = config.get(CONF_PAYLOAD_AVAILABLE)
payload_not_available = config.get(CONF_PAYLOAD_NOT_AVAILABLE)
qos = config.get(CONF_QOS)
device_config = config.get(CONF_DEVICE)
MqttAvailability.__init__(self, availability_topic, qos,
payload_available, payload_not_available)
MqttDiscoveryUpdate.__init__(self, discovery_hash,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config)
async def async_added_to_hass(self):
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.availability_discovery_update(config)
await self._subscribe_topics()
self.async_schedule_update_ha_state()
def _setup_from_config(self, config):
self._config = config
self._optimistic = (config.get(CONF_OPTIMISTIC) or
(config.get(CONF_STATE_TOPIC) is None and
config.get(CONF_GET_POSITION_TOPIC) is None))
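        # The expression above forces optimistic mode when neither a state
        # topic nor a position topic is configured, since there is then no
        # feedback channel to report the actual cover state.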
self._tilt_optimistic = config.get(CONF_TILT_STATE_OPTIMISTIC)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
template = self._config.get(CONF_VALUE_TEMPLATE)
if template is not None:
template.hass = self.hass
set_position_template = self._config.get(CONF_SET_POSITION_TEMPLATE)
if set_position_template is not None:
set_position_template.hass = self.hass
topics = {}
@callback
def tilt_updated(topic, payload, qos):
"""Handle tilt updates."""
if (payload.isnumeric() and
(self._config.get(CONF_TILT_MIN) <= int(payload) <=
self._config.get(CONF_TILT_MAX))):
level = self.find_percentage_in_range(float(payload))
self._tilt_value = level
self.async_schedule_update_ha_state()
@callback
def state_message_received(topic, payload, qos):
"""Handle new MQTT state messages."""
if template is not None:
payload = template.async_render_with_possible_json_value(
payload)
if payload == self._config.get(CONF_STATE_OPEN):
self._state = False
elif payload == self._config.get(CONF_STATE_CLOSED):
self._state = True
else:
_LOGGER.warning("Payload is not True or False: %s", payload)
return
self.async_schedule_update_ha_state()
@callback
def position_message_received(topic, payload, qos):
"""Handle new MQTT state messages."""
if template is not None:
payload = template.async_render_with_possible_json_value(
payload)
if payload.isnumeric():
percentage_payload = self.find_percentage_in_range(
float(payload), COVER_PAYLOAD)
self._position = percentage_payload
self._state = percentage_payload == DEFAULT_POSITION_CLOSED
else:
_LOGGER.warning(
"Payload is not integer within range: %s",
payload)
return
self.async_schedule_update_ha_state()
if self._config.get(CONF_GET_POSITION_TOPIC):
topics['get_position_topic'] = {
'topic': self._config.get(CONF_GET_POSITION_TOPIC),
'msg_callback': position_message_received,
'qos': self._config.get(CONF_QOS)}
elif self._config.get(CONF_STATE_TOPIC):
topics['state_topic'] = {
'topic': self._config.get(CONF_STATE_TOPIC),
'msg_callback': state_message_received,
'qos': self._config.get(CONF_QOS)}
else:
# Force into optimistic mode.
self._optimistic = True
if self._config.get(CONF_TILT_STATUS_TOPIC) is None:
self._tilt_optimistic = True
else:
self._tilt_optimistic = False
self._tilt_value = STATE_UNKNOWN
topics['tilt_status_topic'] = {
'topic': self._config.get(CONF_TILT_STATUS_TOPIC),
'msg_callback': tilt_updated,
'qos': self._config.get(CONF_QOS)}
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
topics)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def name(self):
"""Return the name of the cover."""
return self._config.get(CONF_NAME)
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._state
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._position
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt."""
return self._tilt_value
@property
def supported_features(self):
"""Flag supported features."""
supported_features = 0
if self._config.get(CONF_COMMAND_TOPIC) is not None:
supported_features = OPEN_CLOSE_FEATURES
if self._config.get(CONF_SET_POSITION_TOPIC) is not None:
supported_features |= SUPPORT_SET_POSITION
if self._config.get(CONF_TILT_COMMAND_TOPIC) is not None:
supported_features |= TILT_FEATURES
return supported_features
async def async_open_cover(self, **kwargs):
"""Move the cover up.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._config.get(CONF_COMMAND_TOPIC),
self._config.get(CONF_PAYLOAD_OPEN), self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic:
# Optimistically assume that cover has changed state.
self._state = False
if self._config.get(CONF_GET_POSITION_TOPIC):
self._position = self.find_percentage_in_range(
self._config.get(CONF_POSITION_OPEN), COVER_PAYLOAD)
self.async_schedule_update_ha_state()
async def async_close_cover(self, **kwargs):
"""Move the cover down.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._config.get(CONF_COMMAND_TOPIC),
self._config.get(CONF_PAYLOAD_CLOSE), self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic:
# Optimistically assume that cover has changed state.
self._state = True
if self._config.get(CONF_GET_POSITION_TOPIC):
self._position = self.find_percentage_in_range(
self._config.get(CONF_POSITION_CLOSED), COVER_PAYLOAD)
self.async_schedule_update_ha_state()
async def async_stop_cover(self, **kwargs):
"""Stop the device.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._config.get(CONF_COMMAND_TOPIC),
self._config.get(CONF_PAYLOAD_STOP), self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
async def async_open_cover_tilt(self, **kwargs):
"""Tilt the cover open."""
mqtt.async_publish(self.hass,
self._config.get(CONF_TILT_COMMAND_TOPIC),
self._config.get(CONF_TILT_OPEN_POSITION),
self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._tilt_optimistic:
self._tilt_value = self._config.get(CONF_TILT_OPEN_POSITION)
self.async_schedule_update_ha_state()
async def async_close_cover_tilt(self, **kwargs):
"""Tilt the cover closed."""
mqtt.async_publish(self.hass,
self._config.get(CONF_TILT_COMMAND_TOPIC),
self._config.get(CONF_TILT_CLOSED_POSITION),
self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._tilt_optimistic:
self._tilt_value = self._config.get(CONF_TILT_CLOSED_POSITION)
self.async_schedule_update_ha_state()
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
if ATTR_TILT_POSITION not in kwargs:
return
position = float(kwargs[ATTR_TILT_POSITION])
# The position needs to be between min and max
level = self.find_in_range_from_percent(position)
mqtt.async_publish(self.hass,
self._config.get(CONF_TILT_COMMAND_TOPIC),
level,
self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
set_position_template = self._config.get(CONF_SET_POSITION_TEMPLATE)
if ATTR_POSITION in kwargs:
position = kwargs[ATTR_POSITION]
percentage_position = position
if set_position_template is not None:
try:
position = set_position_template.async_render(
**kwargs)
except TemplateError as ex:
_LOGGER.error(ex)
self._state = None
elif (self._config.get(CONF_POSITION_OPEN) != 100 and
self._config.get(CONF_POSITION_CLOSED) != 0):
position = self.find_in_range_from_percent(
position, COVER_PAYLOAD)
mqtt.async_publish(self.hass,
self._config.get(CONF_SET_POSITION_TOPIC),
position,
self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic:
self._state = percentage_position == \
self._config.get(CONF_POSITION_CLOSED)
self._position = percentage_position
self.async_schedule_update_ha_state()
def find_percentage_in_range(self, position, range_type=TILT_PAYLOAD):
"""Find the 0-100% value within the specified range."""
# the range of motion as defined by the min max values
if range_type == COVER_PAYLOAD:
max_range = self._config.get(CONF_POSITION_OPEN)
min_range = self._config.get(CONF_POSITION_CLOSED)
else:
max_range = self._config.get(CONF_TILT_MAX)
min_range = self._config.get(CONF_TILT_MIN)
current_range = max_range - min_range
# offset to be zero based
offset_position = position - min_range
position_percentage = round(
float(offset_position) / current_range * 100.0)
max_percent = 100
min_percent = 0
position_percentage = min(max(position_percentage, min_percent),
max_percent)
if range_type == TILT_PAYLOAD and \
self._config.get(CONF_TILT_INVERT_STATE):
return 100 - position_percentage
return position_percentage
def find_in_range_from_percent(self, percentage, range_type=TILT_PAYLOAD):
"""
        Find the adjusted value for a 0-100% input within the specified range.
        For example, if the range is 80-180 and the percentage is 90, this
        method determines the value to send on the topic by offsetting by the
        range minimum and scaling the percentage across the width of the range.
"""
if range_type == COVER_PAYLOAD:
max_range = self._config.get(CONF_POSITION_OPEN)
min_range = self._config.get(CONF_POSITION_CLOSED)
else:
max_range = self._config.get(CONF_TILT_MAX)
min_range = self._config.get(CONF_TILT_MIN)
offset = min_range
current_range = max_range - min_range
position = round(current_range * (percentage / 100.0))
position += offset
if range_type == TILT_PAYLOAD and \
self._config.get(CONF_TILT_INVERT_STATE):
position = max_range - position + offset
return position
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
| {
"content_hash": "788ae8b51041a64905702996626d895b",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 79,
"avg_line_length": 40.04950495049505,
"alnum_prop": 0.6162175525339926,
"repo_name": "tinloaf/home-assistant",
"id": "3926c84cb924e895aa9aa1f8cec841062727fe80",
"size": "20225",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/cover/mqtt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
"""
Support for D-link W215 smart switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.dlink/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import TEMP_CELSIUS, STATE_UNKNOWN
REQUIREMENTS = ['https://github.com/LinuxChristian/pyW215/archive/'
'v0.4.zip#pyW215==0.4']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'D-link Smart Plug W215'
DEFAULT_PASSWORD = ''
DEFAULT_USERNAME = 'admin'
CONF_USE_LEGACY_PROTOCOL = 'use_legacy_protocol'
ATTR_CURRENT_CONSUMPTION = 'Current Consumption'
ATTR_TOTAL_CONSUMPTION = 'Total Consumption'
ATTR_TEMPERATURE = 'Temperature'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_USE_LEGACY_PROTOCOL, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
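# A representative configuration.yaml entry (host and credentials are
# placeholder values):
#
#     switch:
#       - platform: dlink
#         host: 192.168.1.32
#         username: admin
#         password: YOUR_PASSWORD
#         use_legacy_protocol: false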
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up a D-Link Smart Plug."""
from pyW215.pyW215 import SmartPlug
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_legacy_protocol = config.get(CONF_USE_LEGACY_PROTOCOL)
name = config.get(CONF_NAME)
data = SmartPlugData(SmartPlug(host,
password,
username,
use_legacy_protocol))
add_devices([SmartPlugSwitch(hass, data, name)], True)
class SmartPlugSwitch(SwitchDevice):
"""Representation of a D-link Smart Plug switch."""
def __init__(self, hass, data, name):
"""Initialize the switch."""
self.units = hass.config.units
self.data = data
self._name = name
@property
def name(self):
"""Return the name of the Smart Plug, if any."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
try:
ui_temp = self.units.temperature(int(self.data.temperature),
TEMP_CELSIUS)
temperature = "%i %s" % \
(ui_temp, self.units.temperature_unit)
except (ValueError, TypeError):
temperature = STATE_UNKNOWN
try:
current_consumption = "%.2f W" % \
float(self.data.current_consumption)
except ValueError:
current_consumption = STATE_UNKNOWN
try:
total_consumption = "%.1f kWh" % \
float(self.data.total_consumption)
except ValueError:
total_consumption = STATE_UNKNOWN
attrs = {
ATTR_CURRENT_CONSUMPTION: current_consumption,
ATTR_TOTAL_CONSUMPTION: total_consumption,
ATTR_TEMPERATURE: temperature
}
return attrs
@property
def current_power_watt(self):
"""Return the current power usage in Watt."""
try:
return float(self.data.current_consumption)
except ValueError:
return None
@property
def is_on(self):
"""Return true if switch is on."""
return self.data.state == 'ON'
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.data.smartplug.state = 'ON'
def turn_off(self):
"""Turn the switch off."""
self.data.smartplug.state = 'OFF'
def update(self):
"""Get the latest data from the smart plug and updates the states."""
self.data.update()
class SmartPlugData(object):
"""Get the latest data from smart plug."""
def __init__(self, smartplug):
"""Initialize the data object."""
self.smartplug = smartplug
self.state = None
self.temperature = None
self.current_consumption = None
self.total_consumption = None
def update(self):
"""Get the latest data from the smart plug."""
self.state = self.smartplug.state
self.temperature = self.smartplug.temperature
self.current_consumption = self.smartplug.current_consumption
self.total_consumption = self.smartplug.total_consumption
| {
"content_hash": "dfedacc60ca9a767b3ba13d726a3d780",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 77,
"avg_line_length": 31.958904109589042,
"alnum_prop": 0.6215173596228033,
"repo_name": "happyleavesaoc/home-assistant",
"id": "d5036f9cb0689cff4b51f2eb87783874d8fc935a",
"size": "4666",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/dlink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1952235"
},
{
"name": "Python",
"bytes": "6672532"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "14949"
}
],
"symlink_target": ""
} |
import matplotlib
matplotlib.use('Agg')
import pyroms
import pyroms_toolbox
# load the grid
srcgrd = pyroms_toolbox.Grid_HYCOM.get_nc_Grid_HYCOM('/archive/u1/uaf/kate/HYCOM/Svalbard/HYCOM_GLBa0.08_North_grid2.nc')
dstgrd = pyroms.grid.get_ROMS_grid('ARCTIC2')
# make remap grid files for SCRIP
pyroms_toolbox.Grid_HYCOM.make_remap_grid_file(srcgrd)
pyroms.remapping.make_remap_grid_file(dstgrd, Cpos='rho')
pyroms.remapping.make_remap_grid_file(dstgrd, Cpos='u')
pyroms.remapping.make_remap_grid_file(dstgrd, Cpos='v')
# compute remap weights
# input namelist variables for bilinear remapping at rho points
grid1_file = 'remap_grid_GLBa0.08_NEP_t.nc'
grid2_file = 'remap_grid_ARCTIC2_rho.nc'
interp_file1 = 'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_rho.nc'
interp_file2 = 'remap_weights_ARCTIC2_to_GLBa0.08_bilinear_rho_to_t.nc'
map1_name = 'GLBa0.08 to ARCTIC2 Bilinear Mapping'
map2_name = 'ARCTIC2 to GLBa0.08 Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
pyroms.remapping.compute_remap_weights(grid1_file, grid2_file, \
interp_file1, interp_file2, map1_name, \
map2_name, num_maps, map_method)
# compute remap weights
# input namelist variables for bilinear remapping at u points
grid1_file = 'remap_grid_GLBa0.08_NEP_t.nc'
grid2_file = 'remap_grid_ARCTIC2_u.nc'
interp_file1 = 'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_u.nc'
interp_file2 = 'remap_weights_ARCTIC2_to_GLBa0.08_bilinear_u_to_t.nc'
map1_name = 'GLBa0.08 to ARCTIC2 Bilinear Mapping'
map2_name = 'ARCTIC2 to GLBa0.08 Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
pyroms.remapping.compute_remap_weights(grid1_file, grid2_file, \
interp_file1, interp_file2, map1_name, \
map2_name, num_maps, map_method)
# compute remap weights
# input namelist variables for bilinear remapping at v points
grid1_file = 'remap_grid_GLBa0.08_NEP_t.nc'
grid2_file = 'remap_grid_ARCTIC2_v.nc'
interp_file1 = 'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_v.nc'
interp_file2 = 'remap_weights_ARCTIC2_to_GLBa0.08_bilinear_v_to_t.nc'
map1_name = 'GLBa0.08 to ARCTIC2 Bilinear Mapping'
map2_name = 'ARCTIC2 to GLBa0.08 Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
pyroms.remapping.compute_remap_weights(grid1_file, grid2_file, \
interp_file1, interp_file2, map1_name, \
map2_name, num_maps, map_method)
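# The three blocks above differ only in the destination grid position. An
# equivalent, more compact formulation (illustrative sketch using the same
# file names) would loop over the positions:
#
#     for pos in ['rho', 'u', 'v']:
#         pyroms.remapping.compute_remap_weights(
#             'remap_grid_GLBa0.08_NEP_t.nc',
#             'remap_grid_ARCTIC2_%s.nc' % pos,
#             'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_%s.nc' % pos,
#             'remap_weights_ARCTIC2_to_GLBa0.08_bilinear_%s_to_t.nc' % pos,
#             'GLBa0.08 to ARCTIC2 Bilinear Mapping',
#             'ARCTIC2 to GLBa0.08 Bilinear Mapping',
#             1, 'bilinear')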
| {
"content_hash": "b0e053f17529995bf381b990df9ee9c5",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 121,
"avg_line_length": 38.483870967741936,
"alnum_prop": 0.732606873428332,
"repo_name": "kshedstrom/pyroms",
"id": "97f3241154900649d74e5bdc8da664ec509d2812",
"size": "2386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/Arctic_HYCOM/make_remap_weights_file.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48069"
},
{
"name": "FORTRAN",
"bytes": "84335"
},
{
"name": "HTML",
"bytes": "6824662"
},
{
"name": "JavaScript",
"bytes": "31743"
},
{
"name": "Makefile",
"bytes": "879"
},
{
"name": "Python",
"bytes": "615238"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, patterns, include
from django.contrib.auth.decorators import login_required
from traceability.views import shipment
urlpatterns = patterns(
"",
url(
regex=r"^create/$",
view=login_required(shipment.ShipmentCreate.as_view()),
name="shipment_create",
),
url(
regex=r"^$",
view=shipment.ShipmentList.as_view(),
name='shipment_list',
),
url(
regex=r"^(?P<pk>\d+)/$",
view=shipment.ShipmentDetail.as_view(),
name="shipment_detail",
),
url(
regex=r"^update/(?P<pk>\d+)/$",
view=login_required(shipment.ShipmentUpdate.as_view()),
name="shipment_update",
),
)
| {
"content_hash": "9f573a40e4121ef4825965abdacb00c6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 63,
"avg_line_length": 26.62962962962963,
"alnum_prop": 0.5897079276773296,
"repo_name": "vandorjw/django-traceability",
"id": "ff2ab590fdb6d68c6db0a1dd8810d2ba8ac9c340",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traceability/urls/shipment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "31896"
},
{
"name": "Python",
"bytes": "48845"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import boto
import boto.ec2
import boto3
import sure # noqa
from moto import mock_ec2, mock_ec2_deprecated
@mock_ec2_deprecated
def test_describe_regions():
conn = boto.connect_ec2('the_key', 'the_secret')
regions = conn.get_all_regions()
regions.should.have.length_of(16)
for region in regions:
region.endpoint.should.contain(region.name)
@mock_ec2_deprecated
def test_availability_zones():
conn = boto.connect_ec2('the_key', 'the_secret')
regions = conn.get_all_regions()
for region in regions:
conn = boto.ec2.connect_to_region(region.name)
if conn is None:
continue
for zone in conn.get_all_zones():
zone.name.should.contain(region.name)
@mock_ec2
def test_boto3_describe_regions():
ec2 = boto3.client('ec2', 'us-east-1')
resp = ec2.describe_regions()
resp['Regions'].should.have.length_of(16)
for rec in resp['Regions']:
rec['Endpoint'].should.contain(rec['RegionName'])
@mock_ec2
def test_boto3_availability_zones():
ec2 = boto3.client('ec2', 'us-east-1')
resp = ec2.describe_regions()
regions = [r['RegionName'] for r in resp['Regions']]
for region in regions:
conn = boto3.client('ec2', region)
resp = conn.describe_availability_zones()
for rec in resp['AvailabilityZones']:
rec['ZoneName'].should.contain(region)
| {
"content_hash": "27e64a037ec2686eaa797f838f47a262",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 57,
"avg_line_length": 29.224489795918366,
"alnum_prop": 0.6564245810055865,
"repo_name": "kefo/moto",
"id": "7226cacaf6642f833899b49b328bd68e5f7de0b7",
"size": "1432",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_ec2/test_availability_zones_and_regions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "Python",
"bytes": "2996908"
},
{
"name": "Ruby",
"bytes": "188"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms import ModelForm
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.forms import DateTimeField
from django.utils.functional import lazy
from geocamUtil.loader import LazyGetModelByName
class UserRegistrationForm(UserCreationForm):
email = forms.EmailField(required=True)
comments = forms.CharField(required=False, label="Introduce yourself", widget=forms.Textarea)
def __init__(self, *args, **kwargs):
super(UserRegistrationForm, self).__init__(*args, **kwargs)
# Hack to modify the sequence in which the fields are rendered
self.fields.keyOrder = ['username', 'first_name', 'last_name', 'email', 'password1', 'password2', 'comments']
def clean_email(self):
"Ensure that email addresses are unique for new users."
email = self.cleaned_data['email']
if User.objects.filter(email=email).exists():
raise forms.ValidationError("A user with that email address already exists.")
return email
class Meta:
model = User
fields = ("username",'first_name', 'last_name', 'email', 'password1', 'password2', 'comments')
class EmailFeedbackForm(forms.Form):
reply_to = forms.EmailField(required=False, label="Your email address")
email_content = forms.CharField(widget=forms.Textarea, label="Message")
| {
"content_hash": "da9cd41f81f5514241538e30105b267f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 117,
"avg_line_length": 40.22222222222222,
"alnum_prop": 0.7133977900552486,
"repo_name": "xgds/xgds_core",
"id": "ac141e42036b9f3459c5f4591daf304d1236e8a0",
"size": "2208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xgds_core/registerForms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9920"
},
{
"name": "HTML",
"bytes": "50428"
},
{
"name": "JavaScript",
"bytes": "57295"
},
{
"name": "Python",
"bytes": "313670"
}
],
"symlink_target": ""
} |
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| {
"content_hash": "96b44f8fe50e2a28879aac95de659ebf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 71,
"avg_line_length": 28.152173913043477,
"alnum_prop": 0.5737451737451738,
"repo_name": "RomainBrault/scikit-learn",
"id": "42943ba64f5a68826e0f6f7a65041be8a3702e92",
"size": "1295",
"binary": false,
"copies": "67",
"ref": "refs/heads/master",
"path": "examples/manifold/plot_swissroll.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451977"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7116720"
},
{
"name": "Shell",
"bytes": "19154"
}
],
"symlink_target": ""
} |
import numpy as np
class FillRaster():
def __init__(self):
self.name = "Fill Raster Function"
self.description = ("")
self.fillValue = 0.
def getParameterInfo(self):
return [
{
'name': 'raster',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Input Raster",
'description': ""
},
{
'name': 'value',
'dataType': 'numeric',
'value': 0,
'required': True,
'displayName': "Fill Value",
'description': ("")
},
]
def updateRasterInfo(self, **kwargs):
b = kwargs['raster_info']['bandCount']
self.fillValue = kwargs.get('value', 0.)
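        # Every output band is constant, so report min == max == fillValue for each of
        # the b input bands and clear the histogram.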
kwargs['output_info']['statistics'] = b * ({'minimum': self.fillValue, 'maximum': self.fillValue}, )
kwargs['output_info']['histogram'] = ()
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
pixelBlocks['output_pixels'] = np.full(shape, self.fillValue, dtype=props['pixelType'])
return pixelBlocks
| {
"content_hash": "0a26242d9a7a48b9c4b9481cf0b4a667",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 108,
"avg_line_length": 31.6,
"alnum_prop": 0.46598101265822783,
"repo_name": "Esri/raster-functions",
"id": "93fc97c5d163e9a15728145a2eceb4c20a010a8c",
"size": "1264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functions/FillRaster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1685878"
}
],
"symlink_target": ""
} |
from builtins import object
from rest_framework import serializers
from bluebottle.organizations.models import Organization, OrganizationContact
from bluebottle.bluebottle_drf2.serializers import (
ImageSerializer
)
from rest_framework_json_api.serializers import ModelSerializer
from bluebottle.utils.fields import ValidationErrorsField, RequiredErrorsField
from bluebottle.utils.serializers import NoCommitMixin, ResourcePermissionField
class OrganizationSerializer(NoCommitMixin, ModelSerializer):
description = serializers.CharField(required=False, allow_blank=True)
slug = serializers.SlugField(allow_null=True, required=False)
name = serializers.CharField(required=True)
website = serializers.CharField(allow_blank=True, required=False)
logo = ImageSerializer(required=False, allow_null=True)
permissions = ResourcePermissionField('organization_detail', view_args=('pk',))
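    # Validation errors and missing required fields are exposed via the meta block (see meta_fields below).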
errors = ValidationErrorsField()
required = RequiredErrorsField()
included_serializers = {
'owner': 'bluebottle.initiatives.serializers.MemberSerializer',
}
class Meta(object):
model = Organization
fields = (
'id', 'name', 'slug', 'description', 'website', 'owner', 'logo',
'required', 'errors',
)
meta_fields = ['created', 'updated', 'errors', 'required', 'permissions']
class JSONAPIMeta(object):
resource_name = 'organizations'
included_resources = ['owner', ]
class OrganizationContactSerializer(NoCommitMixin, ModelSerializer):
name = serializers.CharField(required=False, allow_blank=True, allow_null=True)
email = serializers.CharField(required=False, allow_blank=True, allow_null=True)
phone = serializers.CharField(required=False, allow_blank=True, allow_null=True)
errors = ValidationErrorsField()
required = RequiredErrorsField()
included_serializers = {
'owner': 'bluebottle.initiatives.serializers.MemberSerializer',
}
class Meta(object):
model = OrganizationContact
fields = (
'id', 'name', 'email', 'phone',
'required', 'errors',
)
meta_fields = ['created', 'updated', 'errors', 'required']
class JSONAPIMeta(object):
resource_name = 'organization-contacts'
included_resources = ['owner', ]
| {
"content_hash": "801c88d2bdf21a1e10531abc785fa8fe",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 84,
"avg_line_length": 34.5735294117647,
"alnum_prop": 0.7022543598468737,
"repo_name": "onepercentclub/bluebottle",
"id": "fd4705db4db3f03e6dcbe1e935cb898e8b3816f0",
"size": "2351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/organizations/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
"""VeSync integration."""
import asyncio
import logging
from pyvesync import VeSync
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .common import async_process_devices
from .config_flow import configured_instances
from .const import (
DOMAIN,
SERVICE_UPDATE_DEVS,
VS_DISCOVERY,
VS_DISPATCHERS,
VS_FANS,
VS_LIGHTS,
VS_MANAGER,
VS_SWITCHES,
)
PLATFORMS = ["switch", "fan", "light"]
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the VeSync component."""
conf = config.get(DOMAIN)
if conf is None:
return True
if not configured_instances(hass):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: conf[CONF_USERNAME],
CONF_PASSWORD: conf[CONF_PASSWORD],
},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up Vesync as config entry."""
username = config_entry.data[CONF_USERNAME]
password = config_entry.data[CONF_PASSWORD]
time_zone = str(hass.config.time_zone)
manager = VeSync(username, password, time_zone)
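    # pyvesync's login() is a blocking call, so run it in the executor to keep the event loop free.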
login = await hass.async_add_executor_job(manager.login)
if not login:
_LOGGER.error("Unable to login to the VeSync server")
return False
device_dict = await async_process_devices(hass, manager)
forward_setup = hass.config_entries.async_forward_entry_setup
hass.data[DOMAIN] = {}
hass.data[DOMAIN][VS_MANAGER] = manager
switches = hass.data[DOMAIN][VS_SWITCHES] = []
fans = hass.data[DOMAIN][VS_FANS] = []
lights = hass.data[DOMAIN][VS_LIGHTS] = []
hass.data[DOMAIN][VS_DISPATCHERS] = []
if device_dict[VS_SWITCHES]:
switches.extend(device_dict[VS_SWITCHES])
hass.async_create_task(forward_setup(config_entry, "switch"))
if device_dict[VS_FANS]:
fans.extend(device_dict[VS_FANS])
hass.async_create_task(forward_setup(config_entry, "fan"))
if device_dict[VS_LIGHTS]:
lights.extend(device_dict[VS_LIGHTS])
hass.async_create_task(forward_setup(config_entry, "light"))
async def async_new_device_discovery(service):
"""Discover if new devices should be added."""
manager = hass.data[DOMAIN][VS_MANAGER]
switches = hass.data[DOMAIN][VS_SWITCHES]
fans = hass.data[DOMAIN][VS_FANS]
lights = hass.data[DOMAIN][VS_LIGHTS]
dev_dict = await async_process_devices(hass, manager)
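        # For each platform, diff the freshly discovered devices against those already
        # tracked: dispatch an update if the platform is loaded, otherwise forward the
        # config entry so the platform is set up for the first time.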
switch_devs = dev_dict.get(VS_SWITCHES, [])
fan_devs = dev_dict.get(VS_FANS, [])
light_devs = dev_dict.get(VS_LIGHTS, [])
switch_set = set(switch_devs)
new_switches = list(switch_set.difference(switches))
if new_switches and switches:
switches.extend(new_switches)
async_dispatcher_send(hass, VS_DISCOVERY.format(VS_SWITCHES), new_switches)
return
if new_switches and not switches:
switches.extend(new_switches)
hass.async_create_task(forward_setup(config_entry, "switch"))
fan_set = set(fan_devs)
new_fans = list(fan_set.difference(fans))
if new_fans and fans:
fans.extend(new_fans)
async_dispatcher_send(hass, VS_DISCOVERY.format(VS_FANS), new_fans)
return
if new_fans and not fans:
fans.extend(new_fans)
hass.async_create_task(forward_setup(config_entry, "fan"))
light_set = set(light_devs)
new_lights = list(light_set.difference(lights))
if new_lights and lights:
lights.extend(new_lights)
async_dispatcher_send(hass, VS_DISCOVERY.format(VS_LIGHTS), new_lights)
return
if new_lights and not lights:
lights.extend(new_lights)
hass.async_create_task(forward_setup(config_entry, "light"))
hass.services.async_register(
DOMAIN, SERVICE_UPDATE_DEVS, async_new_device_discovery
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| {
"content_hash": "47932c311e41428304e6b4f5368d4f13",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 87,
"avg_line_length": 29.92814371257485,
"alnum_prop": 0.6192476990796318,
"repo_name": "partofthething/home-assistant",
"id": "686a71427c33e7647d87cd866df8fee604597f5b",
"size": "4998",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/vesync/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
"""
Test cases for L{upnp.core.DIDLLite}
"""
from copy import copy
from twisted.trial import unittest
from coherence.upnp.core import DIDLLite
didl_fragment = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:dlna="urn:schemas-dlna-org:metadata-1-0"
xmlns:pv="http://www.pv.com/pvns/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">
<container childCount="23" id="1161" parentID="103" restricted="0">
<dc:title>12</dc:title>
<upnp:class>object.container.album.musicAlbum</upnp:class>
<dc:date>1997-02-28T17:20:00+01:00</dc:date>
<upnp:albumArtURI dlna:profileID="JPEG_TN" xmlns:dlna="urn:schemas-dlna-org:metadata-1-0">http://192.168.1.1:30020/776dec17-1ce1-4c87-841e-cac61a14a2e0/1161?cover.jpg</upnp:albumArtURI>
<upnp:artist>Herby Sängermeister</upnp:artist>
</container>
</DIDL-Lite>"""
test_didl_fragment = """
<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite">
<item id="" restricted="0">
<dc:title>New Track</dc:title>
<upnp:class>object.item.audioItem.musicTrack</upnp:class>
<res protocolInfo="*:*:audio:*">
</res>
</item>
</DIDL-Lite>"""
class TestDIDLLite(unittest.TestCase):
def test_DIDLElement_class_detect(self):
""" tests class creation from an XML DIDLLite fragment,
expects a MusicAlbum container in return
"""
didl_element = DIDLLite.DIDLElement.fromString(didl_fragment)
items = didl_element.getItems()
self.assertEqual(len(items),1)
self.assertTrue(isinstance(items[0],DIDLLite.MusicAlbum))
def test_DIDLElement_class_2_detect(self):
""" tests class creation from an XML DIDLLite fragment,
expects a MusicTrack item in return
"""
didl_element = DIDLLite.DIDLElement.fromString(test_didl_fragment)
items = didl_element.getItems()
self.assertEqual(len(items),1)
self.assertTrue(isinstance(items[0],DIDLLite.MusicTrack))
def test_DIDLElement_class_fallback_1(self):
""" tests class fallback creation from an XML DIDLLite fragment with
an unknown UPnP class identifier,
expects an Album container in return
"""
wrong_didl_fragment = copy(didl_fragment)
wrong_didl_fragment = wrong_didl_fragment.replace('object.container.album.musicAlbum', 'object.container.album.videoAlbum')
didl_element = DIDLLite.DIDLElement.fromString(wrong_didl_fragment)
items = didl_element.getItems()
self.assertEqual(len(items),1)
self.assertTrue(isinstance(items[0],DIDLLite.Album))
def test_DIDLElement_class_fallback_2(self):
""" tests class fallback creation from an XML DIDLLite fragment with
an unknown UPnP class identifier,
expects an Exception.AttributeError
"""
wrong_didl_fragment = copy(didl_fragment)
wrong_didl_fragment = wrong_didl_fragment.replace('object.container.album.musicAlbum', 'object.wrongcontainer.wrongalbum.videoAlbum')
        try:
            DIDLLite.DIDLElement.fromString(wrong_didl_fragment)
        except AttributeError:
            return
        self.fail("DIDLElement.fromString did not raise AttributeError "
                  "for a totally wrong UPnP class identifier")
| {
"content_hash": "dd0649ca7a19af113a910b15dd495cf7",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 193,
"avg_line_length": 43.49382716049383,
"alnum_prop": 0.6661935850127733,
"repo_name": "opendreambox/python-coherence",
"id": "cda5fea144cd76f35739b1cab6fb326e64d14f6c",
"size": "3688",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "coherence/upnp/core/test/test_didl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1247145"
},
{
"name": "Roff",
"bytes": "712"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
} |
from rest_framework import generics
from rest_framework import permissions
from permissions import IsOwnerOrReadOnly
from bilgecode.apps.passage_planner.models import Passage
from serializers import PassageSerializer
from django.contrib.auth.models import User
from serializers import UserSerializer
class PassageList(generics.ListCreateAPIView):
queryset = Passage.objects.all()
serializer_class = PassageSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class PassageDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Passage.objects.all()
serializer_class = PassageSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly,)
| {
"content_hash": "bc7096ce5e4ecb0ca078639cb1190842",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 32.47826086956522,
"alnum_prop": 0.7965194109772423,
"repo_name": "BilgeCode/bilgecode.com",
"id": "041d7a5aa6a0299c22f260597b8987eb93c8b88e",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bilgecode/apps/api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2312"
},
{
"name": "HTML",
"bytes": "43214"
},
{
"name": "Python",
"bytes": "25606"
}
],
"symlink_target": ""
} |
'''
HCView.py
Written by Jeff Berry on Mar 3 2011
purpose:
this is a helper file for TrackDots.py. The algorithms for
displaying and correcting the palatoglossatron dots are
contained here.
usage:
from TrackDots.py - this script should not be run directly
by the user.
'''
import os, subprocess
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import os, sys, time
import gnomecanvas
from math import *
from numpy import *
import cv
class ImageWindow:
def __init__(self, imagefiles, independent=True):
self.pathtofiles = '/'.join(imagefiles[0].split('/')[:-1]) + '/'
self.imagefiles = imagefiles
self.INDEPENDENT = independent
self.mouse_down = False
self.filenamesind = 0
self.close_enough = False
self.gladefile = "trackdots.glade"
self.wTree = gtk.glade.XML(self.gladefile, "HCView")
self.window = self.wTree.get_widget("HCView")
sigs = {"on_tbNext_clicked" : self.onNext,
"on_tbPrev_clicked" : self.onPrev,
"on_HCView_destroy" : self.onDestroy}
self.wTree.signal_autoconnect(sigs)
self.hbox = self.wTree.get_widget("hbox2")
self.statusbar = self.wTree.get_widget("statusbar2")
img = cv.LoadImageM(self.imagefiles[0], iscolor=False)
self.csize = shape(img)
self.canvas = gnomecanvas.Canvas(aa=True)
self.canvas.set_size_request(self.csize[1], self.csize[0])
self.canvas.set_scroll_region(0, 0, self.csize[1], self.csize[0])
# Put it together
self.hbox.add(self.canvas)
self.window.set_resizable(False)
self.window.show_all()
self.canvas.connect("event", self.canvas_event)
self.DrawPoints(self.filenamesind)
def onNext(self, event):
listlen = len(self.imagefiles)
self.savePoints(self.filenamesind)
self.filenamesind = (self.filenamesind + 1) % listlen
self.DrawPoints(self.filenamesind)
def onPrev(self, event):
listlen = len(self.imagefiles)
self.savePoints(self.filenamesind)
if self.filenamesind == 0:
self.filenamesind = listlen - 1
else:
self.filenamesind = self.filenamesind - 1
self.DrawPoints(self.filenamesind)
def onDestroy(self, event):
self.savePoints(self.filenamesind)
if self.INDEPENDENT:
gtk.main_quit()
else:
self.window.destroy()
def canvas_event(self, widget, event):
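        # Single handler for all canvas events: mouse motion updates the status bar and
        # drags the selected dot, a button press selects the dot within 5 pixels of the
        # click, a button release commits its new position, and key presses go to onKeyPress.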
if (event.type == gtk.gdk.MOTION_NOTIFY):
context_id = self.statusbar.get_context_id("mouse motion")
text = "(" + str(event.x) + ", " + str(event.y) + ")"
self.statusbar.push(context_id, text)
if (self.mouse_down and self.close_enough):
self.points[self.selected_ind].set(x1=event.x-2, y1=event.y-2, x2=event.x+2, y2=event.y+2)
elif (event.type == gtk.gdk.KEY_PRESS):
self.onKeyPress(widget, event)
elif (event.type == gtk.gdk.BUTTON_PRESS):
if (event.button == 1):
self.mouse_down = True
ind, dist = self.find_distance(event.x, event.y)
if dist < 5.0:
self.selected_ind = ind
self.close_enough = True
elif ((event.type == gtk.gdk.BUTTON_RELEASE) and self.mouse_down):
self.mouse_down = False
self.close_enough = False
self.point_values[self.selected_ind] = [event.x, event.y]
def find_distance(self, x, y):
ind = 0
dist = 999999999.0
for i in range(len(self.point_values)):
this_dist = sqrt( (x-self.point_values[i][0])**2 + (y-self.point_values[i][1])**2 )
if this_dist < dist:
dist = this_dist
ind = i
return ind, dist
def onKeyPress(self, widget, event):
keyname = gtk.gdk.keyval_name(event.keyval)
#print "Key %s (%d) was pressed" % (keyname, event.keyval)
if keyname == 'Right':
self.onNext(event)
elif keyname == 'Left':
self.onPrev(event)
else:
return True
def set_canvas_background(self, location):
pixbuf = gtk.gdk.pixbuf_new_from_file(location)
itemType = gnomecanvas.CanvasPixbuf
self.background = self.canvas.root().add(itemType, x=0, y=0, pixbuf=pixbuf)
def DrawPoints(self, ind):
self.window.set_title(self.imagefiles[ind])
self.set_canvas_background(self.imagefiles[ind])
self.point_values = []
self.points = []
p = open(self.imagefiles[ind]+'.hc.txt', 'r').readlines()
for i in range(len(p)):
x = round(float(p[i][:-1].split('\t')[1]))
y = round(float(p[i][:-1].split('\t')[2]))
self.point_values.append([x, y])
self.points.append(self.canvas.root().add(gnomecanvas.CanvasEllipse, x1=x-2, y1=y-2, \
x2=x+2, y2=y+2, fill_color_rgba=0xFFFF00FF, width_units=2.0))
def savePoints(self, ind):
p = open(self.imagefiles[ind]+'.hc.txt', 'w')
for i in range(len(self.point_values)):
p.write("%d\t%f\t%f\n" % (i+1, self.point_values[i][0], self.point_values[i][1]))
p.close()
if __name__ == "__main__":
ImageWindow()
gtk.main()
| {
"content_hash": "30c83ee4d01ffa77e720a0e9d74f61ca",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 106,
"avg_line_length": 34.9874213836478,
"alnum_prop": 0.5653424411288873,
"repo_name": "JRMeyer/Autotrace",
"id": "d98df79ed4f42fc48226f000667ba85a2c532526",
"size": "5586",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "matlab-version/HCView.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11647"
},
{
"name": "Matlab",
"bytes": "99843"
},
{
"name": "Objective-C",
"bytes": "373"
},
{
"name": "Python",
"bytes": "505385"
},
{
"name": "R",
"bytes": "22590"
},
{
"name": "Shell",
"bytes": "754"
}
],
"symlink_target": ""
} |
from typing import Any
from zerver.lib.actions import do_create_realm, do_create_user, bulk_add_subscriptions
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.onboarding import send_initial_realm_messages, \
setup_initial_streams
from zerver.models import Realm, UserProfile
class Command(ZulipBaseCommand):
help = """Add a new realm and initial user for manual testing of the onboarding process."""
def handle(self, **options: Any) -> None:
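        # Realm and user names are numbered from the current object counts so the
        # command can be run repeatedly without collisions.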
string_id = 'realm%02d' % (
Realm.objects.filter(string_id__startswith='realm').count(),)
realm = do_create_realm(string_id, string_id)
setup_initial_streams(realm)
name = '%02d-user' % (
UserProfile.objects.filter(email__contains='user@').count(),)
user = do_create_user('%s@%s.zulip.com' % (name, string_id),
'password', realm, name, name, is_realm_admin=True)
bulk_add_subscriptions([realm.signup_notifications_stream], [user])
send_initial_realm_messages(realm)
| {
"content_hash": "c4b4bbe46709432516f319697e5a542f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 95,
"avg_line_length": 43.875,
"alnum_prop": 0.6676163342830009,
"repo_name": "dhcrzf/zulip",
"id": "7391d5a64fd28585ee96258a41dd514f17eda251",
"size": "1053",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "zilencer/management/commands/add_new_realm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "436713"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "673974"
},
{
"name": "JavaScript",
"bytes": "2951950"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "72908"
},
{
"name": "Python",
"bytes": "6188005"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "118284"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
"""
Wrapper for ``run_server`` that restarts the server when source code is
modified.
"""
import os
import sys
from subprocess import Popen
def run():
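    # Relaunch run_server.py in a child process; exit code 3 from the child signals a
    # source change and triggers a restart, any other exit code ends the loop.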
args = sys.argv[1:]
if not "--reload" in args:
args.append("--reload")
print "run_reloading_server", args
try:
serverpath = os.path.join(os.path.dirname(__file__), "run_server.py")
while True:
p = Popen(["python", serverpath] + args,
# stdin=sys.stdin,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# preexec_fn, close_fds, shell, cwd, env, universal_newlines, startupinfo, creationflags
)
sys.stdout = p.stdout
sys.stderr = p.stderr
p.wait()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
if p.returncode == 3:
print "run_server returned 3: restarting..."
else:
print "run_server returned %s: terminating." % p.returncode
break
except Exception, e:
raise e
if __name__ == "__main__":
run()
| {
"content_hash": "88c7c4cc07df003f902c62a5458b0def",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 109,
"avg_line_length": 30.195121951219512,
"alnum_prop": 0.48546042003231016,
"repo_name": "StraNNiKK/wsgidav",
"id": "2276d2212cda672417f22d01bb2d484a711ec014",
"size": "1417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wsgidav/server/run_reloading_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "741528"
}
],
"symlink_target": ""
} |
"""Placeholders for non-task-specific model inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Inputs(object):
def __init__(self, config):
self._config = config
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.label_smoothing = tf.placeholder(tf.float32, name='label_smoothing')
self.lengths = tf.placeholder(tf.int32, shape=[None], name='lengths')
self.mask = tf.placeholder(tf.float32, [None, None], name='mask')
self.words = tf.placeholder(tf.int32, shape=[None, None], name='words')
self.chars = tf.placeholder(tf.int32, shape=[None, None, None],
name='chars')
def create_feed_dict(self, mb, is_training):
cvt = mb.task_name == 'unlabeled'
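    # 'unlabeled' minibatches drive cross-view training (CVT): they use their own
    # dropout keep_prob and never get label smoothing.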
return {
self.keep_prob: 1.0 if not is_training else
(self._config.unlabeled_keep_prob if cvt else
self._config.labeled_keep_prob),
self.label_smoothing: self._config.label_smoothing
if (is_training and not cvt) else 0.0,
self.lengths: mb.lengths,
self.words: mb.words,
self.chars: mb.chars,
self.mask: mb.mask.astype('float32')
}
| {
"content_hash": "251372062e549796b9e22d2cee8e565a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 39.39393939393939,
"alnum_prop": 0.6184615384615385,
"repo_name": "cshallue/models",
"id": "2a97004b3270eb01c3eb459eb892514b3584bf5a",
"size": "1989",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "research/cvt_text/model/shared_inputs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "2829707"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "13149300"
},
{
"name": "Shell",
"bytes": "146035"
}
],
"symlink_target": ""
} |
import datetime
from django.db import models
class BaseModel(models.Model):
is_active = models.BooleanField(default=True)
time_created = models.DateTimeField(default=datetime.datetime.now)
time_modified = models.DateTimeField(default=datetime.datetime.now)
def save(self, force_insert=False, force_update=False):
self.time_modified = datetime.datetime.now()
super(BaseModel, self).save(force_insert=False, force_update=False)
class Meta:
abstract = True
| {
"content_hash": "1d7c29a98f309f1df91808a48f631883",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7247524752475247,
"repo_name": "tarequeh/little-ebay",
"id": "b1dd45901ecc1ec44bce87cffe4faac7f5c24e0b",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lebay/apps/base/models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from collections import defaultdict
import datetime
import decimal
import json
from moto.compat import OrderedDict
from moto.core import BaseBackend
from moto.core.utils import unix_time
from .comparisons import get_comparison_func
class DynamoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'to_json'):
return obj.to_json()
def dynamo_json_dump(dynamo_object):
return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
self.type = list(type_as_dict)[0]
self.value = list(type_as_dict.values())[0]
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return (
self.type == other.type and
self.value == other.value
)
def __lt__(self, other):
return self.value < other.value
def __le__(self, other):
return self.value <= other.value
def __gt__(self, other):
return self.value > other.value
def __ge__(self, other):
return self.value >= other.value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
@property
def cast_value(self):
if self.type == 'N':
return int(self.value)
else:
return self.value
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.cast_value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.cast_value, *range_values)
class Item(object):
def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
self.hash_key = hash_key
self.hash_key_type = hash_key_type
self.range_key = range_key
self.range_key_type = range_key_type
self.attrs = {}
for key, value in attrs.items():
self.attrs[key] = DynamoType(value)
def __repr__(self):
return "Item: {0}".format(self.to_json())
def to_json(self):
attributes = {}
for attribute_key, attribute in self.attrs.items():
attributes[attribute_key] = {
attribute.type: attribute.value
}
return {
"Attributes": attributes
}
def describe_attrs(self, attributes):
if attributes:
included = {}
for key, value in self.attrs.items():
if key in attributes:
included[key] = value
else:
included = self.attrs
return {
"Item": included
}
def update(self, update_expression, expression_attribute_names, expression_attribute_values):
ACTION_VALUES = ['SET', 'set', 'REMOVE', 'remove']
action = None
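        # Walk the update expression token by token: SET/REMOVE keywords switch the
        # current action; every other token is an attribute name (REMOVE) or a
        # "key=value" assignment (SET), with expression attribute names/values substituted.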
for value in update_expression.split():
if value in ACTION_VALUES:
# An action
action = value
continue
else:
# A Real value
value = value.lstrip(":").rstrip(",")
for k, v in expression_attribute_names.items():
value = value.replace(k, v)
if action == "REMOVE" or action == 'remove':
self.attrs.pop(value, None)
elif action == 'SET' or action == 'set':
key, value = value.split("=")
if value in expression_attribute_values:
self.attrs[key] = DynamoType(expression_attribute_values[value])
else:
self.attrs[key] = DynamoType({"S": value})
def update_with_attribute_updates(self, attribute_updates):
for attribute_name, update_action in attribute_updates.items():
action = update_action['Action']
            if action == 'DELETE' and 'Value' not in update_action:
if attribute_name in self.attrs:
del self.attrs[attribute_name]
continue
new_value = list(update_action['Value'].values())[0]
if action == 'PUT':
# TODO deal with other types
if isinstance(new_value, list) or isinstance(new_value, set):
self.attrs[attribute_name] = DynamoType({"SS": new_value})
elif isinstance(new_value, dict):
self.attrs[attribute_name] = DynamoType({"M": new_value})
elif update_action['Value'].keys() == ['N']:
self.attrs[attribute_name] = DynamoType({"N": new_value})
elif update_action['Value'].keys() == ['NULL']:
if attribute_name in self.attrs:
del self.attrs[attribute_name]
else:
self.attrs[attribute_name] = DynamoType({"S": new_value})
elif action == 'ADD':
if set(update_action['Value'].keys()) == set(['N']):
existing = self.attrs.get(attribute_name, DynamoType({"N": '0'}))
self.attrs[attribute_name] = DynamoType({"N": str(
decimal.Decimal(existing.value) +
decimal.Decimal(new_value)
)})
else:
# TODO: implement other data types
raise NotImplementedError('ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
class Table(object):
def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None):
self.name = table_name
self.attr = attr
self.schema = schema
self.range_key_attr = None
self.hash_key_attr = None
self.range_key_type = None
self.hash_key_type = None
for elem in schema:
if elem["KeyType"] == "HASH":
self.hash_key_attr = elem["AttributeName"]
self.hash_key_type = elem["KeyType"]
else:
self.range_key_attr = elem["AttributeName"]
self.range_key_type = elem["KeyType"]
if throughput is None:
self.throughput = {'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10}
else:
self.throughput = throughput
self.throughput["NumberOfDecreasesToday"] = 0
self.indexes = indexes
self.global_indexes = global_indexes if global_indexes else []
self.created_at = datetime.datetime.utcnow()
self.items = defaultdict(dict)
def describe(self, base_key='TableDescription'):
results = {
base_key: {
'AttributeDefinitions': self.attr,
'ProvisionedThroughput': self.throughput,
'TableSizeBytes': 0,
'TableName': self.name,
'TableStatus': 'ACTIVE',
'KeySchema': self.schema,
'ItemCount': len(self),
'CreationDateTime': unix_time(self.created_at),
'GlobalSecondaryIndexes': [index for index in self.global_indexes],
'LocalSecondaryIndexes': [index for index in self.indexes]
}
}
return results
def __len__(self):
count = 0
for key, value in self.items.items():
if self.has_range_key:
count += len(value)
else:
count += 1
return count
@property
def hash_key_names(self):
keys = [self.hash_key_attr]
for index in self.global_indexes:
hash_key = None
for key in index['KeySchema']:
if key['KeyType'] == 'HASH':
hash_key = key['AttributeName']
keys.append(hash_key)
return keys
@property
def range_key_names(self):
keys = [self.range_key_attr]
for index in self.global_indexes:
range_key = None
for key in index['KeySchema']:
if key['KeyType'] == 'RANGE':
                    range_key = key['AttributeName']
keys.append(range_key)
return keys
def put_item(self, item_attrs, expected=None, overwrite=False):
hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
if self.has_range_key:
range_value = DynamoType(item_attrs.get(self.range_key_attr))
else:
range_value = None
item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs)
if not overwrite:
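            # Conditional put: look up the current item and check every expectation
            # (Exists / Value / ComparisonOperator) before replacing it.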
if expected is None:
expected = {}
lookup_range_value = range_value
else:
expected_range_value = expected.get(self.range_key_attr, {}).get("Value")
if(expected_range_value is None):
lookup_range_value = range_value
else:
lookup_range_value = DynamoType(expected_range_value)
current = self.get_item(hash_value, lookup_range_value)
if current is None:
current_attr = {}
elif hasattr(current, 'attrs'):
current_attr = current.attrs
else:
current_attr = current
for key, val in expected.items():
if 'Exists' in val and val['Exists'] is False:
if key in current_attr:
raise ValueError("The conditional request failed")
elif key not in current_attr:
raise ValueError("The conditional request failed")
elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value:
raise ValueError("The conditional request failed")
elif 'ComparisonOperator' in val:
comparison_func = get_comparison_func(val['ComparisonOperator'])
dynamo_types = [DynamoType(ele) for ele in val["AttributeValueList"]]
for t in dynamo_types:
if not comparison_func(current_attr[key].value, t.value):
raise ValueError('The conditional request failed')
if range_value:
self.items[hash_value][range_value] = item
else:
self.items[hash_value] = item
return item
def __nonzero__(self):
return True
def __bool__(self):
return self.__nonzero__()
@property
def has_range_key(self):
return self.range_key_attr is not None
def get_item(self, hash_key, range_key=None):
if self.has_range_key and not range_key:
raise ValueError("Table has a range key, but no range key was passed into get_item")
try:
if range_key:
return self.items[hash_key][range_key]
if hash_key in self.items:
return self.items[hash_key]
raise KeyError
except KeyError:
return None
def delete_item(self, hash_key, range_key):
try:
if range_key:
return self.items[hash_key].pop(range_key)
else:
return self.items.pop(hash_key)
except KeyError:
return None
def query(self, hash_key, range_comparison, range_objs, limit,
exclusive_start_key, scan_index_forward, index_name=None, **filter_kwargs):
results = []
if index_name:
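            # Querying a secondary index: resolve the index by name and match items on
            # the index's own hash key attribute instead of the table's hash key.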
all_indexes = (self.global_indexes or []) + (self.indexes or [])
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
if index_name not in indexes_by_name:
raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
index_name, self.name, ', '.join(indexes_by_name.keys())
))
index = indexes_by_name[index_name]
try:
index_hash_key = [key for key in index['KeySchema'] if key['KeyType'] == 'HASH'][0]
except IndexError:
raise ValueError('Missing Hash Key. KeySchema: %s' % index['KeySchema'])
possible_results = []
for item in self.all_items():
if not isinstance(item, Item):
continue
item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
if item_hash_key and item_hash_key == hash_key:
possible_results.append(item)
else:
possible_results = [item for item in list(self.all_items()) if isinstance(item, Item) and item.hash_key == hash_key]
if index_name:
try:
index_range_key = [key for key in index['KeySchema'] if key['KeyType'] == 'RANGE'][0]
except IndexError:
index_range_key = None
if range_comparison:
if index_name and not index_range_key:
raise ValueError('Range Key comparison but no range key found for index: %s' % index_name)
elif index_name:
for result in possible_results:
if result.attrs.get(index_range_key['AttributeName']).compare(range_comparison, range_objs):
results.append(result)
else:
for result in possible_results:
if result.range_key.compare(range_comparison, range_objs):
results.append(result)
if filter_kwargs:
for result in possible_results:
for field, value in filter_kwargs.items():
dynamo_types = [DynamoType(ele) for ele in value["AttributeValueList"]]
if result.attrs.get(field).compare(value['ComparisonOperator'], dynamo_types):
results.append(result)
if not range_comparison and not filter_kwargs:
# If we're not filtering on range key or on an index return all values
results = possible_results
if index_name:
if index_range_key:
results.sort(key=lambda item: item.attrs[index_range_key['AttributeName']].value
if item.attrs.get(index_range_key['AttributeName']) else None)
else:
results.sort(key=lambda item: item.range_key)
if scan_index_forward is False:
results.reverse()
scanned_count = len(list(self.all_items()))
results, last_evaluated_key = self._trim_results(results, limit,
exclusive_start_key)
return results, scanned_count, last_evaluated_key
def all_items(self):
for hash_set in self.items.values():
if self.range_key_attr:
for item in hash_set.values():
yield item
else:
yield hash_set
def scan(self, filters, limit, exclusive_start_key):
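        # Full table scan: count every item, keep only those satisfying all attribute
        # filters (a NULL comparison passes when the attribute is absent), then paginate.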
results = []
scanned_count = 0
for result in self.all_items():
scanned_count += 1
passes_all_conditions = True
for attribute_name, (comparison_operator, comparison_objs) in filters.items():
attribute = result.attrs.get(attribute_name)
if attribute:
# Attribute found
if not attribute.compare(comparison_operator, comparison_objs):
passes_all_conditions = False
break
elif comparison_operator == 'NULL':
# Comparison is NULL and we don't have the attribute
continue
else:
                    # No attribute found and comparison is not NULL. This item fails
passes_all_conditions = False
break
if passes_all_conditions:
results.append(result)
results, last_evaluated_key = self._trim_results(results, limit,
exclusive_start_key)
return results, scanned_count, last_evaluated_key
def _trim_results(self, results, limit, exclusive_start_key):
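        # Pagination helper: skip everything up to and including the exclusive start key,
        # then cap the results at 'limit' and report the key of the last item returned.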
if exclusive_start_key is not None:
hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
range_key = exclusive_start_key.get(self.range_key_attr)
if range_key is not None:
range_key = DynamoType(range_key)
for i in range(len(results)):
if results[i].hash_key == hash_key and results[i].range_key == range_key:
results = results[i + 1:]
break
last_evaluated_key = None
if limit and len(results) > limit:
results = results[:limit]
last_evaluated_key = {
self.hash_key_attr: results[-1].hash_key
}
if results[-1].range_key is not None:
last_evaluated_key[self.range_key_attr] = results[-1].range_key
return results, last_evaluated_key
def lookup(self, *args, **kwargs):
if not self.schema:
self.describe()
for x, arg in enumerate(args):
kwargs[self.schema[x].name] = arg
ret = self.get_item(**kwargs)
if not ret.keys():
return None
return ret
class DynamoDBBackend(BaseBackend):
def __init__(self):
self.tables = OrderedDict()
def create_table(self, name, **params):
if name in self.tables:
return None
table = Table(name, **params)
self.tables[name] = table
return table
def delete_table(self, name):
return self.tables.pop(name, None)
def update_table_throughput(self, name, throughput):
table = self.tables[name]
table.throughput = throughput
return table
def update_table_global_indexes(self, name, global_index_updates):
table = self.tables[name]
gsis_by_name = dict((i['IndexName'], i) for i in table.global_indexes)
for gsi_update in global_index_updates:
gsi_to_create = gsi_update.get('Create')
gsi_to_update = gsi_update.get('Update')
gsi_to_delete = gsi_update.get('Delete')
if gsi_to_delete:
index_name = gsi_to_delete['IndexName']
if index_name not in gsis_by_name:
raise ValueError('Global Secondary Index does not exist, but tried to delete: %s' %
gsi_to_delete['IndexName'])
del gsis_by_name[index_name]
if gsi_to_update:
index_name = gsi_to_update['IndexName']
if index_name not in gsis_by_name:
raise ValueError('Global Secondary Index does not exist, but tried to update: %s' %
gsi_to_update['IndexName'])
gsis_by_name[index_name].update(gsi_to_update)
if gsi_to_create:
if gsi_to_create['IndexName'] in gsis_by_name:
raise ValueError('Global Secondary Index already exists: %s' % gsi_to_create['IndexName'])
gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create
table.global_indexes = gsis_by_name.values()
return table
def put_item(self, table_name, item_attrs, expected=None, overwrite=False):
table = self.tables.get(table_name)
if not table:
return None
return table.put_item(item_attrs, expected, overwrite)
def get_table_keys_name(self, table_name, keys):
"""
        Given a set of keys, extracts the hash key and range key
"""
table = self.tables.get(table_name)
if not table:
return None, None
else:
if len(keys) == 1:
for key in keys:
if key in table.hash_key_names:
return key, None
# for potential_hash, potential_range in zip(table.hash_key_names, table.range_key_names):
# if set([potential_hash, potential_range]) == set(keys):
# return potential_hash, potential_range
potential_hash, potential_range = None, None
for key in set(keys):
if key in table.hash_key_names:
potential_hash = key
elif key in table.range_key_names:
potential_range = key
return potential_hash, potential_range
def get_keys_value(self, table, keys):
if table.hash_key_attr not in keys or (table.has_range_key and table.range_key_attr not in keys):
raise ValueError("Table has a range key, but no range key was passed into get_item")
hash_key = DynamoType(keys[table.hash_key_attr])
range_key = DynamoType(keys[table.range_key_attr]) if table.has_range_key else None
return hash_key, range_key
def get_table(self, table_name):
return self.tables.get(table_name)
def get_item(self, table_name, keys):
table = self.get_table(table_name)
if not table:
raise ValueError("No table found")
hash_key, range_key = self.get_keys_value(table, keys)
return table.get_item(hash_key, range_key)
def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts,
limit, exclusive_start_key, scan_index_forward, index_name=None, **filter_kwargs):
table = self.tables.get(table_name)
if not table:
return None, None
hash_key = DynamoType(hash_key_dict)
range_values = [DynamoType(range_value) for range_value in range_value_dicts]
return table.query(hash_key, range_comparison, range_values, limit,
exclusive_start_key, scan_index_forward, index_name, **filter_kwargs)
def scan(self, table_name, filters, limit, exclusive_start_key):
table = self.tables.get(table_name)
if not table:
return None, None, None
scan_filters = {}
for key, (comparison_operator, comparison_values) in filters.items():
dynamo_types = [DynamoType(value) for value in comparison_values]
scan_filters[key] = (comparison_operator, dynamo_types)
return table.scan(scan_filters, limit, exclusive_start_key)
def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values):
table = self.get_table(table_name)
if all([table.hash_key_attr in key, table.range_key_attr in key]):
# Covers cases where table has hash and range keys, ``key`` param will be a dict
hash_value = DynamoType(key[table.hash_key_attr])
range_value = DynamoType(key[table.range_key_attr])
elif table.hash_key_attr in key:
# Covers tables that have a range key where ``key`` param is a dict
hash_value = DynamoType(key[table.hash_key_attr])
range_value = None
else:
# Covers other cases
hash_value = DynamoType(key)
range_value = None
item = table.get_item(hash_value, range_value)
# Update does not fail on new items, so create one
if item is None:
data = {
table.hash_key_attr: {
hash_value.type: hash_value.value,
},
}
if range_value:
data.update({
table.range_key_attr: {
range_value.type: range_value.value,
}
})
table.put_item(data)
item = table.get_item(hash_value, range_value)
if update_expression:
item.update(update_expression, expression_attribute_names, expression_attribute_values)
else:
item.update_with_attribute_updates(attribute_updates)
return item
def delete_item(self, table_name, keys):
table = self.tables.get(table_name)
if not table:
return None
hash_key, range_key = self.get_keys_value(table, keys)
return table.delete_item(hash_key, range_key)
dynamodb_backend2 = DynamoDBBackend()
| {
"content_hash": "0c788b122e0f5ae6978f94b370790cb3",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 138,
"avg_line_length": 38.41588785046729,
"alnum_prop": 0.5513116814661639,
"repo_name": "braintreeps/moto",
"id": "a70d6347dfb136c6f7f248e78adcfda96726dfe9",
"size": "24663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moto/dynamodb2/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "230"
},
{
"name": "Python",
"bytes": "2427571"
}
],
"symlink_target": ""
} |
"""
DWF Python Example
Modified by: MURAMATSU Atsushi <[email protected]>
Revised: 2016-04-21
Original Author: Digilent, Inc.
Original Revision: 12/29/2015
Requires:
Python 2.7, 3.3 or later
"""
import dwf
import time
#print DWF version
print("DWF Version: " + dwf.FDwfGetVersion())
#open device
print("Opening first device")
dwf_aio = dwf.DwfAnalogIO()
print("Device temperature and USB/AUX supply voltage and current")
#monitor voltage, current, temperature
#60 times, once per second
for i in range(60):
# wait between readings
time.sleep(1)
# fetch analog IO status from device
dwf_aio.status()
# get system monitor readings
usbVoltage = dwf_aio.channelNodeStatus(2, 0)
usbCurrent = dwf_aio.channelNodeStatus(2, 1)
deviceTemperature = dwf_aio.channelNodeStatus(2, 2)
auxVoltage = dwf_aio.channelNodeStatus(3, 0)
auxCurrent = dwf_aio.channelNodeStatus(3, 1)
print("Temperature: %.2fdegC" % deviceTemperature)
print("USB:\t%.3fV\t%.3fA" % (usbVoltage, usbCurrent))
print("AUX:\t%.3fV\t%.3fA" % (auxVoltage, auxCurrent))
#close the device
dwf_aio.close()
| {
"content_hash": "48e2d6f26d542f62be5addebcb14f387",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 28.071428571428573,
"alnum_prop": 0.6802374893977947,
"repo_name": "amuramatsu/dwf",
"id": "7dd6612c7620baa70fae8965feae47c824266288",
"size": "1226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/AnalogIO_AnalogDiscovery2_SystemMonitor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121719"
}
],
"symlink_target": ""
} |